mirror of https://git.zx2c4.com/wireguard-rs
15 changed files with 350 additions and 1242 deletions
@ -1,190 +0,0 @@
|
||||
use std::mem; |
||||
use std::sync::atomic::Ordering; |
||||
use std::sync::Arc; |
||||
|
||||
use crossbeam_channel::Receiver; |
||||
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; |
||||
use zerocopy::{AsBytes, LayoutVerified}; |
||||
|
||||
use super::constants::MAX_INORDER_CONSUME; |
||||
use super::device::DecryptionState; |
||||
use super::device::Device; |
||||
use super::messages::TransportHeader; |
||||
use super::peer::Peer; |
||||
use super::pool::*; |
||||
use super::types::Callbacks; |
||||
use super::{tun, udp, Endpoint}; |
||||
use super::{REJECT_AFTER_MESSAGES, SIZE_TAG}; |
||||
|
||||
pub struct Inbound<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>> { |
||||
msg: Vec<u8>, |
||||
failed: bool, |
||||
state: Arc<DecryptionState<E, C, T, B>>, |
||||
endpoint: Option<E>, |
||||
} |
||||
|
||||
impl<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>> Inbound<E, C, T, B> { |
||||
pub fn new( |
||||
msg: Vec<u8>, |
||||
state: Arc<DecryptionState<E, C, T, B>>, |
||||
endpoint: E, |
||||
) -> Inbound<E, C, T, B> { |
||||
Inbound { |
||||
msg, |
||||
state, |
||||
failed: false, |
||||
endpoint: Some(endpoint), |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[inline(always)] |
||||
pub fn parallel<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
device: Device<E, C, T, B>, |
||||
receiver: Receiver<Job<Peer<E, C, T, B>, Inbound<E, C, T, B>>>, |
||||
) { |
||||
// parallel work to apply
|
||||
#[inline(always)] |
||||
fn work<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
peer: &Peer<E, C, T, B>, |
||||
body: &mut Inbound<E, C, T, B>, |
||||
) { |
||||
log::trace!("worker, parallel section, obtained job"); |
||||
|
||||
// cast to header followed by payload
|
||||
let (header, packet): (LayoutVerified<&mut [u8], TransportHeader>, &mut [u8]) = |
||||
match LayoutVerified::new_from_prefix(&mut body.msg[..]) { |
||||
Some(v) => v, |
||||
None => { |
||||
log::debug!("inbound worker: failed to parse message"); |
||||
return; |
||||
} |
||||
}; |
||||
|
||||
// authenticate and decrypt payload
|
||||
{ |
||||
// create nonce object
|
||||
let mut nonce = [0u8; 12]; |
||||
debug_assert_eq!(nonce.len(), CHACHA20_POLY1305.nonce_len()); |
||||
nonce[4..].copy_from_slice(header.f_counter.as_bytes()); |
||||
let nonce = Nonce::assume_unique_for_key(nonce); |
||||
|
||||
// do the weird ring AEAD dance
|
||||
let key = LessSafeKey::new( |
||||
UnboundKey::new(&CHACHA20_POLY1305, &body.state.keypair.recv.key[..]).unwrap(), |
||||
); |
||||
|
||||
// attempt to open (and authenticate) the body
|
||||
match key.open_in_place(nonce, Aad::empty(), packet) { |
||||
Ok(_) => (), |
||||
Err(_) => { |
||||
// fault and return early
|
||||
log::trace!("inbound worker: authentication failure"); |
||||
body.failed = true; |
||||
return; |
||||
} |
||||
} |
||||
} |
||||
|
||||
// check that counter not after reject
|
||||
if header.f_counter.get() >= REJECT_AFTER_MESSAGES { |
||||
body.failed = true; |
||||
return; |
||||
} |
||||
|
||||
// cryptokey route and strip padding
|
||||
let inner_len = { |
||||
let length = packet.len() - SIZE_TAG; |
||||
if length > 0 { |
||||
peer.device.table.check_route(&peer, &packet[..length]) |
||||
} else { |
||||
Some(0) |
||||
} |
||||
}; |
||||
|
||||
// truncate to remove tag
|
||||
match inner_len { |
||||
None => { |
||||
log::trace!("inbound worker: cryptokey routing failed"); |
||||
body.failed = true; |
||||
} |
||||
Some(len) => { |
||||
log::trace!( |
||||
"inbound worker: good route, length = {} {}", |
||||
len, |
||||
if len == 0 { "(keepalive)" } else { "" } |
||||
); |
||||
body.msg.truncate(mem::size_of::<TransportHeader>() + len); |
||||
} |
||||
} |
||||
} |
||||
|
||||
worker_parallel(device, |dev| &dev.run_inbound, receiver, work) |
||||
} |
||||
|
||||
#[inline(always)] |
||||
pub fn sequential<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
device: Device<E, C, T, B>, |
||||
) { |
||||
// sequential work to apply
|
||||
fn work<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
peer: &Peer<E, C, T, B>, |
||||
body: &mut Inbound<E, C, T, B>, |
||||
) { |
||||
log::trace!("worker, sequential section, obtained job"); |
||||
|
||||
// decryption failed, return early
|
||||
if body.failed { |
||||
log::trace!("job faulted, remove from queue and ignore"); |
||||
return; |
||||
} |
||||
|
||||
// cast transport header
|
||||
let (header, packet): (LayoutVerified<&[u8], TransportHeader>, &[u8]) = |
||||
match LayoutVerified::new_from_prefix(&body.msg[..]) { |
||||
Some(v) => v, |
||||
None => { |
||||
log::debug!("inbound worker: failed to parse message"); |
||||
return; |
||||
} |
||||
}; |
||||
|
||||
// check for replay
|
||||
if !body.state.protector.lock().update(header.f_counter.get()) { |
||||
log::debug!("inbound worker: replay detected"); |
||||
return; |
||||
} |
||||
|
||||
// check for confirms key
|
||||
if !body.state.confirmed.swap(true, Ordering::SeqCst) { |
||||
log::debug!("inbound worker: message confirms key"); |
||||
peer.confirm_key(&body.state.keypair); |
||||
} |
||||
|
||||
// update endpoint
|
||||
*peer.endpoint.lock() = body.endpoint.take(); |
||||
|
||||
// check if should be written to TUN
|
||||
let mut sent = false; |
||||
if packet.len() > 0 { |
||||
sent = match peer.device.inbound.write(&packet[..]) { |
||||
Err(e) => { |
||||
log::debug!("failed to write inbound packet to TUN: {:?}", e); |
||||
false |
||||
} |
||||
Ok(_) => true, |
||||
} |
||||
} else { |
||||
log::debug!("inbound worker: received keepalive") |
||||
} |
||||
|
||||
// trigger callback
|
||||
C::recv(&peer.opaque, body.msg.len(), sent, &body.state.keypair); |
||||
} |
||||
|
||||
// handle message from the peers inbound queue
|
||||
device.run_inbound.run(|peer| { |
||||
peer.inbound |
||||
.handle(|body| work(&peer, body), MAX_INORDER_CONSUME) |
||||
}); |
||||
} |
||||
@ -1,110 +0,0 @@
|
||||
use std::sync::Arc; |
||||
|
||||
use crossbeam_channel::Receiver; |
||||
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; |
||||
use zerocopy::{AsBytes, LayoutVerified}; |
||||
|
||||
use super::constants::MAX_INORDER_CONSUME; |
||||
use super::device::Device; |
||||
use super::messages::{TransportHeader, TYPE_TRANSPORT}; |
||||
use super::peer::Peer; |
||||
use super::pool::*; |
||||
use super::types::Callbacks; |
||||
use super::KeyPair; |
||||
use super::{tun, udp, Endpoint}; |
||||
use super::{REJECT_AFTER_MESSAGES, SIZE_TAG}; |
||||
|
||||
pub struct Outbound { |
||||
msg: Vec<u8>, |
||||
keypair: Arc<KeyPair>, |
||||
counter: u64, |
||||
} |
||||
|
||||
impl Outbound { |
||||
pub fn new(msg: Vec<u8>, keypair: Arc<KeyPair>, counter: u64) -> Outbound { |
||||
Outbound { |
||||
msg, |
||||
keypair, |
||||
counter, |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[inline(always)] |
||||
pub fn parallel<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
device: Device<E, C, T, B>, |
||||
receiver: Receiver<Job<Peer<E, C, T, B>, Outbound>>, |
||||
) { |
||||
#[inline(always)] |
||||
fn work<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
_peer: &Peer<E, C, T, B>, |
||||
body: &mut Outbound, |
||||
) { |
||||
log::trace!("worker, parallel section, obtained job"); |
||||
|
||||
// make space for the tag
|
||||
body.msg.extend([0u8; SIZE_TAG].iter()); |
||||
|
||||
// cast to header (should never fail)
|
||||
let (mut header, packet): (LayoutVerified<&mut [u8], TransportHeader>, &mut [u8]) = |
||||
LayoutVerified::new_from_prefix(&mut body.msg[..]) |
||||
.expect("earlier code should ensure that there is ample space"); |
||||
|
||||
// set header fields
|
||||
debug_assert!( |
||||
body.counter < REJECT_AFTER_MESSAGES, |
||||
"should be checked when assigning counters" |
||||
); |
||||
header.f_type.set(TYPE_TRANSPORT); |
||||
header.f_receiver.set(body.keypair.send.id); |
||||
header.f_counter.set(body.counter); |
||||
|
||||
// create a nonce object
|
||||
let mut nonce = [0u8; 12]; |
||||
debug_assert_eq!(nonce.len(), CHACHA20_POLY1305.nonce_len()); |
||||
nonce[4..].copy_from_slice(header.f_counter.as_bytes()); |
||||
let nonce = Nonce::assume_unique_for_key(nonce); |
||||
|
||||
// do the weird ring AEAD dance
|
||||
let key = LessSafeKey::new( |
||||
UnboundKey::new(&CHACHA20_POLY1305, &body.keypair.send.key[..]).unwrap(), |
||||
); |
||||
|
||||
// encrypt content of transport message in-place
|
||||
let end = packet.len() - SIZE_TAG; |
||||
let tag = key |
||||
.seal_in_place_separate_tag(nonce, Aad::empty(), &mut packet[..end]) |
||||
.unwrap(); |
||||
|
||||
// append tag
|
||||
packet[end..].copy_from_slice(tag.as_ref()); |
||||
} |
||||
|
||||
worker_parallel(device, |dev| &dev.run_outbound, receiver, work); |
||||
} |
||||
|
||||
#[inline(always)] |
||||
pub fn sequential<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
device: Device<E, C, T, B>, |
||||
) { |
||||
device.run_outbound.run(|peer| { |
||||
peer.outbound.handle( |
||||
|body| { |
||||
log::trace!("worker, sequential section, obtained job"); |
||||
|
||||
// send to peer
|
||||
let xmit = peer.send(&body.msg[..]).is_ok(); |
||||
|
||||
// trigger callback
|
||||
C::send( |
||||
&peer.opaque, |
||||
body.msg.len(), |
||||
xmit, |
||||
&body.keypair, |
||||
body.counter, |
||||
); |
||||
}, |
||||
MAX_INORDER_CONSUME, |
||||
) |
||||
}); |
||||
} |
||||
@ -1,164 +0,0 @@
|
||||
use std::mem; |
||||
use std::sync::Arc; |
||||
|
||||
use arraydeque::ArrayDeque; |
||||
use crossbeam_channel::Receiver; |
||||
use spin::{Mutex, MutexGuard}; |
||||
|
||||
use super::constants::INORDER_QUEUE_SIZE; |
||||
use super::runq::{RunQueue, ToKey}; |
||||
|
||||
/// Mutable job state, shared between the parallel worker and the
/// owning peer's in-order queue.
pub struct InnerJob<P, B> {
    // peer (used by worker to schedule/handle inorder queue);
    // when the peer is None, the job is complete
    peer: Option<P>,
    pub body: B,
}
||||
|
||||
pub struct Job<P, B> { |
||||
inner: Arc<Mutex<InnerJob<P, B>>>, |
||||
} |
||||
|
||||
impl<P, B> Clone for Job<P, B> { |
||||
fn clone(&self) -> Job<P, B> { |
||||
Job { |
||||
inner: self.inner.clone(), |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl<P, B> Job<P, B> { |
||||
pub fn new(peer: P, body: B) -> Job<P, B> { |
||||
Job { |
||||
inner: Arc::new(Mutex::new(InnerJob { |
||||
peer: Some(peer), |
||||
body, |
||||
})), |
||||
} |
||||
} |
||||
} |
||||
|
||||
impl<P, B> Job<P, B> { |
||||
/// Returns a mutex guard to the inner job if complete
|
||||
pub fn complete(&self) -> Option<MutexGuard<InnerJob<P, B>>> { |
||||
self.inner |
||||
.try_lock() |
||||
.and_then(|m| if m.peer.is_none() { Some(m) } else { None }) |
||||
} |
||||
} |
||||
|
||||
pub struct InorderQueue<P, B> { |
||||
queue: Mutex<ArrayDeque<[Job<P, B>; INORDER_QUEUE_SIZE]>>, |
||||
} |
||||
|
||||
impl<P, B> InorderQueue<P, B> { |
||||
pub fn new() -> InorderQueue<P, B> { |
||||
InorderQueue { |
||||
queue: Mutex::new(ArrayDeque::new()), |
||||
} |
||||
} |
||||
|
||||
/// Add a new job to the in-order queue
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `job`: The job added to the back of the queue
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// True if the element was added,
|
||||
/// false to indicate that the queue is full.
|
||||
pub fn send(&self, job: Job<P, B>) -> bool { |
||||
self.queue.lock().push_back(job).is_ok() |
||||
} |
||||
|
||||
/// Consume completed jobs from the in-order queue
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `f`: function to apply to the body of each jobof each job.
|
||||
/// - `limit`: maximum number of jobs to handle before returning
|
||||
///
|
||||
/// # Returns
|
||||
///
|
||||
/// A boolean indicating if the limit was reached:
|
||||
/// true indicating that the limit was reached,
|
||||
/// while false implies that the queue is empty or an uncompleted job was reached.
|
||||
#[inline(always)] |
||||
pub fn handle<F: Fn(&mut B)>(&self, f: F, mut limit: usize) -> bool { |
||||
// take the mutex
|
||||
let mut queue = self.queue.lock(); |
||||
|
||||
while limit > 0 { |
||||
// attempt to extract front element
|
||||
let front = queue.pop_front(); |
||||
let elem = match front { |
||||
Some(elem) => elem, |
||||
_ => { |
||||
return false; |
||||
} |
||||
}; |
||||
|
||||
// apply function if job complete
|
||||
let ret = if let Some(mut guard) = elem.complete() { |
||||
mem::drop(queue); |
||||
f(&mut guard.body); |
||||
queue = self.queue.lock(); |
||||
false |
||||
} else { |
||||
true |
||||
}; |
||||
|
||||
// job not complete yet, return job to front
|
||||
if ret { |
||||
queue.push_front(elem).unwrap(); |
||||
return false; |
||||
} |
||||
limit -= 1; |
||||
} |
||||
|
||||
// did not complete all jobs
|
||||
true |
||||
} |
||||
} |
||||
|
||||
/// Allows easy construction of a parallel worker.
|
||||
/// Applicable for both decryption and encryption workers.
|
||||
#[inline(always)] |
||||
pub fn worker_parallel< |
||||
P: ToKey, // represents a peer (atomic reference counted pointer)
|
||||
B, // inner body type (message buffer, key material, ...)
|
||||
D, // device
|
||||
W: Fn(&P, &mut B), |
||||
Q: Fn(&D) -> &RunQueue<P>, |
||||
>( |
||||
device: D, |
||||
queue: Q, |
||||
receiver: Receiver<Job<P, B>>, |
||||
work: W, |
||||
) { |
||||
log::trace!("router worker started"); |
||||
loop { |
||||
// handle new job
|
||||
let peer = { |
||||
// get next job
|
||||
let job = match receiver.recv() { |
||||
Ok(job) => job, |
||||
_ => return, |
||||
}; |
||||
|
||||
// lock the job
|
||||
let mut job = job.inner.lock(); |
||||
|
||||
// take the peer from the job
|
||||
let peer = job.peer.take().unwrap(); |
||||
|
||||
// process job
|
||||
work(&peer, &mut job.body); |
||||
peer |
||||
}; |
||||
|
||||
// process inorder jobs for peer
|
||||
queue(&device).insert(peer); |
||||
} |
||||
} |
||||
@ -1,164 +0,0 @@
|
||||
use std::hash::Hash; |
||||
use std::mem; |
||||
use std::sync::{Condvar, Mutex}; |
||||
|
||||
use std::collections::hash_map::Entry; |
||||
use std::collections::HashMap; |
||||
use std::collections::VecDeque; |
||||
|
||||
/// Types which can produce a hashable key identifying themselves;
/// used by [`RunQueue`] to deduplicate queued elements.
pub trait ToKey {
    type Key: Hash + Eq;
    fn to_key(&self) -> Self::Key;
}
||||
|
||||
pub struct RunQueue<T: ToKey> { |
||||
cvar: Condvar, |
||||
inner: Mutex<Inner<T>>, |
||||
} |
||||
|
||||
struct Inner<T: ToKey> { |
||||
stop: bool, |
||||
queue: VecDeque<T>, |
||||
members: HashMap<T::Key, usize>, |
||||
} |
||||
|
||||
impl<T: ToKey> RunQueue<T> { |
||||
pub fn close(&self) { |
||||
let mut inner = self.inner.lock().unwrap(); |
||||
inner.stop = true; |
||||
self.cvar.notify_all(); |
||||
} |
||||
|
||||
pub fn new() -> RunQueue<T> { |
||||
RunQueue { |
||||
cvar: Condvar::new(), |
||||
inner: Mutex::new(Inner { |
||||
stop: false, |
||||
queue: VecDeque::new(), |
||||
members: HashMap::new(), |
||||
}), |
||||
} |
||||
} |
||||
|
||||
pub fn insert(&self, v: T) { |
||||
let key = v.to_key(); |
||||
let mut inner = self.inner.lock().unwrap(); |
||||
match inner.members.entry(key) { |
||||
Entry::Occupied(mut elem) => { |
||||
*elem.get_mut() += 1; |
||||
} |
||||
Entry::Vacant(spot) => { |
||||
// add entry to back of queue
|
||||
spot.insert(0); |
||||
inner.queue.push_back(v); |
||||
|
||||
// wake a thread
|
||||
self.cvar.notify_one(); |
||||
} |
||||
} |
||||
} |
||||
|
||||
/// Run (consume from) the run queue using the provided function.
|
||||
/// The function should return wheter the given element should be rescheduled.
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
/// - `f` : function to apply to every element
|
||||
///
|
||||
/// # Note
|
||||
///
|
||||
/// The function f may be called again even when the element was not inserted back in to the
|
||||
/// queue since the last applciation and no rescheduling was requested.
|
||||
///
|
||||
/// This happens then the function handles all work for T,
|
||||
/// but T is added to the run queue while the function is running.
|
||||
pub fn run<F: Fn(&T) -> bool>(&self, f: F) { |
||||
let mut inner = self.inner.lock().unwrap(); |
||||
loop { |
||||
// fetch next element
|
||||
let elem = loop { |
||||
// run-queue closed
|
||||
if inner.stop { |
||||
return; |
||||
} |
||||
|
||||
// try to pop from queue
|
||||
match inner.queue.pop_front() { |
||||
Some(elem) => { |
||||
break elem; |
||||
} |
||||
None => (), |
||||
}; |
||||
|
||||
// wait for an element to be inserted
|
||||
inner = self.cvar.wait(inner).unwrap(); |
||||
}; |
||||
|
||||
// fetch current request number
|
||||
let key = elem.to_key(); |
||||
let old_n = *inner.members.get(&key).unwrap(); |
||||
mem::drop(inner); // drop guard
|
||||
|
||||
// handle element
|
||||
let rerun = f(&elem); |
||||
|
||||
// if the function requested a re-run add the element to the back of the queue
|
||||
inner = self.inner.lock().unwrap(); |
||||
if rerun { |
||||
inner.queue.push_back(elem); |
||||
continue; |
||||
} |
||||
|
||||
// otherwise check if new requests have come in since we ran the function
|
||||
match inner.members.entry(key) { |
||||
Entry::Occupied(occ) => { |
||||
if *occ.get() == old_n { |
||||
// no new requests since last, remove entry.
|
||||
occ.remove(); |
||||
} else { |
||||
// new requests, reschedule.
|
||||
inner.queue.push_back(elem); |
||||
} |
||||
} |
||||
Entry::Vacant(_) => { |
||||
unreachable!(); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::thread;
    use std::time::Duration;

    // NOTE(review): the test below is disabled (commented out). As written it
    // would not compile: `Arc` is not imported in this module, and the closures
    // passed to `run` return `()` while `run` expects `Fn(&T) -> bool`.
    // Fix and re-enable, or remove.
    /*
    #[test]
    fn test_wait() {
        let queue: Arc<RunQueue<usize>> = Arc::new(RunQueue::new());

        {
            let queue = queue.clone();
            thread::spawn(move || {
                queue.run(|e| {
                    println!("t0 {}", e);
                    thread::sleep(Duration::from_millis(100));
                })
            });
        }

        {
            let queue = queue.clone();
            thread::spawn(move || {
                queue.run(|e| {
                    println!("t1 {}", e);
                    thread::sleep(Duration::from_millis(100));
                })
            });
        }

    }
    */
}
||||
@ -1,13 +1,31 @@
|
||||
use super::Device; |
||||
|
||||
use super::super::{tun, udp, Endpoint}; |
||||
use super::types::Callbacks; |
||||
|
||||
use super::receive::ReceieveJob; |
||||
use super::queue::ParallelJob; |
||||
use super::receive::ReceiveJob; |
||||
use super::send::SendJob; |
||||
|
||||
fn worker<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
device: Device<E, C, T, B>, |
||||
use crossbeam_channel::Receiver; |
||||
|
||||
pub enum JobUnion<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>> { |
||||
Outbound(SendJob<E, C, T, B>), |
||||
Inbound(ReceiveJob<E, C, T, B>), |
||||
} |
||||
|
||||
pub fn worker<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
receiver: Receiver<JobUnion<E, C, T, B>>, |
||||
) { |
||||
// fetch job
|
||||
loop { |
||||
match receiver.recv() { |
||||
Err(_) => break, |
||||
Ok(JobUnion::Inbound(job)) => { |
||||
job.parallel_work(); |
||||
job.queue().consume(); |
||||
} |
||||
Ok(JobUnion::Outbound(job)) => { |
||||
job.parallel_work(); |
||||
job.queue().consume(); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
|
||||
@ -1,257 +0,0 @@
|
||||
use std::sync::Arc; |
||||
|
||||
use log::{debug, trace}; |
||||
|
||||
use ring::aead::{Aad, LessSafeKey, Nonce, UnboundKey, CHACHA20_POLY1305}; |
||||
|
||||
use crossbeam_channel::Receiver; |
||||
use std::sync::atomic::Ordering; |
||||
use zerocopy::{AsBytes, LayoutVerified}; |
||||
|
||||
use super::device::{DecryptionState, DeviceInner}; |
||||
use super::messages::{TransportHeader, TYPE_TRANSPORT}; |
||||
use super::peer::PeerInner; |
||||
use super::types::Callbacks; |
||||
|
||||
use super::REJECT_AFTER_MESSAGES; |
||||
|
||||
use super::super::types::KeyPair; |
||||
use super::super::{tun, udp, Endpoint}; |
||||
|
||||
pub const SIZE_TAG: usize = 16; |
||||
|
||||
pub struct JobEncryption { |
||||
pub msg: Vec<u8>, |
||||
pub keypair: Arc<KeyPair>, |
||||
pub counter: u64, |
||||
} |
||||
|
||||
pub struct JobDecryption { |
||||
pub msg: Vec<u8>, |
||||
pub keypair: Arc<KeyPair>, |
||||
} |
||||
|
||||
pub enum JobParallel { |
||||
Encryption(oneshot::Sender<JobEncryption>, JobEncryption), |
||||
Decryption(oneshot::Sender<Option<JobDecryption>>, JobDecryption), |
||||
} |
||||
|
||||
#[allow(type_alias_bounds)] |
||||
pub type JobInbound<E, C, T, B: udp::Writer<E>> = ( |
||||
Arc<DecryptionState<E, C, T, B>>, |
||||
E, |
||||
oneshot::Receiver<Option<JobDecryption>>, |
||||
); |
||||
|
||||
pub type JobOutbound = oneshot::Receiver<JobEncryption>; |
||||
|
||||
/* TODO: Replace with run-queue
|
||||
*/ |
||||
// Sequential inbound worker for a single peer: waits for each decryption
// result in submission order, then applies replay protection, key
// confirmation, endpoint roaming, cryptokey routing and the TUN write.
pub fn worker_inbound<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>(
    device: Arc<DeviceInner<E, C, T, B>>, // related device
    peer: Arc<PeerInner<E, C, T, B>>,     // related peer
    receiver: Receiver<JobInbound<E, C, T, B>>,
) {
    loop {
        // fetch job; a closed channel stops the worker
        let (state, endpoint, rx) = match receiver.recv() {
            Ok(v) => v,
            _ => {
                return;
            }
        };
        debug!("inbound worker: obtained job");

        // wait for job to complete (parallel decryption finished);
        // `None` indicates the message failed to authenticate
        let _ = rx
            .map(|buf| {
                debug!("inbound worker: job complete");
                if let Some(buf) = buf {
                    // cast transport header
                    let (header, packet): (LayoutVerified<&[u8], TransportHeader>, &[u8]) =
                        match LayoutVerified::new_from_prefix(&buf.msg[..]) {
                            Some(v) => v,
                            None => {
                                debug!("inbound worker: failed to parse message");
                                return;
                            }
                        };

                    debug_assert!(
                        packet.len() >= CHACHA20_POLY1305.tag_len(),
                        "this should be checked earlier in the pipeline (decryption should fail)"
                    );

                    // check for replay (sliding window over the counter)
                    if !state.protector.lock().update(header.f_counter.get()) {
                        debug!("inbound worker: replay detected");
                        return;
                    }

                    // the first authenticated message confirms the keypair
                    if !state.confirmed.swap(true, Ordering::SeqCst) {
                        debug!("inbound worker: message confirms key");
                        peer.confirm_key(&state.keypair);
                    }

                    // update endpoint (roaming: remember the message source)
                    *peer.endpoint.lock() = Some(endpoint);

                    // calculate length of IP packet + padding (tag excluded)
                    let length = packet.len() - SIZE_TAG;
                    debug!("inbound worker: plaintext length = {}", length);

                    // check if should be written to TUN; zero length = keepalive
                    let mut sent = false;
                    if length > 0 {
                        if let Some(inner_len) = device.table.check_route(&peer, &packet[..length])
                        {
                            // TODO: Consider moving the cryptkey route check to parallel decryption worker
                            debug_assert!(inner_len <= length, "should be validated earlier");
                            if inner_len <= length {
                                sent = match device.inbound.write(&packet[..inner_len]) {
                                    Err(e) => {
                                        debug!("failed to write inbound packet to TUN: {:?}", e);
                                        false
                                    }
                                    Ok(_) => true,
                                }
                            }
                        }
                    } else {
                        debug!("inbound worker: received keepalive")
                    }

                    // trigger callback (notify user of the receive event)
                    C::recv(&peer.opaque, buf.msg.len(), sent, &buf.keypair);
                } else {
                    debug!("inbound worker: authentication failure")
                }
            })
            .wait();
    }
}
||||
|
||||
|
||||
pub fn worker_outbound<E: Endpoint, C: Callbacks, T: tun::Writer, B: udp::Writer<E>>( |
||||
peer: Arc<PeerInner<E, C, T, B>>, |
||||
receiver: Receiver<JobOutbound>, |
||||
) { |
||||
loop { |
||||
// fetch job
|
||||
let rx = match receiver.recv() { |
||||
Ok(v) => v, |
||||
_ => { |
||||
return; |
||||
} |
||||
}; |
||||
debug!("outbound worker: obtained job"); |
||||
|
||||
// wait for job to complete
|
||||
let _ = rx |
||||
.map(|buf| { |
||||
debug!("outbound worker: job complete"); |
||||
|
||||
// send to peer
|
||||
let xmit = peer.send(&buf.msg[..]).is_ok(); |
||||
|
||||
// trigger callback
|
||||
C::send(&peer.opaque, buf.msg.len(), xmit, &buf.keypair, buf.counter); |
||||
}) |
||||
.wait(); |
||||
} |
||||
} |
||||
|
||||
pub fn worker_parallel(receiver: Receiver<JobParallel>) { |
||||
loop { |
||||
// fetch next job
|
||||
let job = match receiver.recv() { |
||||
Err(_) => { |
||||
return; |
||||
} |
||||
Ok(val) => val, |
||||
}; |
||||
trace!("parallel worker: obtained job"); |
||||
|
||||
// handle job
|
||||
match job { |
||||
JobParallel::Encryption(tx, mut job) => { |
||||
job.msg.extend([0u8; SIZE_TAG].iter()); |
||||
|
||||
// cast to header (should never fail)
|
||||
let (mut header, body): (LayoutVerified<&mut [u8], TransportHeader>, &mut [u8]) = |
||||
LayoutVerified::new_from_prefix(&mut job.msg[..]) |
||||
.expect("earlier code should ensure that there is ample space"); |
||||
|
||||
// set header fields
|
||||
debug_assert!( |
||||
job.counter < REJECT_AFTER_MESSAGES, |
||||
"should be checked when assigning counters" |
||||
); |
||||
header.f_type.set(TYPE_TRANSPORT); |
||||
header.f_receiver.set(job.keypair.send.id); |
||||
header.f_counter.set(job.counter); |
||||
|
||||
// create a nonce object
|
||||
let mut nonce = [0u8; 12]; |
||||
debug_assert_eq!(nonce.len(), CHACHA20_POLY1305.nonce_len()); |
||||
nonce[4..].copy_from_slice(header.f_counter.as_bytes()); |
||||
let nonce = Nonce::assume_unique_for_key(nonce); |
||||
|
||||
// do the weird ring AEAD dance
|
||||
let key = LessSafeKey::new( |
||||
UnboundKey::new(&CHACHA20_POLY1305, &job.keypair.send.key[..]).unwrap(), |
||||
); |
||||
|
||||
// encrypt content of transport message in-place
|
||||
let end = body.len() - SIZE_TAG; |
||||
let tag = key |
||||
.seal_in_place_separate_tag(nonce, Aad::empty(), &mut body[..end]) |
||||
.unwrap(); |
||||
|
||||
// append tag
|
||||
body[end..].copy_from_slice(tag.as_ref()); |
||||
|
||||
// pass ownership
|
||||
let _ = tx.send(job); |
||||
} |
||||
JobParallel::Decryption(tx, mut job) => { |
||||
// cast to header (could fail)
|
||||
let layout: Option<(LayoutVerified<&mut [u8], TransportHeader>, &mut [u8])> = |
||||
LayoutVerified::new_from_prefix(&mut job.msg[..]); |
||||
|
||||
let _ = tx.send(match layout { |
||||
Some((header, body)) => { |
||||
debug_assert_eq!( |
||||
header.f_type.get(), |
||||
TYPE_TRANSPORT, |
||||
"type and reserved bits should be checked by message de-multiplexer" |
||||
); |
||||
if header.f_counter.get() < REJECT_AFTER_MESSAGES { |
||||
// create a nonce object
|
||||
let mut nonce = [0u8; 12]; |
||||
debug_assert_eq!(nonce.len(), CHACHA20_POLY1305.nonce_len()); |
||||
nonce[4..].copy_from_slice(header.f_counter.as_bytes()); |
||||
let nonce = Nonce::assume_unique_for_key(nonce); |
||||
|
||||
// do the weird ring AEAD dance
|
||||
let key = LessSafeKey::new( |
||||
UnboundKey::new(&CHACHA20_POLY1305, &job.keypair.recv.key[..]) |
||||
.unwrap(), |
||||
); |
||||
|
||||
// attempt to open (and authenticate) the body
|
||||
match key.open_in_place(nonce, Aad::empty(), body) { |
||||
Ok(_) => Some(job), |
||||
Err(_) => None, |
||||
} |
||||
} else { |
||||
None |
||||
} |
||||
} |
||||
None => None, |
||||
}); |
||||
} |
||||
} |
||||
} |
||||
} |
||||
Loading…
Reference in new issue