use core::{
    pin::Pin,
    future::Future,
    sync::atomic::{AtomicPtr, AtomicUsize, Ordering},
    task::{Context, Poll},
};

use alloc::boxed::Box;

use super::{spin_lock_yield, notify_spin_lock};
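
// Sketch of the ring-buffer protocol used below: `list` holds `capacity + 1` slots
// so that the two indices alone can distinguish the queue states:
//
//   * empty: read == write
//   * full:  (write + 1) % list.len() == read
//
// For example, with `list.len() == 4` (capacity 3), `write == 3` and `read == 0`
// mean the queue is full: slots 0, 1 and 2 hold elements and slot 3 stays unused.
// In normal operation the sender is the only writer of `write` and the receiver
// the only writer of `read`, so one Release store / Acquire load pair per index is
// enough for this single-producer, single-consumer design.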

/// Sending half of the channel. Elements are boxed and handed over through the
/// shared slot array.
pub struct Sender<'a, T> where T: Clone {
    list: &'a [AtomicPtr<T>],
    write: &'a AtomicUsize,
    read: &'a AtomicUsize,
}

/// Receiving half of the channel. Elements are cloned out of the shared slots.
pub struct Receiver<'a, T> where T: Clone {
    list: &'a [AtomicPtr<T>],
    write: &'a AtomicUsize,
    read: &'a AtomicUsize,
}

impl<'a, T> Sender<'a, T> where T: Clone {
    pub const fn new(list: &'static [AtomicPtr<T>], write: &'static AtomicUsize, read: &'static AtomicUsize) -> Self {
        Sender { list, write, read }
    }

    /// Attempts to enqueue `content` without blocking, handing it back in `Err`
    /// if the queue is full.
    pub fn try_send<B: Into<Box<T>>>(&mut self, content: B) -> Result<(), B> {
        let write = self.write.load(Ordering::Relaxed);
        if (write + 1) % self.list.len() == self.read.load(Ordering::Acquire) {
            Err(content)
        } else {
            let ptr = Box::into_raw(content.into());
            let entry = &self.list[write];
            let prev = entry.swap(ptr, Ordering::Relaxed);
            // Publish the new element first (Release store of the write index) so
            // the other end can pick it up while we clean up the old allocation.
            self.write.store((write + 1) % self.list.len(), Ordering::Release);
            notify_spin_lock();
            if !prev.is_null() {
                // Reclaim and drop whatever previously occupied this slot.
                unsafe {
                    drop(Box::from_raw(prev));
                }
            }
            Ok(())
        }
    }

    /// Enqueues `content`, spinning (with `spin_lock_yield`) until a slot is free.
    pub fn send<B: Into<Box<T>>>(&mut self, content: B) {
        let mut content = content;
        while let Err(back) = self.try_send(content) {
            content = back;
            spin_lock_yield();
        }
    }

    /// Enqueues `content`, yielding to the executor while the queue is full.
    /// The future re-wakes itself on every unsuccessful poll, so this is a
    /// cooperative busy-poll rather than a true wait.
    pub async fn async_send<B: Into<Box<T>>>(&mut self, content: B) {
        struct Send<'a, 'b, T> where T: Clone, 'b: 'a {
            sender: &'a mut Sender<'b, T>,
            content: Result<(), Box<T>>,
        }

        impl<T> Future for Send<'_, '_, T> where T: Clone {
            type Output = ();

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                match core::mem::replace(&mut self.content, Ok(())) {
                    Err(content) => {
                        if let Err(content) = self.sender.try_send(content) {
                            // Queue still full: stash the element back and request another poll.
                            self.content = Err(content);
                            cx.waker().wake_by_ref();
                            Poll::Pending
                        } else {
                            // Successfully enqueued.
                            Poll::Ready(())
                        }
                    }
                    Ok(_) => panic!("Send future polled after success"),
                }
            }
        }

        Send {
            sender: self,
            content: Err(content.into()),
        }.await
    }

    /// Frees all items currently in the queue.
    ///
    /// # Safety
    ///
    /// It is the caller's responsibility to ensure that no reader is trying to
    /// copy the data while the elements are dropped.
    pub unsafe fn drop_elements(&mut self) {
        for v in self.list.iter() {
            let original = v.swap(core::ptr::null_mut(), Ordering::Relaxed);
            if !original.is_null() {
                drop(Box::from_raw(original));
            }
        }
    }

    /// Resets the `sync_channel`, *forgetting* (leaking) all items in the queue.
    /// Affects both the sender and the receiver.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the other end is not accessing the queue while
    /// it is being reset.
    pub unsafe fn reset(&mut self) {
        self.write.store(0, Ordering::Relaxed);
        self.read.store(0, Ordering::Relaxed);
        for v in self.list.iter() {
            v.store(core::ptr::null_mut(), Ordering::Relaxed);
        }
    }
}

impl<'a, T> Receiver<'a, T> where T: Clone {
    pub const fn new(list: &'static [AtomicPtr<T>], write: &'static AtomicUsize, read: &'static AtomicUsize) -> Self {
        Receiver { list, write, read }
    }

    /// Attempts to dequeue one element without blocking, cloning it out of the
    /// buffer; returns `Err(())` if the queue is empty.
    pub fn try_recv(&mut self) -> Result<T, ()> {
        let read = self.read.load(Ordering::Relaxed);
        if read == self.write.load(Ordering::Acquire) {
            Err(())
        } else {
            let entry = &self.list[read];
            let data = unsafe {
                // Reconstruct the Box only to clone its contents, then leak it again:
                // the slot keeps owning the allocation, which is freed when the sender
                // later overwrites this slot (or by `drop_elements`).
                Box::leak(Box::from_raw(entry.load(Ordering::Relaxed)))
            };
            let result = data.clone();
            self.read.store((read + 1) % self.list.len(), Ordering::Release);
            notify_spin_lock();
            Ok(result)
        }
    }

    /// Dequeues one element, spinning until one is available.
    pub fn recv(&mut self) -> T {
        loop {
            if let Ok(data) = self.try_recv() {
                return data;
            }
            spin_lock_yield();
        }
    }

    /// Dequeues one element, yielding to the executor while the queue is empty.
    /// Like `async_send`, the future re-wakes itself on every unsuccessful poll.
    pub async fn async_recv(&mut self) -> T {
        struct Recv<'a, 'b, T> where T: Clone, 'b: 'a {
            receiver: &'a mut Receiver<'b, T>,
        }

        impl<T> Future for Recv<'_, '_, T> where T: Clone {
            type Output = T;

            fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
                if let Ok(content) = self.receiver.try_recv() {
                    Poll::Ready(content)
                } else {
                    cx.waker().wake_by_ref();
                    Poll::Pending
                }
            }
        }

        Recv {
            receiver: self,
        }.await
    }
}
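
// Async usage sketch (illustrative only: `block_on` stands in for whatever executor
// the surrounding firmware provides; it is not defined in this module):
//
//     block_on(async {
//         tx.async_send(7u32).await;
//         assert_eq!(rx.async_recv().await, 7);
//     });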

impl<'a, T> Iterator for Receiver<'a, T> where T: Clone {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        Some(self.recv())
    }
}
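
// Because `next` delegates to the blocking `recv`, iterating over a `Receiver`
// never terminates on its own; a `for` loop simply processes elements as they
// arrive (sketch, with `handle` standing in for user code):
//
//     for item in rx {
//         handle(item);
//     }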

#[macro_export]
/// Macro for initializing a `sync_channel` with a static buffer and static
/// read/write indices.
///
/// The buffer is a static array of `$cap + 1` atomic pointers, initialized from a
/// `const` item, so the `#![feature(const_in_array_repeat_expressions)]` gate
/// required by earlier versions is no longer needed.
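///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest; it assumes the macro is
/// invoked where the expansion's `$crate::sync_channel::{Sender, Receiver}`
/// path resolves):
///
/// ```ignore
/// let (mut tx, mut rx) = sync_channel!(u32, 8);
/// tx.send(42u32);
/// assert_eq!(rx.recv(), 42);
/// ```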
macro_rules! sync_channel {
    ($t: ty, $cap: expr) => {
        {
            use core::sync::atomic::{AtomicUsize, AtomicPtr};
            use $crate::sync_channel::{Sender, Receiver};
            // A `const` item is a valid array-repeat initializer even for
            // non-`Copy` types such as `AtomicPtr`.
            const NULL_PTR: AtomicPtr<$t> = AtomicPtr::new(core::ptr::null_mut());
            static LIST: [AtomicPtr<$t>; $cap + 1] = [NULL_PTR; $cap + 1];
            static WRITE: AtomicUsize = AtomicUsize::new(0);
            static READ: AtomicUsize = AtomicUsize::new(0);
            (Sender::new(&LIST, &WRITE, &READ), Receiver::new(&LIST, &WRITE, &READ))
        }
    };
}