use core::{
    cell::{RefCell, UnsafeCell},
    future::Future,
    mem::MaybeUninit,
    pin::Pin,
    sync::atomic::{AtomicBool, Ordering},
    task::{Context, Poll, RawWaker, RawWakerVTable, Waker},
};

use alloc::{boxed::Box, vec::Vec};

//use futures::future::FutureExt;

use pin_utils::pin_mut;

// NOTE the waker's `*const ()` data pointer is a type-erased `&AtomicBool`
static VTABLE: RawWakerVTable = {
    unsafe fn clone(p: *const ()) -> RawWaker {
        RawWaker::new(p, &VTABLE)
    }

    unsafe fn wake(p: *const ()) {
        wake_by_ref(p)
    }

    unsafe fn wake_by_ref(p: *const ()) {
        // waking only marks the task as ready; the executor's poll loop
        // will pick it up on its next pass
        (*(p as *const AtomicBool)).store(true, Ordering::Relaxed)
    }

    unsafe fn drop(_: *const ()) {
        // no-op
    }

    RawWakerVTable::new(clone, wake, wake_by_ref, drop)
};

/// `ready` must not move in memory for as long as the returned waker
/// references it. That is the reason for keeping `Task`s in a pinned box.
fn wrap_waker(ready: &AtomicBool) -> Waker {
    unsafe { Waker::from_raw(RawWaker::new(ready as *const _ as *const (), &VTABLE)) }
}
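
// A minimal sketch of the waker contract (a hypothetical host-side test, not
// part of the executor): waking by value or by reference only stores `true`
// into the flag, and a cloned waker targets the same flag.
#[cfg(test)]
mod waker_tests {
    use super::*;

    #[test]
    fn waking_sets_the_ready_flag() {
        let ready = AtomicBool::new(false);
        let waker = wrap_waker(&ready);
        let clone = waker.clone();

        waker.wake_by_ref();
        // `swap` also clears the flag so the clone can be observed separately
        assert!(ready.swap(false, Ordering::Relaxed));

        clone.wake();
        assert!(ready.load(Ordering::Relaxed));
    }
}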

/// A single-threaded executor
///
/// This is a singleton
pub struct Executor {
    // Entered `block_on` already?
    in_block_on: RefCell<bool>,

    /// Tasks reside on the heap, so that we just queue pointers. They
    /// must also be pinned in memory because our `RawWaker` is a pointer
    /// to their `ready` field.
    tasks: RefCell<Vec<Pin<Box<Task>>>>,
}

impl Executor {
    /// Creates a new instance of the executor
    pub fn new() -> Self {
        Self {
            in_block_on: RefCell::new(false),
            tasks: RefCell::new(Vec::new()),
        }
    }
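
    /// Runs the future `f` to completion, also driving any `spawn`-ed tasks
    /// in the meantime. Panics if called reentrantly.
    ///
    /// A usage sketch (illustrative only, hence not compiled as a doctest):
    ///
    /// ```ignore
    /// let ex = Executor::new();
    /// ex.spawn(async { /* background work */ });
    /// let answer = ex.block_on(async { 42 });
    /// assert_eq!(answer, 42);
    /// ```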
    pub fn block_on<T>(&self, f: impl Future<Output = T>) -> T {
        // we want to avoid reentering `block_on` because then all the code
        // below would have to become more complex. It's also likely that the
        // application will only call `block_on` once, on an infinite task
        // (`Future<Output = !>`)
        {
            let mut in_block_on = self.in_block_on.borrow_mut();
            if *in_block_on {
                panic!("nested `block_on`");
            }
            *in_block_on = true;
        }

        pin_mut!(f);
        let ready = AtomicBool::new(true);
        let waker = wrap_waker(&ready);
        let mut backup = Vec::new();
        let val = loop {
            // advance the main task
            if ready.load(Ordering::Relaxed) {
                ready.store(false, Ordering::Relaxed);

                // println!("run block_on");
                let mut cx = Context::from_waker(&waker);
                if let Poll::Ready(val) = f.as_mut().poll(&mut cx) {
                    break val;
                }
                // println!("ran block_on");
            }

            // advance all tasks
            //
            // `swap` the task list into `backup` so that `self.tasks` is not
            // kept borrowed while polling: a polled task may itself call
            // `spawn`, which borrows `self.tasks` again
            core::mem::swap(&mut *self.tasks.borrow_mut(), &mut backup);
            for mut task in backup.drain(..) {
                // NOTE we don't need a CAS operation here because `wake` invocations that come
                // from interrupt handlers (the only source of 'race conditions' (!= data races))
                // are "oneshot": they'll issue a `wake` and then disable themselves to not run
                // again until the woken task has made more progress
                if task.ready.load(Ordering::Relaxed) {
                    // we are about to service the task so switch the `ready` flag to `false`
                    task.ready.store(false, Ordering::Relaxed);

                    let waker = wrap_waker(&task.ready);
                    let mut cx = Context::from_waker(&waker);
                    let ready = task.f.as_mut().poll(&mut cx).is_ready();
                    if ready {
                        // task is finished; do not requeue
                        continue;
                    }
                }
                // requeue
                self.tasks.borrow_mut().push(task);
            }

            // // try to sleep; this will be a no-op if any of the previous tasks generated a SEV
            // // or an interrupt ran (regardless of whether it generated a wake-up or not)
            // asm::wfe();
        };
        self.in_block_on.replace(false);
        val
    }

    /// Spawns `f` onto the executor as a new task
    ///
    /// The task starts out ready, so it will first be polled on the next
    /// iteration of the `block_on` loop
    pub fn spawn(&self, f: impl Future + 'static) {
        let task = Box::pin(Task::new(f));
        self.tasks.borrow_mut().push(task);
    }
}
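
// A hedged end-to-end sketch (a hypothetical host-side test, not part of the
// executor): a spawned task runs while `block_on` drives the main future.
// `YieldOnce` is a helper defined here only for the demo; it returns `Pending`
// once and immediately wakes itself so the executor loops one more time.
#[cfg(test)]
mod executor_tests {
    use super::*;
    use alloc::rc::Rc;
    use core::cell::Cell;

    struct YieldOnce(bool);

    impl Future for YieldOnce {
        type Output = ();

        fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
            if self.0 {
                Poll::Ready(())
            } else {
                self.0 = true;
                // request another poll; with our waker this just sets the
                // `ready` flag again
                cx.waker().wake_by_ref();
                Poll::Pending
            }
        }
    }

    #[test]
    fn spawned_task_runs_alongside_block_on() {
        let ex = Executor::new();
        let hit = Rc::new(Cell::new(false));
        let hit2 = hit.clone();
        ex.spawn(async move { hit2.set(true) });

        // yield once so the executor gets a chance to poll the spawned task
        ex.block_on(YieldOnce(false));
        assert!(hit.get());
    }
}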

pub struct Task {
    ready: AtomicBool,
    f: Pin<Box<dyn Future<Output = ()>>>,
}

impl Task {
    fn new(f: impl Future + 'static) -> Self {
        Task {
            ready: AtomicBool::new(true),
            // wrap `f` in an `async` block to erase its output type; the
            // executor only deals in `Future<Output = ()>` trait objects
            f: Box::pin(async {
                f.await;
            }),
        }
    }
}

/// Returns a handle to the executor singleton
///
/// This lazily initializes the executor when first called
pub(crate) fn current() -> &'static Executor {
    static INIT: AtomicBool = AtomicBool::new(false);
    static mut EXECUTOR: UnsafeCell<MaybeUninit<Executor>> = UnsafeCell::new(MaybeUninit::uninit());

    if INIT.load(Ordering::Relaxed) {
        unsafe { &*(EXECUTOR.get() as *const Executor) }
    } else {
        unsafe {
            let executorp = EXECUTOR.get() as *mut Executor;
            executorp.write(Executor::new());
            INIT.store(true, Ordering::Relaxed);
            &*executorp
        }
    }
}
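
// A minimal sketch (a hypothetical host-side test): repeated calls to
// `current` must hand back the same lazily initialized instance.
#[cfg(test)]
mod singleton_tests {
    use super::*;

    #[test]
    fn current_returns_the_same_instance() {
        let a = current() as *const Executor;
        let b = current() as *const Executor;
        assert_eq!(a, b);
    }
}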