forked from ROMEO/nexosim
First release candidate for v0.1.0
asynchronix/src/executor/injector.rs (new file, 194 lines)
@@ -0,0 +1,194 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;
use std::{mem, vec};

/// An unfair injector queue which stores batches of tasks in bounded-size
/// buckets.
///
/// This is a simple but effective unfair injector design which, despite being
/// based on a mutex-protected `Vec`, ensures low contention and low latency in
/// most realistic cases.
///
/// This is achieved by enabling the worker to push and pop batches of tasks
/// readily stored in buckets. Since only the handles to the buckets are moved
/// to and from the injector, pushing and popping a bucket is very fast and the
/// lock is therefore only held for a very short time.
///
/// Also, since tasks in a bucket are memory-contiguous, they can be efficiently
/// copied to and from worker queues. The use of buckets also keeps the size of
/// the injector queue small (its size is the number of buckets) so
/// re-allocation is rare and fast.
///
/// As an additional optimization, an `is_empty` atomic flag allows workers
/// searching for tasks to skip taking the lock if the queue is likely to be
/// empty.
///
/// The queue is not strictly LIFO. While buckets are indeed pushed and popped
/// in LIFO order, individual tasks are stored in a bucket at the front of the
/// queue and this bucket is only moved to the back of the queue when full.
#[derive(Debug)]
|
||||
pub(crate) struct Injector<T, const BUCKET_CAPACITY: usize> {
|
||||
/// A mutex-protected list of tasks.
|
||||
inner: Mutex<Vec<Bucket<T, BUCKET_CAPACITY>>>,
|
||||
/// A flag indicating whether the injector queue is empty.
|
||||
is_empty: AtomicBool,
|
||||
}
|
||||
|
||||
impl<T, const BUCKET_CAPACITY: usize> Injector<T, BUCKET_CAPACITY> {
|
||||
/// Creates an empty injector queue.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the capacity is 0.
|
||||
pub(crate) const fn new() -> Self {
|
||||
assert!(BUCKET_CAPACITY >= 1);
|
||||
|
||||
Self {
|
||||
inner: Mutex::new(Vec::new()),
|
||||
is_empty: AtomicBool::new(true),
|
||||
}
|
||||
}
|
||||
|
||||
/// Inserts a task.
|
||||
///
|
||||
/// The task is inserted in a bucket at the front of the queue. Once this
|
||||
/// bucket is full, it is moved to the back of the queue.
|
||||
pub(crate) fn insert_task(&self, task: T) {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
// Try to push the task onto the first bucket if it has enough capacity left.
|
||||
if let Some(bucket) = inner.first_mut() {
|
||||
if let Err(task) = bucket.push(task) {
|
||||
// The bucket is full: move it to the back of the vector and
|
||||
// replace it with a newly created bucket that contains the
|
||||
// task.
|
||||
let mut new_bucket = Bucket::new();
|
||||
let _ = new_bucket.push(task); // this cannot fail provided the capacity is >=1
|
||||
|
||||
let full_bucket = mem::replace(bucket, new_bucket);
|
||||
inner.push(full_bucket);
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// The queue is empty: create a new bucket.
|
||||
let mut new_bucket = Bucket::new();
|
||||
let _ = new_bucket.push(task); // this cannot fail provided the capacity is >=1
|
||||
|
||||
inner.push(new_bucket);
|
||||
|
||||
// Ordering: this flag is only used as a hint so Relaxed ordering is
|
||||
// sufficient.
|
||||
self.is_empty.store(false, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Appends a bucket to the back of the queue.
|
||||
pub(crate) fn push_bucket(&self, bucket: Bucket<T, BUCKET_CAPACITY>) {
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
let was_empty = inner.is_empty();
|
||||
inner.push(bucket);
|
||||
|
||||
// If the queue was empty before, update the flag.
|
||||
if was_empty {
|
||||
// Ordering: this flag is only used as a hint so Relaxed ordering is
|
||||
// sufficient.
|
||||
self.is_empty.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes the bucket at the back of the queue, if any.
|
||||
///
|
||||
/// Note that this can spuriously return `None` even though the queue is
|
||||
/// populated, unless a happens-before relationship exists between the
|
||||
/// thread that populated the queue and the thread calling this method (this
|
||||
/// is obviously the case if they are the same thread).
|
||||
///
|
||||
/// This is not an issue in practice because it cannot lead to executor
|
||||
/// deadlock. Indeed, if the last task/bucket was inserted by a worker
|
||||
/// thread, that worker thread will always see that the injector queue is
|
||||
/// populated (unless the bucket was already popped). Therefore, if all
|
||||
/// workers exit, then all tasks they have re-injected will necessarily have
|
||||
/// been processed. Likewise, if the last task/bucket was inserted by the
|
||||
/// main executor thread before `Executor::run()` is called, the
|
||||
/// synchronization established when the executor unparks worker threads
|
||||
/// ensures that the task is visible to all unparked workers (there is
|
||||
/// actually an edge case when the executor cannot unpark a thread after
|
||||
/// pushing tasks, but this is taken care of by some extra synchronization
|
||||
/// when deactivating workers).
|
||||
pub(crate) fn pop_bucket(&self) -> Option<Bucket<T, BUCKET_CAPACITY>> {
|
||||
// Ordering: this flag is only used as a hint so Relaxed ordering is
|
||||
// sufficient.
|
||||
if self.is_empty.load(Ordering::Relaxed) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut inner = self.inner.lock().unwrap();
|
||||
|
||||
let bucket = inner.pop();
|
||||
|
||||
if inner.is_empty() {
|
||||
// Ordering: this flag is only used as a hint so Relaxed ordering is
|
||||
// sufficient.
|
||||
self.is_empty.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
bucket
|
||||
}
|
||||
|
||||
/// Checks whether the queue is empty.
|
||||
///
|
||||
/// Note that this can spuriously return `true` even though the queue is
|
||||
/// populated, unless a happens-before relationship exists between the
|
||||
/// thread that populated the queue and the thread calling this method (this
|
||||
/// is obviously the case if they are the same thread).
|
||||
pub(crate) fn is_empty(&self) -> bool {
|
||||
self.is_empty.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
/// A collection of tasks with a bounded size.
|
||||
///
|
||||
/// This is just a very thin wrapper around a `Vec` that ensures that the
|
||||
/// nominal capacity bound is never exceeded.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Bucket<T, const CAPACITY: usize>(Vec<T>);
|
||||
|
||||
impl<T, const CAPACITY: usize> Bucket<T, CAPACITY> {
|
||||
/// Creates a new bucket, allocating the full capacity upfront.
|
||||
pub(crate) fn new() -> Self {
|
||||
Self(Vec::with_capacity(CAPACITY))
|
||||
}
|
||||
|
||||
/// Returns the bucket's nominal capacity.
|
||||
pub(crate) const fn capacity() -> usize {
|
||||
CAPACITY
|
||||
}
|
||||
|
||||
/// Appends one task if capacity allows; otherwise returns the task in the
|
||||
/// error.
|
||||
pub(crate) fn push(&mut self, task: T) -> Result<(), T> {
|
||||
if self.0.len() < CAPACITY {
|
||||
self.0.push(task);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(task)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, const CAPACITY: usize> IntoIterator for Bucket<T, CAPACITY> {
|
||||
type Item = T;
|
||||
type IntoIter = vec::IntoIter<T>;
|
||||
|
||||
fn into_iter(self) -> Self::IntoIter {
|
||||
self.0.into_iter()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, const CAPACITY: usize> FromIterator<T> for Bucket<T, CAPACITY> {
|
||||
fn from_iter<U: IntoIterator<Item = T>>(iter: U) -> Self {
|
||||
Self(Vec::from_iter(iter.into_iter().take(CAPACITY)))
|
||||
}
|
||||
}
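To make the bucket mechanics described above concrete, here is a minimal usage sketch (illustrative only, not part of this commit). It drives `Injector` and `Bucket` with plain integers standing in for tasks and assumes it sits in a test module next to the code above.

#[cfg(test)]
mod injector_usage_sketch {
    use super::{Bucket, Injector};

    #[test]
    fn bucket_round_trip() {
        // A queue whose buckets hold at most 4 "tasks" (plain integers here).
        let injector = Injector::<usize, 4>::new();

        // The first 4 insertions fill the front bucket; the 5th moves that full
        // bucket to the back of the queue and starts a new front bucket.
        for task in 0..5 {
            injector.insert_task(task);
        }
        assert!(!injector.is_empty());

        // A worker pops whole buckets and drains them locally.
        let full_bucket = injector.pop_bucket().expect("a full bucket was pushed");
        assert_eq!(full_bucket.into_iter().collect::<Vec<_>>(), vec![0, 1, 2, 3]);

        // `FromIterator` silently truncates to the nominal capacity.
        let bucket: Bucket<usize, 4> = (10..20).collect();
        assert_eq!(bucket.into_iter().count(), Bucket::<usize, 4>::capacity());
    }
}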
asynchronix/src/executor/pool_manager.rs (new file, 372 lines)
@@ -0,0 +1,372 @@
|
||||
use std::any::Any;
|
||||
use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crossbeam_utils::sync::Unparker;
|
||||
|
||||
use super::Stealer;
|
||||
use crate::util::bit;
|
||||
use crate::util::rng;
|
||||
|
||||
/// Manager of worker threads.
|
||||
///
|
||||
/// The manager currently only supports up to `usize::BITS` threads.
|
||||
pub(super) struct PoolManager {
|
||||
/// Number of worker threads.
|
||||
pool_size: usize,
|
||||
/// List of the stealers associated with each worker thread.
stealers: Box<[Stealer]>,
/// List of the thread unparkers associated with each worker thread.
worker_unparkers: Box<[Unparker]>,
|
||||
/// Bit field of all workers that are currently unparked.
|
||||
active_workers: AtomicUsize,
|
||||
/// Count of all workers currently searching for tasks.
|
||||
searching_workers: AtomicUsize,
|
||||
/// Flag requesting all workers to return immediately.
|
||||
terminate_signal: AtomicBool,
|
||||
/// Panic caught in a worker thread.
|
||||
worker_panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
|
||||
#[cfg(feature = "dev-logs")]
|
||||
/// Thread wake-up statistics.
|
||||
record: Record,
|
||||
}
|
||||
|
||||
impl PoolManager {
|
||||
/// Creates a new pool manager.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This will panic if the specified pool size is zero or is more than
|
||||
/// `usize::BITS`.
|
||||
pub(super) fn new(
|
||||
pool_size: usize,
|
||||
stealers: Box<[Stealer]>,
|
||||
worker_unparkers: Box<[Unparker]>,
|
||||
) -> Self {
|
||||
assert!(
|
||||
pool_size >= 1,
|
||||
"the executor pool size should be at least one"
|
||||
);
|
||||
assert!(
|
||||
pool_size <= usize::BITS as usize,
|
||||
"the executor pool size should be at most {}",
|
||||
usize::BITS
|
||||
);
|
||||
|
||||
Self {
|
||||
pool_size,
|
||||
stealers,
|
||||
worker_unparkers,
|
||||
active_workers: AtomicUsize::new(0),
|
||||
searching_workers: AtomicUsize::new(0),
|
||||
terminate_signal: AtomicBool::new(false),
|
||||
worker_panic: Mutex::new(None),
|
||||
#[cfg(feature = "dev-logs")]
|
||||
record: Record::new(pool_size),
|
||||
}
|
||||
}
|
||||
|
||||
/// Unparks an idle worker if any is found and marks it as active, or does
/// nothing otherwise.
|
||||
///
|
||||
/// For performance reasons, no synchronization is established if no worker
|
||||
/// is found, meaning that workers in other threads may later transition to
|
||||
/// idle state without observing the tasks scheduled by this caller. If this
|
||||
/// is not tolerable (for instance if this method is called from a
|
||||
/// non-worker thread), use the more expensive `activate_worker`.
|
||||
pub(super) fn activate_worker_relaxed(&self) {
|
||||
let mut active_workers = self.active_workers.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let first_idle_worker = active_workers.trailing_ones() as usize;
|
||||
if first_idle_worker >= self.pool_size {
|
||||
return;
|
||||
};
|
||||
active_workers = self
|
||||
.active_workers
|
||||
.fetch_or(1 << first_idle_worker, Ordering::Relaxed);
|
||||
if active_workers & (1 << first_idle_worker) == 0 {
|
||||
#[cfg(feature = "dev-logs")]
|
||||
self.record.increment(first_idle_worker);
|
||||
self.begin_worker_search();
|
||||
self.worker_unparkers[first_idle_worker].unpark();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Unparks an idle worker if any is found and marks it as active, or ensures
/// that at least the last active worker will observe all memory operations
/// performed before this call when calling `try_set_worker_inactive`.
|
||||
pub(super) fn activate_worker(&self) {
|
||||
let mut active_workers = self.active_workers.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let first_idle_worker = active_workers.trailing_ones() as usize;
|
||||
if first_idle_worker >= self.pool_size {
|
||||
// There is apparently no free worker, so a dummy RMW with
// Release ordering is performed with the sole purpose of
// synchronizing with the Acquire fence in `try_set_worker_inactive`
// so that the last worker sees the tasks that were queued prior to
// this call to `activate_worker`.
|
||||
let new_active_workers = self.active_workers.fetch_or(0, Ordering::Release);
|
||||
if new_active_workers == active_workers {
|
||||
return;
|
||||
}
|
||||
active_workers = new_active_workers;
|
||||
} else {
|
||||
active_workers = self
|
||||
.active_workers
|
||||
.fetch_or(1 << first_idle_worker, Ordering::Relaxed);
|
||||
if active_workers & (1 << first_idle_worker) == 0 {
|
||||
#[cfg(feature = "dev-logs")]
|
||||
self.record.increment(first_idle_worker);
|
||||
self.begin_worker_search();
|
||||
self.worker_unparkers[first_idle_worker].unpark();
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Marks the specified worker as inactive unless it is the last active
|
||||
/// worker.
|
||||
///
|
||||
/// Parking the worker thread is the responsibility of the caller.
|
||||
///
|
||||
/// If this was the last active worker, `false` is returned and it is
|
||||
/// guaranteed that all memory operations performed by threads that called
|
||||
/// `activate_worker` will be visible. The worker is in such case expected
|
||||
/// to check again the injector queue and then to explicitly call
|
||||
/// `set_all_workers_inactive` if it can confirm that the injector queue is
|
||||
/// empty.
|
||||
pub(super) fn try_set_worker_inactive(&self, worker_id: usize) -> bool {
|
||||
// Ordering: this Release operation synchronizes with the Acquire fence
// in the conditional below if this is the last active worker, and/or
// with the Acquire state load in the `pool_is_idle` method.
|
||||
let active_workers = self
|
||||
.active_workers
|
||||
.fetch_update(Ordering::Release, Ordering::Relaxed, |active_workers| {
|
||||
if active_workers == (1 << worker_id) {
|
||||
// It looks like this is the last worker, but the value
|
||||
// could be stale so it is necessary to make sure of this by
|
||||
// enforcing the CAS rather than returning `None`.
|
||||
Some(active_workers)
|
||||
} else {
|
||||
Some(active_workers & !(1 << worker_id))
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
assert_ne!(active_workers & (1 << worker_id), 0);
|
||||
|
||||
if active_workers == (1 << worker_id) {
|
||||
// This is the last worker, so we need to ensure that after this
// call, all tasks pushed onto the injector queue before
// `activate_worker` was called unsuccessfully are visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release RMWs
// in this and in the previous calls to `try_set_worker_inactive` via a
// release sequence.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
false
|
||||
} else {
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
/// Marks all pool workers as active.
|
||||
///
|
||||
/// Unparking the worker threads is the responsibility of the caller.
|
||||
pub(super) fn set_all_workers_active(&self) {
|
||||
// Mark all workers as busy.
|
||||
self.active_workers.store(
|
||||
!0 >> (usize::BITS - self.pool_size as u32),
|
||||
Ordering::Relaxed,
|
||||
);
|
||||
}
|
||||
|
||||
/// Marks all pool workers as inactive.
|
||||
///
|
||||
/// This should only be called by the last active worker. Unparking the
|
||||
/// executor threads is the responsibility of the caller.
|
||||
pub(super) fn set_all_workers_inactive(&self) {
|
||||
// Ordering: this Release store synchronizes with the Acquire load in
|
||||
// `is_idle`.
|
||||
self.active_workers.store(0, Ordering::Release);
|
||||
}
|
||||
|
||||
/// Checks whether the pool is idle, i.e. whether no worker is currently active.
|
||||
///
|
||||
/// If `true` is returned, it is guaranteed that all operations performed by
|
||||
/// the now-inactive workers become visible in this thread.
|
||||
pub(super) fn pool_is_idle(&self) -> bool {
|
||||
// Ordering: this Acquire operation synchronizes with all Release
// RMWs in the `try_set_worker_inactive` method via a release sequence.
|
||||
self.active_workers.load(Ordering::Acquire) == 0
|
||||
}
|
||||
|
||||
/// Increments the count of workers actively searching for tasks.
|
||||
pub(super) fn begin_worker_search(&self) {
|
||||
self.searching_workers.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Decrements the count of workers actively searching for tasks.
|
||||
pub(super) fn end_worker_search(&self) {
|
||||
self.searching_workers.fetch_sub(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Returns the count of workers actively searching for tasks.
|
||||
pub(super) fn searching_worker_count(&self) -> usize {
|
||||
self.searching_workers.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Triggers the termination signal and unparks all worker threads so they
|
||||
/// can cleanly terminate.
|
||||
pub(super) fn trigger_termination(&self) {
|
||||
self.terminate_signal.store(true, Ordering::Relaxed);
|
||||
|
||||
self.set_all_workers_active();
|
||||
for unparker in &*self.worker_unparkers {
|
||||
unparker.unpark();
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if the termination signal was triggered.
|
||||
pub(super) fn termination_is_triggered(&self) -> bool {
|
||||
self.terminate_signal.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Registers a panic associated with the provided worker ID.
|
||||
///
|
||||
/// If no panic is currently registered, the panic in argument is
|
||||
/// registered. If a panic was already registered by a worker and was not
|
||||
/// yet processed by the executor, then nothing is done.
|
||||
pub(super) fn register_panic(&self, panic: Box<dyn Any + Send + 'static>) {
|
||||
let mut worker_panic = self.worker_panic.lock().unwrap();
|
||||
if worker_panic.is_none() {
|
||||
*worker_panic = Some(panic);
|
||||
}
|
||||
}
|
||||
|
||||
/// Takes a worker panic if any is registered.
|
||||
pub(super) fn take_panic(&self) -> Option<Box<dyn Any + Send + 'static>> {
|
||||
let mut worker_panic = self.worker_panic.lock().unwrap();
|
||||
worker_panic.take()
|
||||
}
|
||||
|
||||
/// Returns an iterator yielding the stealers associated with all active
|
||||
/// workers, starting from a randomly selected active worker. The worker
/// whose ID is provided as an argument (if any) is excluded from the pool
/// of candidates.
|
||||
pub(super) fn shuffled_stealers<'a>(
|
||||
&'a self,
|
||||
excluded_worker_id: Option<usize>,
|
||||
rng: &'_ rng::Rng,
|
||||
) -> ShuffledStealers<'a> {
|
||||
// All active workers except the specified one are candidates for stealing.
|
||||
let mut candidates = self.active_workers.load(Ordering::Relaxed);
|
||||
if let Some(excluded_worker_id) = excluded_worker_id {
|
||||
candidates &= !(1 << excluded_worker_id);
|
||||
}
|
||||
|
||||
ShuffledStealers::new(candidates, &self.stealers, rng)
|
||||
}
|
||||
}
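The deactivation protocol documented on `try_set_worker_inactive` is easier to follow as a worker-side sketch. The function below is illustrative only and not part of this commit; `injector_is_empty` is a hypothetical stand-in for the worker's re-check of the injector queue.

// Illustrative sketch (not part of this commit): the worker-side deactivation
// protocol described above. `injector_is_empty` is a hypothetical stand-in for
// the re-check of the injector queue.
#[allow(unused)]
fn try_park_worker(
    pool: &PoolManager,
    worker_id: usize,
    injector_is_empty: impl Fn() -> bool,
) -> bool {
    if pool.try_set_worker_inactive(worker_id) {
        // Not the last active worker: it is safe to park.
        return true;
    }

    // This was the last active worker: all memory operations performed by
    // threads that called `activate_worker` are now visible, so the injector
    // queue can be checked one final time.
    if injector_is_empty() {
        // Confirmed empty: the whole pool can be declared idle and the worker
        // can park.
        pool.set_all_workers_inactive();
        return true;
    }

    // New tasks were observed: the worker stays active and keeps processing.
    false
}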
|
||||
|
||||
#[cfg(feature = "dev-logs")]
|
||||
impl Drop for PoolManager {
|
||||
fn drop(&mut self) {
|
||||
println!("Thread launch count: {:?}", self.record.get());
|
||||
}
|
||||
}
|
||||
|
||||
/// An iterator over active workers that yields their associated stealer,
|
||||
/// starting from a randomly selected active worker.
|
||||
pub(super) struct ShuffledStealers<'a> {
|
||||
stealers: &'a [Stealer],
|
||||
// A bit-rotated bit field of the remaining candidate workers to steal from.
|
||||
// If set, the LSB represents the next candidate.
|
||||
candidates: usize,
|
||||
next_candidate: usize, // index of the next candidate
|
||||
}
|
||||
impl<'a> ShuffledStealers<'a> {
|
||||
/// Creates a new `ShuffledStealers` iterator initialized at a randomly
/// selected active worker.
|
||||
fn new(candidates: usize, stealers: &'a [Stealer], rng: &'_ rng::Rng) -> Self {
|
||||
let (candidates, next_candidate) = if candidates == 0 {
|
||||
(0, 0)
|
||||
} else {
|
||||
let next_candidate = bit::find_bit(candidates, |count| {
|
||||
rng.gen_bounded(count as u64) as usize + 1
|
||||
});
|
||||
|
||||
// Right-rotate the candidates so that the bit corresponding to the
|
||||
// randomly selected worker becomes the LSB.
|
||||
let candidate_count = stealers.len();
|
||||
let lower_bits = candidates & ((1 << next_candidate) - 1);
|
||||
let candidates =
|
||||
(candidates >> next_candidate) | (lower_bits << (candidate_count - next_candidate));
|
||||
|
||||
(candidates, next_candidate)
|
||||
};
|
||||
|
||||
Self {
|
||||
stealers,
|
||||
candidates,
|
||||
next_candidate,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Iterator for ShuffledStealers<'a> {
|
||||
type Item = &'a Stealer;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
if self.candidates == 0 {
|
||||
return None;
|
||||
}
|
||||
|
||||
// Clear the bit corresponding to the current candidate worker.
|
||||
self.candidates &= !1;
|
||||
|
||||
let current_candidate = self.next_candidate;
|
||||
|
||||
if self.candidates != 0 {
|
||||
// Locate the next candidate worker and make it the LSB.
|
||||
let shift = self.candidates.trailing_zeros();
|
||||
self.candidates >>= shift;
|
||||
|
||||
// Update the next candidate.
|
||||
self.next_candidate += shift as usize;
|
||||
if self.next_candidate >= self.stealers.len() {
|
||||
self.next_candidate -= self.stealers.len();
|
||||
}
|
||||
}
|
||||
|
||||
Some(&self.stealers[current_candidate])
|
||||
}
|
||||
}
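The right-rotation performed in `ShuffledStealers::new` is easier to check with concrete numbers. The test below is an illustrative sketch only, applying the same two-shift formula to a plain bit field.

#[test]
fn candidate_rotation_example() {
    // 8 workers; workers 2, 5 and 7 are candidates and worker 5 was randomly
    // selected as the starting point.
    let candidate_count = 8usize;
    let candidates = 0b1010_0100usize;
    let next_candidate = 5;

    // Same formula as in `ShuffledStealers::new`.
    let lower_bits = candidates & ((1 << next_candidate) - 1);
    let rotated =
        (candidates >> next_candidate) | (lower_bits << (candidate_count - next_candidate));

    // Worker 5 is now the LSB; worker 7 sits at bit 2 and worker 2 at bit 5.
    assert_eq!(rotated, 0b0010_0101);
}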
|
||||
|
||||
#[cfg(feature = "dev-logs")]
|
||||
#[derive(Debug)]
|
||||
struct Record {
|
||||
stats: Vec<AtomicUsize>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "dev-logs")]
|
||||
impl Record {
|
||||
fn new(worker_count: usize) -> Self {
|
||||
let mut stats = Vec::new();
|
||||
stats.resize_with(worker_count, Default::default);
|
||||
Self { stats }
|
||||
}
|
||||
fn increment(&self, worker_id: usize) {
|
||||
self.stats[worker_id].fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
fn get(&self) -> Vec<usize> {
|
||||
self.stats
|
||||
.iter()
|
||||
.map(|s| s.load(Ordering::Relaxed))
|
||||
.collect()
|
||||
}
|
||||
}
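As a complement, the claim loop at the heart of `activate_worker_relaxed` reduces to a `trailing_ones` search plus a `fetch_or`. The following self-contained sketch (not part of this commit) shows that logic on a bare `AtomicUsize`.

#[test]
fn claim_first_idle_worker_example() {
    use std::sync::atomic::{AtomicUsize, Ordering};

    let pool_size = 4;
    // Workers 0 and 1 are already active (bits set); workers 2 and 3 are idle.
    let active_workers = AtomicUsize::new(0b0011);

    let mut observed = active_workers.load(Ordering::Relaxed);
    let claimed = loop {
        // The first idle worker corresponds to the first zero bit.
        let first_idle = observed.trailing_ones() as usize;
        if first_idle >= pool_size {
            break None; // every worker is already active
        }
        // Try to claim it; the RMW returns the previous value, so a set bit
        // means another thread won the race and the search must continue.
        observed = active_workers.fetch_or(1 << first_idle, Ordering::Relaxed);
        if observed & (1 << first_idle) == 0 {
            break Some(first_idle);
        }
    };

    assert_eq!(claimed, Some(2));
    assert_eq!(active_workers.load(Ordering::Relaxed), 0b0111);
}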
asynchronix/src/executor/task.rs (new file, 398 lines)
@@ -0,0 +1,398 @@
|
||||
extern crate alloc;
|
||||
|
||||
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
|
||||
use std::future::Future;
|
||||
use std::mem::{self, ManuallyDrop};
|
||||
use std::task::{RawWaker, RawWakerVTable};
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
|
||||
|
||||
mod cancel_token;
|
||||
mod promise;
|
||||
mod runnable;
|
||||
mod util;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
pub(crate) use cancel_token::CancelToken;
|
||||
pub(crate) use promise::Promise;
|
||||
pub(crate) use runnable::Runnable;
|
||||
|
||||
use self::util::{runnable_exists, RunOnDrop};
|
||||
|
||||
/// Flag indicating that the future has not been polled to completion yet.
|
||||
const POLLING: u64 = 1 << 0;
|
||||
/// Flag indicating that the task has been cancelled or that the output has
|
||||
/// already been moved out.
|
||||
const CLOSED: u64 = 1 << 1;
|
||||
/// A single reference count increment.
|
||||
const REF_INC: u64 = 1 << 2;
|
||||
/// A single wake count increment.
|
||||
const WAKE_INC: u64 = 1 << 33;
|
||||
/// Reference count mask.
|
||||
const REF_MASK: u64 = !(REF_INC - 1) & (WAKE_INC - 1);
|
||||
/// Wake count mask.
|
||||
const WAKE_MASK: u64 = !(WAKE_INC - 1);
|
||||
/// Critical value of the reference count at which preventive measures must be
|
||||
/// enacted to prevent counter overflow.
|
||||
const REF_CRITICAL: u64 = (REF_MASK / 2) & REF_MASK;
|
||||
/// Critical value of the wake count at which preventive measures must be
|
||||
/// enacted to prevent counter overflow.
|
||||
const WAKE_CRITICAL: u64 = (WAKE_MASK / 2) & WAKE_MASK;
|
||||
|
||||
/// Either a future, its output, or uninitialized (empty).
|
||||
union TaskCore<F: Future> {
|
||||
/// Field present during the `Polling` and the `Wind-down` phases.
|
||||
future: ManuallyDrop<F>,
|
||||
|
||||
/// Field present during the `Completed` phase.
|
||||
output: ManuallyDrop<F::Output>,
|
||||
}
|
||||
|
||||
/// A task.
///
/// A task contains both the scheduling function and the future to be polled (or
/// its output if available). `Waker`, `Runnable`, `Promise` and `CancelToken`
/// are all type-erased (fat) pointers to a `Task`. The task is automatically
/// deallocated when all of them have been dropped.
///
/// The lifetime of a task involves up to 4 phases:
/// - `Polling` phase: the future needs to be polled,
/// - `Completed` phase: the future has been polled to completion and its output
///   is available,
/// - `Wind-down` phase: the task has been cancelled while it was already
///   scheduled for processing, so the future had to be kept temporarily alive
///   to avoid a race; the `Closed` phase will be entered only when the
///   scheduled task is processed,
/// - `Closed` phase: neither the future nor its output are available, either
///   because the task has been cancelled or because the output has been moved
///   out.
///
/// It is possible to move from `Polling` to `Completed`, `Wind-down` or
/// `Closed`, but the only possible transition from `Wind-down` and from
/// `Completed` is to `Closed`.
///
/// The different states and sub-states and their corresponding flags are
/// summarized below:
///
/// | Phase               | CLOSED | POLLING | WAKE_COUNT | Runnable exists? |
/// |---------------------|--------|---------|------------|------------------|
/// | Polling (idle)      | 0      | 1       | 0          | No               |
/// | Polling (scheduled) | 0      | 1       | ≠0         | Yes              |
/// | Completed           | 0      | 0       | any        | No               |
/// | Wind-down           | 1      | 1       | any        | Yes              |
/// | Closed              | 1      | 0       | any        | No               |
///
/// A `Runnable` is a reference to a task that has been scheduled. There can be
/// at most one `Runnable` at any given time.
///
/// `WAKE_COUNT` is a counter incremented each time the task is woken and reset
/// each time the `Runnable` has finished polling the task. The waker that
/// increments the wake count from 0 to 1 is responsible for creating and
/// scheduling a new `Runnable`.
///
/// The state also includes a reference count `REF_COUNT` that accounts for
/// the `Promise`, the `CancelToken` and all `Waker`s. The `Runnable` is _not_
/// included in `REF_COUNT` because its existence can be inferred from `CLOSED`,
/// `POLLING` and `WAKE_COUNT` (see table above).
struct Task<F: Future, S, T> {
|
||||
/// State of the task.
///
/// The state has the following layout, where bit 0 is the LSB and bit 63 is
/// the MSB:
///
/// | 33-63      | 2-32      | 1      | 0       |
/// |------------|-----------|--------|---------|
/// | WAKE_COUNT | REF_COUNT | CLOSED | POLLING |
state: AtomicU64,
|
||||
|
||||
/// The future, its output, or nothing.
|
||||
core: UnsafeCell<TaskCore<F>>,
|
||||
|
||||
/// The task scheduling function.
|
||||
schedule_fn: S,
|
||||
|
||||
/// An arbitrary `Clone` tag that is passed to the scheduling function.
|
||||
tag: T,
|
||||
}
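A small decoding helper ties the flag constants to the phase table above. This is an illustrative sketch only; the crate itself works with the raw flags inline rather than through such a helper.

#[cfg(test)]
mod state_layout_sketch {
    use super::{CLOSED, POLLING, REF_INC, WAKE_INC, WAKE_MASK};

    // Maps a state word to the phase names of the table in the `Task` docs.
    fn phase(state: u64) -> &'static str {
        match (state & CLOSED != 0, state & POLLING != 0) {
            (false, true) if state & WAKE_MASK == 0 => "Polling (idle)",
            (false, true) => "Polling (scheduled)",
            (false, false) => "Completed",
            (true, true) => "Wind-down",
            (true, false) => "Closed",
        }
    }

    #[test]
    fn initial_spawn_state_is_scheduled() {
        // The state created by `spawn`: two references (`Promise` and
        // `CancelToken`), a wake count of one for the live `Runnable`, and the
        // `POLLING` flag set.
        let state = (2 * REF_INC) | WAKE_INC | POLLING;
        assert_eq!(phase(state), "Polling (scheduled)");

        // Cancelling a scheduled task sets `CLOSED` but keeps `POLLING`.
        assert_eq!(phase(state | CLOSED), "Wind-down");
    }
}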
|
||||
|
||||
impl<F, S, T> Task<F, S, T>
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
|
||||
Self::clone_waker,
|
||||
Self::wake_by_val,
|
||||
Self::wake_by_ref,
|
||||
Self::drop_waker,
|
||||
);
|
||||
|
||||
/// Clones a waker.
|
||||
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
|
||||
let this = &*(ptr as *const Self);
|
||||
|
||||
let ref_count = this.state.fetch_add(REF_INC, Ordering::Relaxed) & REF_MASK;
|
||||
if ref_count > REF_CRITICAL {
|
||||
panic!("Attack of the clones: the waker was cloned too many times");
|
||||
}
|
||||
|
||||
RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
|
||||
}
|
||||
|
||||
/// Wakes the task by value.
|
||||
unsafe fn wake_by_val(ptr: *const ()) {
|
||||
// Verify that the scheduling function does not capture any variable.
|
||||
//
|
||||
// It is always possible for the `Runnable` scheduled in the call to
|
||||
// `wake` to be called and complete its execution before the scheduling
|
||||
// call returns. For efficiency reasons, the reference count is
|
||||
// preemptively decremented, which implies that the `Runnable` could
|
||||
// prematurely drop and deallocate this task. By making sure that the
|
||||
// schedule function is zero-sized, we ensure that premature
|
||||
// deallocation is safe since the scheduling function does not access
|
||||
// any allocated data.
|
||||
if mem::size_of::<S>() != 0 {
|
||||
// Note: a static assert is not possible as `S` is defined in the
|
||||
// outer scope.
|
||||
Self::drop_waker(ptr);
|
||||
panic!("Scheduling functions with captured variables are not supported");
|
||||
}
|
||||
|
||||
// Wake the task, decreasing at the same time the reference count.
|
||||
let state = Self::wake(ptr, WAKE_INC - REF_INC);
|
||||
|
||||
// Deallocate the task if this waker is the last reference to the task,
|
||||
// meaning that the reference count was 1 and the `POLLING` flag was
|
||||
// cleared. Note that if the `POLLING` flag was set then a `Runnable`
|
||||
// must exist.
|
||||
|
||||
if state & (REF_MASK | POLLING) == REF_INC {
|
||||
// Ensure that the newest state of the task output (if any) is
|
||||
// visible before it is dropped.
|
||||
//
|
||||
// Ordering: Acquire ordering is necessary to synchronize with the
|
||||
// Release ordering in all previous reference count decrements
|
||||
// and/or in the wake count reset (the latter is equivalent to a
|
||||
// reference count decrement for a `Runnable`).
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
let this = &*(ptr as *const Self);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated whether
|
||||
// or not `output` panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Self>());
|
||||
});
|
||||
|
||||
if state & CLOSED == 0 {
|
||||
// Since the `CLOSED` and `POLLING` flags are both cleared, the
|
||||
// output is present and must be dropped.
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
}
|
||||
// Else the `CLOSED` flag is set and the `POLLING` flag is cleared
|
||||
// so the task is already in the `Closed` phase.
|
||||
}
|
||||
}
|
||||
|
||||
/// Wakes the task by reference.
|
||||
unsafe fn wake_by_ref(ptr: *const ()) {
|
||||
// Wake the task.
|
||||
Self::wake(ptr, WAKE_INC);
|
||||
}
|
||||
|
||||
/// Wakes the task, either by value or by reference.
|
||||
#[inline(always)]
|
||||
unsafe fn wake(ptr: *const (), state_delta: u64) -> u64 {
|
||||
let this = &*(ptr as *const Self);
|
||||
|
||||
// Increment the wake count and, if woken by value, decrement the
|
||||
// reference count at the same time.
|
||||
//
|
||||
// Ordering: Release ordering is necessary to synchronize with either
|
||||
// the Acquire load or with the RMW in `Runnable::run`, which ensures
|
||||
// that all memory operations performed by the user before the call to
|
||||
// `wake` will be visible when the future is polled. Note that there is
|
||||
// no need to use AcqRel ordering to synchronize with all calls to
|
||||
// `wake` that precede the call to `Runnable::run`. This is because,
|
||||
// according to the C++ memory model, an RMW takes part in a Release
|
||||
// sequence irrespective of its ordering. The below RMW also happens to
// take part in another Release sequence: it allows the Acquire-Release
// RMW that zeroes the wake count in the previous call to
// `Runnable::run` to synchronize with the initial Acquire load of the
// state in the next call to `Runnable::run` (or the Acquire fence in
|
||||
// `Runnable::cancel`), thus ensuring that the next `Runnable` sees the
|
||||
// newest state of the future.
|
||||
let state = this.state.fetch_add(state_delta, Ordering::Release);
|
||||
|
||||
if state & WAKE_MASK > WAKE_CRITICAL {
|
||||
panic!("The task was woken too many times: {:0x}", state);
|
||||
}
|
||||
|
||||
// Schedule the task if it is in the `Polling` phase but is not
|
||||
// scheduled yet.
|
||||
if state & (WAKE_MASK | CLOSED | POLLING) == POLLING {
|
||||
// Safety: calling `new_unchecked` is safe since: there is no other
|
||||
// `Runnable` running (the wake count was 0, the `POLLING` flag was
|
||||
// set, the `CLOSED` flag was cleared); the wake count is now 1; the
|
||||
// `POLLING` flag is set; the `CLOSED` flag is cleared; the task
|
||||
// contains a live future.
|
||||
|
||||
let runnable = Runnable::new_unchecked(ptr as *const Self);
|
||||
(this.schedule_fn)(runnable, this.tag.clone());
|
||||
}
|
||||
|
||||
state
|
||||
}
|
||||
|
||||
/// Drops a waker.
|
||||
unsafe fn drop_waker(ptr: *const ()) {
|
||||
let this = &*(ptr as *const Self);
|
||||
|
||||
// Ordering: Release ordering is necessary to synchronize with the
|
||||
// Acquire fence in the drop handler of the last reference to the task
|
||||
// and to make sure that all previous operations on the `core` member
|
||||
// are visible when it is dropped.
|
||||
let state = this.state.fetch_sub(REF_INC, Ordering::Release);
|
||||
|
||||
// Deallocate the task if this waker was the last reference to the task.
|
||||
if state & REF_MASK == REF_INC && !runnable_exists(state) {
|
||||
// Ensure that the newest state of the `core` member is visible
|
||||
// before it is dropped.
|
||||
//
|
||||
// Ordering: Acquire ordering is necessary to synchronize with the
|
||||
// Release ordering in all previous reference count decrements
|
||||
// and/or in the wake count reset (the latter is equivalent to a
|
||||
// reference count decrement for a `Runnable`).
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated whether
|
||||
// or not the `core` member panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Self>());
|
||||
});
|
||||
|
||||
if state & POLLING == POLLING {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
} else if state & CLOSED == 0 {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
}
|
||||
// Else the `CLOSED` flag is set but the `POLLING` flag is cleared
|
||||
// so the future was already dropped.
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawns a task.
|
||||
///
|
||||
/// An arbitrary tag can be attached to the task, a clone of which will be
|
||||
/// passed to the scheduling function each time it is called.
///
/// The returned `Runnable` must be scheduled by the user.
|
||||
pub(crate) fn spawn<F, S, T>(
|
||||
future: F,
|
||||
schedule_fn: S,
|
||||
tag: T,
|
||||
) -> (Promise<F::Output>, Runnable, CancelToken)
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
// Create a task with preemptively incremented reference and wake counts to
|
||||
// account for the returned `Promise`, `CancelToken` and `Runnable` (a
|
||||
// non-zero wake count with the `POLLING` flag set indicates that there is a
|
||||
// live `Runnable`).
|
||||
let task = Task {
|
||||
state: AtomicU64::new((2 * REF_INC) | WAKE_INC | POLLING),
|
||||
core: UnsafeCell::new(TaskCore {
|
||||
future: ManuallyDrop::new(future),
|
||||
}),
|
||||
schedule_fn,
|
||||
tag,
|
||||
};
|
||||
|
||||
// Pin the task with its future to the heap.
|
||||
unsafe {
|
||||
let layout = Layout::new::<Task<F, S, T>>();
|
||||
let ptr = alloc(layout) as *mut Task<F, S, T>;
|
||||
if ptr.is_null() {
|
||||
handle_alloc_error(layout);
|
||||
}
|
||||
*ptr = task;
|
||||
|
||||
// Safety: this is safe since the task was allocated with the global
|
||||
// allocator, there is no other `Runnable` running since the task was
|
||||
// just created, the wake count is 1, the `POLLING` flag is set, the
|
||||
// `CLOSED` flag is cleared and `core` contains a future.
|
||||
let runnable = Runnable::new_unchecked(ptr);
|
||||
|
||||
// Safety: this is safe since the task was allocated with the global
|
||||
// allocator and the reference count is 2.
|
||||
let promise = Promise::new_unchecked(ptr);
|
||||
let cancel_token = CancelToken::new_unchecked(ptr);
|
||||
|
||||
(promise, runnable, cancel_token)
|
||||
}
|
||||
}
|
||||
|
||||
/// Spawns a task whose output will never be retrieved.
|
||||
///
|
||||
/// This is mostly useful to avoid undue reference counting for futures that
|
||||
/// return a `()` type.
|
||||
///
|
||||
/// An arbitrary tag can be attached to the task, a clone of which will be
|
||||
/// passed to the scheduling function each time it is called.
|
||||
///
|
||||
/// The returned `Runnable` must be scheduled by the user.
|
||||
pub(crate) fn spawn_and_forget<F, S, T>(
|
||||
future: F,
|
||||
schedule_fn: S,
|
||||
tag: T,
|
||||
) -> (Runnable, CancelToken)
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
// Create a task with preemptively incremented reference and wake counts to
|
||||
// account for the returned `CancelToken` and `Runnable` (a non-zero wake
|
||||
// count with the `POLLING` flag set indicates that there is a live
|
||||
// `Runnable`).
|
||||
let task = Task {
|
||||
state: AtomicU64::new(REF_INC | WAKE_INC | POLLING),
|
||||
core: UnsafeCell::new(TaskCore {
|
||||
future: ManuallyDrop::new(future),
|
||||
}),
|
||||
schedule_fn,
|
||||
tag,
|
||||
};
|
||||
|
||||
// Pin the task with its future to the heap.
|
||||
unsafe {
|
||||
let layout = Layout::new::<Task<F, S, T>>();
|
||||
let ptr = alloc(layout) as *mut Task<F, S, T>;
|
||||
if ptr.is_null() {
|
||||
handle_alloc_error(layout);
|
||||
}
|
||||
*ptr = task;
|
||||
|
||||
// Safety: this is safe since the task was allocated with the global
|
||||
// allocator, there is no other `Runnable` running since the task was
|
||||
// just created, the wake count is 1, the `POLLING` flag is set, the
|
||||
// `CLOSED` flag is cleared and `core` contains a future.
|
||||
let runnable = Runnable::new_unchecked(ptr);
|
||||
|
||||
// Safety: this is safe since the task was allocated with the global
|
||||
// allocator and the reference count is 1.
|
||||
let cancel_token = CancelToken::new_unchecked(ptr);
|
||||
|
||||
(runnable, cancel_token)
|
||||
}
|
||||
}
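A usage sketch of the `spawn` contract follows; it is illustrative only and assumes that `Runnable::run()` polls the future, as suggested by the `run` entry of the vtable in the `runnable` module.

#[cfg(test)]
mod spawn_usage_sketch {
    use super::{promise::Stage, spawn, Runnable};

    #[test]
    fn spawn_and_poll_to_completion() {
        // The scheduling function must not capture anything (see
        // `wake_by_val`); for a future that never yields it is never called.
        fn schedule(_runnable: Runnable, _tag: ()) {
            unreachable!("a future with no await point is never re-scheduled");
        }

        let (promise, runnable, _cancel_token) = spawn(async { 21 * 2 }, schedule, ());

        // The output is not available until the returned `Runnable` is run.
        assert!(promise.poll().is_pending());

        runnable.run(); // assumed API: the `Runnable` polls the future
        assert_eq!(promise.poll(), Stage::Ready(42));
    }
}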
asynchronix/src/executor/task/cancel_token.rs (new file, 222 lines)
@@ -0,0 +1,222 @@
|
||||
extern crate alloc;
|
||||
|
||||
use std::alloc::{dealloc, Layout};
|
||||
use std::future::Future;
|
||||
use std::mem::ManuallyDrop;
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
|
||||
use crate::loom_exports::sync::atomic::{self, Ordering};
|
||||
|
||||
use super::runnable::Runnable;
|
||||
use super::util::{runnable_exists, RunOnDrop};
|
||||
use super::Task;
|
||||
use super::{CLOSED, POLLING, REF_INC, REF_MASK};
|
||||
|
||||
/// Virtual table for a `CancelToken`.
|
||||
#[derive(Debug)]
|
||||
struct VTable {
|
||||
cancel: unsafe fn(*const ()),
|
||||
drop: unsafe fn(*const ()),
|
||||
}
|
||||
|
||||
/// Cancels a pending task.
|
||||
///
|
||||
/// If the task is completed, nothing is done. If the task is not completed
|
||||
/// but not currently scheduled (no `Runnable` exists) then the future is
|
||||
/// dropped immediately. Otherwise, the future will be dropped at a later
|
||||
/// time by the scheduled `Runnable` once it runs.
|
||||
unsafe fn cancel<F: Future, S, T>(ptr: *const ())
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// Enter the `Closed` or `Wind-down` phase if the task is not
|
||||
// completed.
|
||||
//
|
||||
// Ordering: Acquire ordering is necessary to synchronize with any
|
||||
// operation that modified or dropped the future or output. This ensures
|
||||
// that the future or output can be safely dropped or that the task can
|
||||
// be safely deallocated if necessary. The Release ordering synchronizes
|
||||
// with any of the Acquire atomic fences and ensures that this atomic
|
||||
// access is fully completed upon deallocation.
|
||||
let state = this
|
||||
.state
|
||||
.fetch_update(Ordering::AcqRel, Ordering::Relaxed, |s| {
|
||||
if s & POLLING == 0 {
|
||||
// The task has completed or is closed so there is no need
|
||||
// to drop the future or output and the reference count can
|
||||
// be decremented right away.
|
||||
Some(s - REF_INC)
|
||||
} else if runnable_exists(s) {
|
||||
// A `Runnable` exists so the future cannot be dropped (this
|
||||
// will be done by the `Runnable`) and the reference count
|
||||
// can be decremented right away.
|
||||
Some((s | CLOSED) - REF_INC)
|
||||
} else {
|
||||
// The future or the output needs to be dropped so the
|
||||
// reference count cannot be decremented just yet, otherwise
|
||||
// another reference could deallocate the task before the
|
||||
// drop is complete.
|
||||
Some((s | CLOSED) & !POLLING)
|
||||
}
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
if runnable_exists(state) {
|
||||
// The task is in the `Wind-down` phase so the cancellation is now
|
||||
// the responsibility of the current `Runnable`.
|
||||
return;
|
||||
}
|
||||
|
||||
if state & POLLING == 0 {
|
||||
// Deallocate the task if this was the last reference.
|
||||
if state & REF_MASK == REF_INC {
|
||||
// Ensure that all atomic accesses to the state are visible.
|
||||
|
||||
// FIXME: the fence does not seem necessary since the fetch_update
|
||||
// uses AcqRel.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release
|
||||
// operations that decrement the number of references to the task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated,
|
||||
// whether or not the output panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
});
|
||||
|
||||
// Drop the output if any.
|
||||
if state & CLOSED == 0 {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Set a drop guard to ensure that reference count is decremented and
|
||||
// the task is deallocated if this is the last reference, whether or not
|
||||
// the future panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
// Ordering: Release ordering is necessary to ensure that the drop
|
||||
// of the future or output is visible when the last reference
|
||||
// deallocates the task.
|
||||
let state = this.state.fetch_sub(REF_INC, Ordering::Release);
|
||||
if state & REF_MASK == REF_INC {
|
||||
// Ensure that all atomic accesses to the state are visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release
|
||||
// operations that decrement the number of references to the
|
||||
// task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
}
|
||||
});
|
||||
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
}
|
||||
|
||||
/// Drops the token without cancelling the task.
|
||||
unsafe fn drop<F: Future, S, T>(ptr: *const ())
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// Decrement the reference count.
|
||||
//
|
||||
// Ordering: the Release ordering synchronizes with any of the Acquire
|
||||
// atomic fences and ensures that this atomic access is fully completed
|
||||
// upon deallocation.
|
||||
let state = this.state.fetch_sub(REF_INC, Ordering::Release);
|
||||
|
||||
// Deallocate the task if this token was the last reference to the task.
|
||||
if state & REF_MASK == REF_INC && !runnable_exists(state) {
|
||||
// Ensure that the newest state of the future or output is visible
|
||||
// before it is dropped.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release
|
||||
// operations that decrement the number of references to the task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated whether
|
||||
// or not the future or output panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
});
|
||||
|
||||
if state & POLLING == POLLING {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
} else if state & CLOSED == 0 {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
}
|
||||
// Else the `CLOSED` flag is set but the `POLLING` flag is cleared
|
||||
// so the future was already dropped.
|
||||
}
|
||||
}
|
||||
|
||||
/// A token that can be used to cancel a task.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct CancelToken {
|
||||
task: *const (),
|
||||
vtable: &'static VTable,
|
||||
}
|
||||
|
||||
impl CancelToken {
|
||||
/// Creates a `CancelToken`.
|
||||
///
|
||||
/// Safety: this is safe provided that:
|
||||
///
|
||||
/// - the task pointer points to a live task allocated with the global
|
||||
/// allocator,
|
||||
/// - the reference count has been incremented to account for this new task
|
||||
/// reference.
|
||||
pub(super) unsafe fn new_unchecked<F: Future, S, T>(task: *const Task<F, S, T>) -> Self
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
Self {
|
||||
task: task as *const (),
|
||||
vtable: &VTable {
|
||||
cancel: cancel::<F, S, T>,
|
||||
drop: drop::<F, S, T>,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Cancels the task.
|
||||
///
|
||||
/// If the task is completed, nothing is done. If the task is not completed
|
||||
/// but not currently scheduled (no `Runnable` exists) then the future is
|
||||
/// dropped immediately. Otherwise, the future will be dropped at a later
|
||||
/// time by the scheduled `Runnable` once it runs.
|
||||
pub(crate) fn cancel(self) {
|
||||
// Prevent the drop handler from being called, as it would call
|
||||
// `drop_token` on the inner field.
|
||||
let this = ManuallyDrop::new(self);
|
||||
|
||||
unsafe { (this.vtable.cancel)(this.task) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for CancelToken {
|
||||
fn drop(&mut self) {
|
||||
unsafe { (self.vtable.drop)(self.task) }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Send for CancelToken {}
|
||||
impl UnwindSafe for CancelToken {}
|
||||
impl RefUnwindSafe for CancelToken {}
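The two documented cancellation outcomes can be illustrated with the same machinery. The sketch below is illustrative only and reuses the assumption that `Runnable::run()` polls the future once.

#[cfg(test)]
mod cancel_usage_sketch {
    use super::super::{promise::Stage, spawn, Runnable};

    fn schedule(_runnable: Runnable, _tag: ()) {}

    #[test]
    fn cancelling_a_pending_task_drops_its_future() {
        // `std::future::pending` is never ready and never wakes itself, so
        // after one poll there is no live `Runnable` and cancellation takes
        // effect immediately.
        let (promise, runnable, cancel_token) =
            spawn(std::future::pending::<()>(), schedule, ());
        runnable.run(); // assumed API: polls the future once

        cancel_token.cancel();
        assert!(promise.poll().is_cancelled());
    }

    #[test]
    fn cancelling_a_completed_task_does_nothing() {
        let (promise, runnable, cancel_token) = spawn(async { 7 }, schedule, ());
        runnable.run(); // assumed API: polls the future to completion

        // The task has already completed, so the output remains retrievable.
        cancel_token.cancel();
        assert_eq!(promise.poll(), Stage::Ready(7));
    }
}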
asynchronix/src/executor/task/promise.rs (new file, 203 lines)
@@ -0,0 +1,203 @@
|
||||
extern crate alloc;
|
||||
|
||||
use std::alloc::{dealloc, Layout};
|
||||
use std::future::Future;
|
||||
use std::mem::ManuallyDrop;
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
|
||||
use crate::loom_exports::sync::atomic::{self, Ordering};
|
||||
|
||||
use super::runnable::Runnable;
|
||||
use super::util::{runnable_exists, RunOnDrop};
|
||||
use super::Task;
|
||||
use super::{CLOSED, POLLING, REF_INC, REF_MASK};
|
||||
|
||||
/// Virtual table for a `Promise`.
|
||||
#[derive(Debug)]
|
||||
struct VTable<U: Send + 'static> {
|
||||
poll: unsafe fn(*const ()) -> Stage<U>,
|
||||
drop: unsafe fn(*const ()),
|
||||
}
|
||||
|
||||
/// Retrieves the output of the task if ready.
|
||||
unsafe fn poll<F: Future, S, T>(ptr: *const ()) -> Stage<F::Output>
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// Set the `CLOSED` flag if the task is in the `Completed` phase.
|
||||
//
|
||||
// Ordering: Acquire ordering is necessary to synchronize with the
|
||||
// operation that modified or dropped the future or output. This ensures
|
||||
// that the newest state of the output is visible before it is moved
|
||||
// out, or that the future can be safely dropped when the promise is
|
||||
// dropped if the promise is the last reference to the task.
|
||||
let state = this
|
||||
.state
|
||||
.fetch_update(Ordering::Acquire, Ordering::Relaxed, |s| {
|
||||
if s & (POLLING | CLOSED) == 0 {
|
||||
Some(s | CLOSED)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
|
||||
if let Err(s) = state {
|
||||
if s & CLOSED == CLOSED {
|
||||
// The task is either in the `Wind-down` or `Closed` phase.
|
||||
return Stage::Cancelled;
|
||||
} else {
|
||||
// The task is in the `Polling` phase.
|
||||
return Stage::Pending;
|
||||
};
|
||||
}
|
||||
|
||||
let output = this.core.with_mut(|c| ManuallyDrop::take(&mut (*c).output));
|
||||
|
||||
Stage::Ready(output)
|
||||
}
|
||||
|
||||
/// Drops the promise.
|
||||
unsafe fn drop<F: Future, S, T>(ptr: *const ())
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// Decrement the reference count.
|
||||
//
|
||||
// Ordering: Release ordering is necessary to ensure that if the output
|
||||
// was moved out by using `poll`, then the move has completed when the
|
||||
// last reference deallocates the task.
|
||||
let state = this.state.fetch_sub(REF_INC, Ordering::Release);
|
||||
|
||||
// Deallocate the task if this promise was the last reference to the task.
|
||||
if state & REF_MASK == REF_INC && !runnable_exists(state) {
|
||||
// Ensure that the newest state of the future or output is visible
|
||||
// before it is dropped.
|
||||
//
|
||||
// Ordering: Acquire ordering is necessary to synchronize with the
|
||||
// Release ordering in all previous reference count decrements
|
||||
// and/or in the wake count reset (the latter is equivalent to a
|
||||
// reference count decrement for a `Runnable`).
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated whether
|
||||
// or not the `core` member panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
});
|
||||
|
||||
if state & POLLING == POLLING {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
} else if state & CLOSED == 0 {
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
}
|
||||
// Else the `CLOSED` flag is set but the `POLLING` flag is cleared
|
||||
// so the future was already dropped.
|
||||
}
|
||||
}
|
||||
|
||||
/// The stage of progress of a promise.
|
||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Ord, PartialOrd, Hash)]
|
||||
pub(crate) enum Stage<T> {
|
||||
/// The task has completed.
|
||||
Ready(T),
|
||||
/// The task is still being processed.
|
||||
Pending,
|
||||
/// The task has been cancelled.
|
||||
Cancelled,
|
||||
}
|
||||
|
||||
impl<U> Stage<U> {
|
||||
/// Maps a `Stage<U>` to `Stage<V>` by applying a function to a contained value.
|
||||
#[allow(unused)]
|
||||
pub(crate) fn map<V, F>(self, f: F) -> Stage<V>
|
||||
where
|
||||
F: FnOnce(U) -> V,
|
||||
{
|
||||
match self {
|
||||
Stage::Ready(t) => Stage::Ready(f(t)),
|
||||
Stage::Pending => Stage::Pending,
|
||||
Stage::Cancelled => Stage::Cancelled,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Ready`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_ready(&self) -> bool {
|
||||
matches!(*self, Stage::Ready(_))
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Pending`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_pending(&self) -> bool {
|
||||
matches!(*self, Stage::Pending)
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Cancelled`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_cancelled(&self) -> bool {
|
||||
matches!(*self, Stage::Cancelled)
|
||||
}
|
||||
}
|
||||
|
||||
/// A promise that can poll a task's output of type `U`.
|
||||
///
|
||||
/// Note that dropping a promise does not cancel the task.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Promise<U: Send + 'static> {
|
||||
task: *const (),
|
||||
vtable: &'static VTable<U>,
|
||||
}
|
||||
|
||||
impl<U: Send + 'static> Promise<U> {
|
||||
/// Creates a `Promise`.
|
||||
///
|
||||
/// Safety: this is safe provided that:
|
||||
///
|
||||
/// - the task pointer points to a live task allocated with the global
|
||||
/// allocator,
|
||||
/// - the reference count has been incremented to account for this new task
|
||||
/// reference.
|
||||
pub(super) unsafe fn new_unchecked<F, S, T>(task: *const Task<F, S, T>) -> Self
|
||||
where
|
||||
F: Future<Output = U> + Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
Self {
|
||||
task: task as *const (),
|
||||
vtable: &VTable::<U> {
|
||||
poll: poll::<F, S, T>,
|
||||
drop: drop::<F, S, T>,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the output of the task if ready.
|
||||
#[allow(unused)]
|
||||
pub(crate) fn poll(&self) -> Stage<U> {
|
||||
unsafe { (self.vtable.poll)(self.task) }
|
||||
}
|
||||
}
|
||||
|
||||
impl<U: Send + 'static> Drop for Promise<U> {
|
||||
fn drop(&mut self) {
|
||||
unsafe { (self.vtable.drop)(self.task) }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl<U: Send + 'static> Send for Promise<U> {}
|
||||
impl<U: Send + 'static> UnwindSafe for Promise<U> {}
|
||||
impl<U: Send + 'static> RefUnwindSafe for Promise<U> {}
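The `Stage` combinators can also be exercised on their own, without any task machinery; a minimal sketch (not part of this commit):

#[cfg(test)]
mod stage_sketch {
    use super::Stage;

    #[test]
    fn mapping_a_stage() {
        let ready: Stage<u32> = Stage::Ready(21);
        assert!(ready.is_ready());
        // `map` only transforms the `Ready` payload...
        assert_eq!(ready.map(|x| x * 2), Stage::Ready(42));

        // ...and leaves `Pending` and `Cancelled` untouched.
        assert_eq!(Stage::<u32>::Pending.map(|x| x * 2), Stage::Pending);
        assert!(Stage::<u32>::Cancelled.map(|x| x * 2).is_cancelled());
    }
}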
asynchronix/src/executor/task/runnable.rs (new file, 322 lines)
@@ -0,0 +1,322 @@
|
||||
extern crate alloc;
|
||||
|
||||
use std::alloc::{dealloc, Layout};
|
||||
use std::future::Future;
|
||||
use std::mem::{self, ManuallyDrop};
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll, RawWaker, Waker};
|
||||
|
||||
use crate::loom_exports::debug_or_loom_assert;
|
||||
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
|
||||
|
||||
use super::util::RunOnDrop;
|
||||
use super::Task;
|
||||
use super::{CLOSED, POLLING, REF_MASK, WAKE_MASK};
|
||||
|
||||
/// Virtual table for a `Runnable`.
|
||||
#[derive(Debug)]
|
||||
struct VTable {
|
||||
run: unsafe fn(*const ()),
|
||||
cancel: unsafe fn(*const ()),
|
||||
}
|
||||
|
||||
/// Polls the inner future.
|
||||
unsafe fn run<F: Future, S, T>(ptr: *const ())
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// At this point, the task cannot be in the `Completed` phase, otherwise
// it would not have been scheduled in the first place. It could,
// however, have been cancelled and transitioned from `Polling` to
// `Wind-down` after it was already scheduled. It is possible that in
// such a case the `CLOSED` flag may not be visible when loading the
// state, but this is not a problem: when a task is cancelled while
// already scheduled (i.e. while the wake count is non-zero), its future
// is kept alive, so even if the loaded state is stale, the worst that
// can happen is that the future will be unnecessarily polled.
|
||||
//
|
||||
// It is worth mentioning that, in order to detect if the task was
|
||||
// awaken while polled, other executors reset a notification flag with
|
||||
// an RMW when entering `run`. The idea here is to avoid such RMW and
|
||||
// instead load a wake count. Only once the task has been polled, an RMW
|
||||
// checks the wake count again to detect if the task was notified in the
|
||||
// meantime. This method may be slightly more prone to spurious false
|
||||
// positives but is much faster (1 vs 2 RMWs) and still prevents the
|
||||
// occurrence of lost wake-ups.
|
||||
|
||||
// Load the state.
|
||||
//
|
||||
// Ordering: the below Acquire load synchronizes with the Release
|
||||
// operation at the end of the call to `run` by the previous `Runnable`
|
||||
// and ensures that the new state of the future stored by the previous
|
||||
// call to `run` is visible. This synchronization exists because the RMW
|
||||
// in the call to `Task::wake` or `Task::wake_by_ref` that scheduled
|
||||
// this `Runnable` establishes a Release sequence. This load also
|
||||
// synchronizes with the Release operation in `wake` and ensures that
|
||||
// all memory operations performed by their callers are visible. Since
|
||||
// this is a simple load, it may be stale and some wake requests may not
|
||||
// be visible yet, but the post-polling RMW will later check if all wake
|
||||
// requests were serviced.
|
||||
let mut state = this.state.load(Ordering::Acquire);
|
||||
let mut wake_count = state & WAKE_MASK;
|
||||
|
||||
debug_or_loom_assert!(state & POLLING == POLLING);
|
||||
|
||||
loop {
|
||||
// Drop the future if the phase has transitioned to `Wind-down`.
|
||||
if state & CLOSED == CLOSED {
|
||||
cancel::<F, S, T>(ptr);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// Poll the task.
|
||||
let raw_waker = RawWaker::new(ptr, &Task::<F, S, T>::RAW_WAKER_VTABLE);
|
||||
let waker = ManuallyDrop::new(Waker::from_raw(raw_waker));
|
||||
|
||||
let cx = &mut Context::from_waker(&waker);
|
||||
let fut = Pin::new_unchecked(this.core.with_mut(|c| &mut *(*c).future));
|
||||
|
||||
// Set a panic guard to cancel the task if the future panics when
|
||||
// polled.
|
||||
let panic_guard = RunOnDrop::new(|| cancel::<F, S, T>(ptr));
|
||||
|
||||
let poll_state = fut.poll(cx);
|
||||
mem::forget(panic_guard);
|
||||
|
||||
if let Poll::Ready(output) = poll_state {
|
||||
// Set a panic guard to close the task if the future or the output
|
||||
// panic when dropped. Miri complains if a reference to `this` is
|
||||
// captured and `mem::forget` is called on the guard after
|
||||
// deallocation, which is why the state is taken by pointer.
|
||||
let state_ptr = &this.state as *const AtomicU64;
|
||||
let panic_guard = RunOnDrop::new(|| {
|
||||
// Clear the `POLLING` flag while setting the `CLOSED` flag
|
||||
// to enter the `Closed` phase.
|
||||
//
|
||||
// Ordering: Release ordering on success is necessary to
|
||||
// ensure that all memory operations on the future or the
|
||||
// output are visible when the last reference deallocates
|
||||
// the task.
|
||||
let state = (*state_ptr)
|
||||
.fetch_update(Ordering::Release, Ordering::Relaxed, |s| {
|
||||
Some((s | CLOSED) & !POLLING)
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Deallocate if there are no more references to the task.
|
||||
if state & REF_MASK == 0 {
|
||||
// Ensure that all atomic accesses to the state are
|
||||
// visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all
|
||||
// Release operations that decrement the number of
|
||||
// references to the task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
}
|
||||
});
|
||||
|
||||
// Drop the future and publish its output.
|
||||
this.core.with_mut(|c| {
|
||||
ManuallyDrop::drop(&mut (*c).future);
|
||||
(*c).output = ManuallyDrop::new(output);
|
||||
});
|
||||
|
||||
// Clear the `POLLING` flag to enter the `Completed` phase,
|
||||
// unless the task has concurrently transitioned to the
|
||||
// `Wind-down` phase or unless this `Runnable` is the last
|
||||
// reference to the task.
|
||||
if this
|
||||
.state
|
||||
.fetch_update(Ordering::Release, Ordering::Relaxed, |s| {
|
||||
if s & CLOSED == CLOSED || s & REF_MASK == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(s & !POLLING)
|
||||
}
|
||||
})
|
||||
.is_ok()
|
||||
{
|
||||
mem::forget(panic_guard);
|
||||
return;
|
||||
}
|
||||
|
||||
// The task is in the `Wind-down` phase or this `Runnable`
|
||||
// was the last reference, so the output must be dropped.
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).output));
|
||||
mem::forget(panic_guard);
|
||||
|
||||
// Clear the `POLLING` flag to enter the `Closed` phase. This is
|
||||
// not actually necessary if the `Runnable` is the last
|
||||
// reference, but that should be a very rare occurrence.
|
||||
//
|
||||
// Ordering: Release ordering is necessary to ensure that the
|
||||
// drop of the output is visible when the last reference
|
||||
// deallocates the task.
|
||||
state = this.state.fetch_and(!POLLING, Ordering::Release);
|
||||
|
||||
// Deallocate the task if there are no task references left.
|
||||
if state & REF_MASK == 0 {
|
||||
// Ensure that all atomic accesses to the state are visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all
|
||||
// Release operations that decrement the number of
|
||||
// references to the task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// The future is `Pending`: try to reset the wake count.
|
||||
//
|
||||
// Ordering: a Release ordering is required in case the wake count
|
||||
// is successfully cleared; it synchronizes, via a Release sequence,
|
||||
// with the Acquire load upon entering `Runnable::run` the next time
|
||||
// it is called. Acquire ordering is in turn necessary in case the
|
||||
// wake count has changed and the future must be polled again; it
|
||||
// synchronizes with the Release RMW in `wake` and ensures that all
|
||||
// memory operations performed by their callers are visible when the
|
||||
// polling loop is repeated.
|
||||
state = this.state.fetch_sub(wake_count, Ordering::AcqRel);
|
||||
debug_or_loom_assert!(state > wake_count);
|
||||
wake_count = (state & WAKE_MASK) - wake_count;
|
||||
|
||||
// Return now if the wake count has been successfully cleared,
|
||||
// provided that the task was not concurrently cancelled.
|
||||
if wake_count == 0 && state & CLOSED == 0 {
|
||||
// If there are no task references left, cancel and deallocate
|
||||
// the task since it can never be scheduled again.
|
||||
if state & REF_MASK == 0 {
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
});
|
||||
|
||||
// Drop the future.
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
}
|
||||
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Cancels the task, dropping the inner future.
|
||||
unsafe fn cancel<F, S, T>(ptr: *const ())
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
let this = &*(ptr as *const Task<F, S, T>);
|
||||
|
||||
// Ensure that the modifications of the future by the previous
|
||||
// `Runnable` are visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with the Release operation
|
||||
// at the end of the call to `run` by the previous `Runnable` and
|
||||
// ensures that the new state of the future stored by the previous call
|
||||
// to `run` is visible. This synchronization exists because the wake
|
||||
// count RMW in the call to `Task::wake` that created this `Runnable`
|
||||
// establishes a Release sequence.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to enter the `Closed` phase whether or not the
|
||||
// future panics when dropped.
|
||||
let _drop_guard = RunOnDrop::new(|| {
|
||||
// Clear the `POLLING` flag while setting the `CLOSED` flag to enter
|
||||
// the `Closed` phase.
|
||||
//
|
||||
// Ordering: Release ordering on success is necessary to ensure that
|
||||
// all memory operations on the future are visible when the last
|
||||
// reference deallocates the task.
|
||||
let state = this
|
||||
.state
|
||||
.fetch_update(Ordering::Release, Ordering::Relaxed, |s| {
|
||||
Some((s | CLOSED) & !POLLING)
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
// Deallocate if there are no more references to the task.
|
||||
if state & REF_MASK == 0 {
|
||||
// Ensure that all atomic accesses to the state are visible.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release
|
||||
// operations that decrement the number of references to the
|
||||
// task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
dealloc(ptr as *mut u8, Layout::new::<Task<F, S, T>>());
|
||||
}
|
||||
});
|
||||
|
||||
// Drop the future.
|
||||
this.core.with_mut(|c| ManuallyDrop::drop(&mut (*c).future));
|
||||
}
|
||||
|
||||
/// Handle to a scheduled task.
|
||||
///
|
||||
/// Dropping the runnable directly instead of calling `run` cancels the task.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Runnable {
|
||||
task: *const (),
|
||||
vtable: &'static VTable,
|
||||
}
|
||||
|
||||
impl Runnable {
|
||||
/// Creates a `Runnable`.
|
||||
///
|
||||
/// Safety: this is safe provided that:
|
||||
///
|
||||
/// - the task pointer points to a live task allocated with the global
|
||||
/// allocator,
|
||||
/// - there is no other live `Runnable` for this task,
|
||||
/// - the wake count is non-zero,
|
||||
/// - the `POLLING` flag is set and the `CLOSED` flag is cleared,
|
||||
/// - the task contains a live future.
|
||||
pub(super) unsafe fn new_unchecked<F, S, T>(task: *const Task<F, S, T>) -> Self
|
||||
where
|
||||
F: Future + Send + 'static,
|
||||
F::Output: Send + 'static,
|
||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||
T: Clone + Send + Sync + 'static,
|
||||
{
|
||||
Self {
|
||||
task: task as *const (),
|
||||
vtable: &VTable {
|
||||
run: run::<F, S, T>,
|
||||
cancel: cancel::<F, S, T>,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// Polls the wrapped future.
|
||||
pub(crate) fn run(self) {
|
||||
// Prevent the drop handler from being called, as it would call `cancel`
|
||||
// on the inner field.
|
||||
let this = ManuallyDrop::new(self);
|
||||
|
||||
// Poll the future.
|
||||
unsafe { (this.vtable.run)(this.task) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for Runnable {
|
||||
fn drop(&mut self) {
|
||||
// Cancel the task.
|
||||
unsafe { (self.vtable.cancel)(self.task) }
|
||||
}
|
||||
}
|
||||
|
||||
unsafe impl Send for Runnable {}
|
||||
impl UnwindSafe for Runnable {}
|
||||
impl RefUnwindSafe for Runnable {}
|
7
asynchronix/src/executor/task/tests.rs
Normal file
@ -0,0 +1,7 @@
|
||||
use super::*;
|
||||
|
||||
#[cfg(not(asynchronix_loom))]
|
||||
mod general;
|
||||
|
||||
#[cfg(asynchronix_loom)]
|
||||
mod loom;
|
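The loom model-checking variants below are compiled only when the custom `asynchronix_loom` cfg is enabled, so the regular `general` tests and the loom models never build at the same time; presumably the loom suite is run with that cfg passed to rustc, for instance via `RUSTFLAGS="--cfg asynchronix_loom" cargo test` (likely in release mode, since loom explorations are expensive).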
626
asynchronix/src/executor/task/tests/general.rs
Normal file
@ -0,0 +1,626 @@
|
||||
use std::future::Future;
|
||||
use std::ops::Deref;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::{Context, Poll};
|
||||
use std::thread;
|
||||
|
||||
use futures_channel::{mpsc, oneshot};
|
||||
use futures_util::StreamExt;
|
||||
|
||||
use super::super::promise::Stage;
|
||||
use super::*;
|
||||
|
||||
// Test prelude that simulates a simple scheduler queue.
|
||||
macro_rules! test_prelude {
|
||||
() => {
|
||||
static QUEUE: Mutex<Vec<Runnable>> = Mutex::new(Vec::new());
|
||||
|
||||
// Schedules one runnable task.
|
||||
//
|
||||
// There should exist at most 1 runnable per task at any time, so the
// queue should never hold more than one runnable for the same task.
|
||||
#[allow(dead_code)]
|
||||
fn schedule_runnable(runnable: Runnable, _tag: ()) {
|
||||
let mut queue = QUEUE.lock().unwrap();
|
||||
queue.push(runnable);
|
||||
}
|
||||
|
||||
// Runs one runnable task and returns true if a task was scheduled,
|
||||
// otherwise returns false.
|
||||
#[allow(dead_code)]
|
||||
fn run_scheduled_runnable() -> bool {
|
||||
if let Some(runnable) = QUEUE.lock().unwrap().pop() {
|
||||
runnable.run();
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
// Drops a runnable task and returns true if a task was scheduled, otherwise
|
||||
// returns false.
|
||||
#[allow(dead_code)]
|
||||
fn drop_runnable() -> bool {
|
||||
if let Some(_runnable) = QUEUE.lock().unwrap().pop() {
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// A friendly wrapper over a shared atomic boolean that uses only Relaxed
|
||||
// ordering.
|
||||
#[derive(Clone)]
|
||||
struct Flag(Arc<AtomicBool>);
|
||||
impl Flag {
|
||||
fn new(value: bool) -> Self {
|
||||
Self(Arc::new(AtomicBool::new(value)))
|
||||
}
|
||||
fn set(&self, value: bool) {
|
||||
self.0.store(value, Ordering::Relaxed);
|
||||
}
|
||||
fn get(&self) -> bool {
|
||||
self.0.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
||||
// A simple wrapper for the output of a future with a liveness flag.
|
||||
struct MonitoredOutput<T> {
|
||||
is_alive: Flag,
|
||||
inner: T,
|
||||
}
|
||||
impl<T> Deref for MonitoredOutput<T> {
|
||||
type Target = T;
|
||||
|
||||
fn deref(&self) -> &T {
|
||||
&self.inner
|
||||
}
|
||||
}
|
||||
impl<T> Drop for MonitoredOutput<T> {
|
||||
fn drop(&mut self) {
|
||||
self.is_alive.set(false);
|
||||
}
|
||||
}
|
||||
|
||||
// A simple future wrapper with a liveness flag returning a `MonitoredOutput` on
|
||||
// completion.
|
||||
struct MonitoredFuture<F: Future> {
|
||||
future_is_alive: Flag,
|
||||
output_is_alive: Flag,
|
||||
inner: F,
|
||||
}
|
||||
impl<F: Future> MonitoredFuture<F> {
|
||||
// Returns the `MonitoredFuture`, a liveness flag for the future and a
|
||||
// liveness flag for the output.
|
||||
fn new(future: F) -> (Self, Flag, Flag) {
|
||||
let future_is_alive = Flag::new(true);
|
||||
let output_is_alive = Flag::new(false);
|
||||
let future_is_alive_remote = future_is_alive.clone();
|
||||
let output_is_alive_remote = output_is_alive.clone();
|
||||
|
||||
(
|
||||
Self {
|
||||
future_is_alive,
|
||||
output_is_alive,
|
||||
inner: future,
|
||||
},
|
||||
future_is_alive_remote,
|
||||
output_is_alive_remote,
|
||||
)
|
||||
}
|
||||
}
|
||||
impl<F: Future> Future for MonitoredFuture<F> {
|
||||
type Output = MonitoredOutput<F::Output>;
|
||||
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let inner = unsafe { self.as_mut().map_unchecked_mut(|s| &mut s.inner) };
|
||||
match inner.poll(cx) {
|
||||
Poll::Pending => Poll::Pending,
|
||||
Poll::Ready(value) => {
|
||||
self.output_is_alive.set(true);
|
||||
let test_output = MonitoredOutput {
|
||||
is_alive: self.output_is_alive.clone(),
|
||||
inner: value,
|
||||
};
|
||||
Poll::Ready(test_output)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<F: Future> Drop for MonitoredFuture<F> {
|
||||
fn drop(&mut self) {
|
||||
self.future_is_alive.set(false);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_schedule() {
|
||||
test_prelude!();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move { 42 });
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
assert_eq!(future_is_alive.get(), true);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
|
||||
// The task should complete immediately when run.
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_schedule_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(async move { 42 }, schedule_runnable, ());
|
||||
|
||||
let th = thread::spawn(move || runnable.run());
|
||||
loop {
|
||||
match promise.poll() {
|
||||
Stage::Pending => {}
|
||||
Stage::Cancelled => unreachable!(),
|
||||
Stage::Ready(v) => {
|
||||
assert_eq!(v, 42);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
th.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_schedule_and_forget() {
|
||||
test_prelude!();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async {});
|
||||
let (runnable, _cancel_token) = spawn_and_forget(future, schedule_runnable, ());
|
||||
assert_eq!(future_is_alive.get(), true);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
|
||||
// The task should complete immediately when run.
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_wake() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let result = receiver.await.unwrap();
|
||||
result
|
||||
});
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
|
||||
// The future should have been polled but should not have completed.
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
assert!(promise.poll().is_pending());
|
||||
|
||||
// Wake the task.
|
||||
sender.send(42).unwrap();
|
||||
|
||||
// The task should have been scheduled by the channel sender.
|
||||
assert_eq!(run_scheduled_runnable(), true);
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_wake_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(
|
||||
async move {
|
||||
let result = receiver.await.unwrap();
|
||||
result
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable.run();
|
||||
|
||||
let th_sender = thread::spawn(move || sender.send(42).unwrap());
|
||||
let th_exec = thread::spawn(|| while !run_scheduled_runnable() {});
|
||||
|
||||
loop {
|
||||
match promise.poll() {
|
||||
Stage::Pending => {}
|
||||
Stage::Cancelled => unreachable!(),
|
||||
Stage::Ready(v) => {
|
||||
assert_eq!(v, 42);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
th_sender.join().unwrap();
|
||||
th_exec.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_wake_and_forget() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let _ = receiver.await;
|
||||
});
|
||||
|
||||
let (runnable, _cancel_token) = spawn_and_forget(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
|
||||
// The future should have been polled but should not have completed.
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
|
||||
// Wake the task.
|
||||
sender.send(42).unwrap();
|
||||
|
||||
// The task should have been scheduled by the channel sender.
|
||||
assert_eq!(run_scheduled_runnable(), true);
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_multiple_wake() {
|
||||
test_prelude!();
|
||||
|
||||
let (mut sender, mut receiver) = mpsc::channel(3);
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let mut sum = 0;
|
||||
for _ in 0..5 {
|
||||
sum += receiver.next().await.unwrap();
|
||||
}
|
||||
sum
|
||||
});
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
|
||||
// The future should have been polled but should not have completed.
|
||||
assert!(promise.poll().is_pending());
|
||||
|
||||
// Wake the task 3 times.
|
||||
sender.try_send(1).unwrap();
|
||||
sender.try_send(2).unwrap();
|
||||
sender.try_send(3).unwrap();
|
||||
|
||||
// The task should have been scheduled by the channel sender.
|
||||
assert_eq!(run_scheduled_runnable(), true);
|
||||
assert!(promise.poll().is_pending());
|
||||
|
||||
// The channel should be empty. Wake the task 2 more times.
|
||||
sender.try_send(4).unwrap();
|
||||
sender.try_send(5).unwrap();
|
||||
|
||||
// The task should have been scheduled by the channel sender.
|
||||
assert_eq!(run_scheduled_runnable(), true);
|
||||
|
||||
// The task should have completed.
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(15));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_multiple_wake_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (mut sender1, mut receiver) = mpsc::channel(3);
|
||||
let mut sender2 = sender1.clone();
|
||||
let mut sender3 = sender1.clone();
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(
|
||||
async move {
|
||||
let mut sum = 0;
|
||||
for _ in 0..3 {
|
||||
sum += receiver.next().await.unwrap();
|
||||
}
|
||||
sum
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable.run();
|
||||
|
||||
// Wake the task 3 times.
|
||||
let th_sender1 = thread::spawn(move || {
|
||||
sender1.try_send(1).unwrap();
|
||||
while run_scheduled_runnable() {}
|
||||
});
|
||||
let th_sender2 = thread::spawn(move || {
|
||||
sender2.try_send(2).unwrap();
|
||||
while run_scheduled_runnable() {}
|
||||
});
|
||||
let th_sender3 = thread::spawn(move || {
|
||||
sender3.try_send(3).unwrap();
|
||||
while run_scheduled_runnable() {}
|
||||
});
|
||||
|
||||
loop {
|
||||
match promise.poll() {
|
||||
Stage::Pending => {}
|
||||
Stage::Cancelled => unreachable!(),
|
||||
Stage::Ready(v) => {
|
||||
assert_eq!(v, 6);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
th_sender1.join().unwrap();
|
||||
th_sender2.join().unwrap();
|
||||
th_sender3.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_cancel_scheduled() {
|
||||
test_prelude!();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async {});
|
||||
|
||||
let (promise, runnable, cancel_token) = spawn(future, schedule_runnable, ());
|
||||
|
||||
// Cancel the task while a `Runnable` exists (i.e. while the task is
|
||||
// considered scheduled).
|
||||
cancel_token.cancel();
|
||||
|
||||
// The future should not be dropped while the `Runnable` exists, even if the
|
||||
// task is cancelled, but the task should be seen as cancelled.
|
||||
assert_eq!(future_is_alive.get(), true);
|
||||
assert!(promise.poll().is_cancelled());
|
||||
|
||||
// An attempt to run the task should now drop the future without polling it.
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_cancel_unscheduled() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let _ = receiver.await;
|
||||
});
|
||||
|
||||
let (promise, runnable, cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), true);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
|
||||
// Cancel the task while no `Runnable` exists (the task is not scheduled as
|
||||
// it needs to be woken by the channel sender first).
|
||||
cancel_token.cancel();
|
||||
assert!(promise.poll().is_cancelled());
|
||||
assert!(sender.send(()).is_err());
|
||||
|
||||
// The future should be dropped immediately upon cancellation without
|
||||
// completing.
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_cancel_completed() {
|
||||
test_prelude!();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move { 42 });
|
||||
|
||||
let (promise, runnable, cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
|
||||
// Cancel the already completed task.
|
||||
cancel_token.cancel();
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_cancel_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (runnable, cancel_token) = spawn_and_forget(async {}, schedule_runnable, ());
|
||||
|
||||
let th_cancel = thread::spawn(move || cancel_token.cancel());
|
||||
runnable.run();
|
||||
|
||||
th_cancel.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_promise_scheduled() {
|
||||
test_prelude!();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async {});
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
// Drop the promise while a `Runnable` exists (i.e. while the task is
|
||||
// considered scheduled).
|
||||
drop(promise);
|
||||
|
||||
// The task should complete immediately when run.
|
||||
runnable.run();
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_promise_unscheduled() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let _ = receiver.await;
|
||||
});
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
|
||||
// Drop the promise while no `Runnable` exists (the task is not scheduled as
|
||||
// it needs to be woken by the channel sender first).
|
||||
drop(promise);
|
||||
|
||||
// Wake the task.
|
||||
assert!(sender.send(()).is_ok());
|
||||
|
||||
// The task should have been scheduled by the channel sender.
|
||||
assert_eq!(run_scheduled_runnable(), true);
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), true);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_promise_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(async {}, schedule_runnable, ());
|
||||
|
||||
let th_drop = thread::spawn(move || drop(promise));
|
||||
runnable.run();
|
||||
|
||||
th_drop.join().unwrap()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_runnable() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
|
||||
let _ = receiver.await;
|
||||
});
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||
runnable.run();
|
||||
|
||||
// Wake the task.
|
||||
assert!(sender.send(()).is_ok());
|
||||
|
||||
// Drop the task scheduled by the channel sender.
|
||||
assert_eq!(drop_runnable(), true);
|
||||
assert_eq!(future_is_alive.get(), false);
|
||||
assert_eq!(output_is_alive.get(), false);
|
||||
assert!(promise.poll().is_cancelled());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_runnable_mt() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender, receiver) = oneshot::channel();
|
||||
|
||||
let (runnable, _cancel_token) = spawn_and_forget(
|
||||
async move {
|
||||
let _ = receiver.await;
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable.run();
|
||||
|
||||
let th_sender = thread::spawn(move || sender.send(()).is_ok());
|
||||
drop_runnable();
|
||||
|
||||
th_sender.join().unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn task_drop_cycle() {
|
||||
test_prelude!();
|
||||
|
||||
let (sender1, mut receiver1) = mpsc::channel(2);
|
||||
let (sender2, mut receiver2) = mpsc::channel(2);
|
||||
let (sender3, mut receiver3) = mpsc::channel(2);
|
||||
|
||||
static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
// Spawn 3 tasks that wake one another when dropped.
|
||||
let (runnable1, cancel_token1) = spawn_and_forget(
|
||||
{
|
||||
let mut sender2 = sender2.clone();
|
||||
let mut sender3 = sender3.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender2.try_send(());
|
||||
let _ = sender3.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver1.next().await;
|
||||
}
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable1.run();
|
||||
|
||||
let (runnable2, cancel_token2) = spawn_and_forget(
|
||||
{
|
||||
let mut sender1 = sender1.clone();
|
||||
let mut sender3 = sender3.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender1.try_send(());
|
||||
let _ = sender3.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver2.next().await;
|
||||
}
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable2.run();
|
||||
|
||||
let (runnable3, cancel_token3) = spawn_and_forget(
|
||||
{
|
||||
let mut sender1 = sender1.clone();
|
||||
let mut sender2 = sender2.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender1.try_send(());
|
||||
let _ = sender2.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver3.next().await;
|
||||
}
|
||||
},
|
||||
schedule_runnable,
|
||||
(),
|
||||
);
|
||||
runnable3.run();
|
||||
|
||||
let th1 = thread::spawn(move || cancel_token1.cancel());
|
||||
let th2 = thread::spawn(move || cancel_token2.cancel());
|
||||
let th3 = thread::spawn(move || cancel_token3.cancel());
|
||||
|
||||
th1.join().unwrap();
|
||||
th2.join().unwrap();
|
||||
th3.join().unwrap();
|
||||
|
||||
while run_scheduled_runnable() {}
|
||||
|
||||
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
|
||||
}
|
509
asynchronix/src/executor/task/tests/loom.rs
Normal file
@ -0,0 +1,509 @@
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::Context;
|
||||
use std::task::Poll;
|
||||
use std::task::Waker;
|
||||
|
||||
use ::loom::cell::UnsafeCell;
|
||||
use ::loom::model::Builder;
|
||||
use ::loom::sync::atomic::AtomicBool;
|
||||
use ::loom::sync::atomic::AtomicUsize;
|
||||
use ::loom::sync::atomic::Ordering::*;
|
||||
use ::loom::sync::Arc;
|
||||
use ::loom::{lazy_static, thread};
|
||||
|
||||
use super::promise::Stage;
|
||||
use super::*;
|
||||
|
||||
// Test prelude that simulates a single-slot scheduler queue.
|
||||
macro_rules! test_prelude {
|
||||
() => {
|
||||
// A single-slot scheduling queue.
|
||||
lazy_static! {
|
||||
static ref RUNNABLE_SLOT: RunnableSlot = RunnableSlot::new();
|
||||
}
|
||||
|
||||
// Schedules one runnable task.
|
||||
//
|
||||
// Will panic if the slot was already occupied since there should exist
|
||||
// at most 1 runnable per task at any time.
|
||||
#[allow(dead_code)]
|
||||
fn schedule_task(runnable: Runnable, _tag: ()) {
|
||||
RUNNABLE_SLOT.set(runnable);
|
||||
}
|
||||
|
||||
// Runs one runnable task and returns true if a task was indeed
|
||||
// scheduled, otherwise returns false.
|
||||
#[allow(dead_code)]
|
||||
fn try_poll_task() -> bool {
|
||||
if let Some(runnable) = RUNNABLE_SLOT.take() {
|
||||
runnable.run();
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
// Cancels a scheduled task by dropping its runnable and returns true if
|
||||
// a task was indeed scheduled, otherwise returns false.
|
||||
#[allow(dead_code)]
|
||||
fn try_cancel_task() -> bool {
|
||||
if let Some(_runnable) = RUNNABLE_SLOT.take() {
|
||||
// Just drop the runnable to cancel the task.
|
||||
return true;
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
struct RunnableSlot {
|
||||
state: AtomicUsize,
|
||||
runnable: UnsafeCell<Option<Runnable>>,
|
||||
}
|
||||
impl RunnableSlot {
|
||||
const LOCKED: usize = 0b01;
|
||||
const POPULATED: usize = 0b10;
|
||||
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
state: AtomicUsize::new(0),
|
||||
runnable: UnsafeCell::new(None),
|
||||
}
|
||||
}
|
||||
|
||||
fn take(&self) -> Option<Runnable> {
|
||||
self.state
|
||||
.fetch_update(Acquire, Relaxed, |s| {
|
||||
// Only lock if there is a runnable and it is not already locked.
|
||||
if s == Self::POPULATED {
|
||||
Some(Self::LOCKED)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.ok()
|
||||
.and_then(|_| {
|
||||
// Take the `Runnable`.
|
||||
let runnable = unsafe { self.runnable.with_mut(|r| (*r).take()) };
|
||||
assert!(runnable.is_some());
|
||||
|
||||
// Release the lock and signal that the slot is empty.
|
||||
self.state.store(0, Release);
|
||||
|
||||
runnable
|
||||
})
|
||||
}
|
||||
|
||||
fn set(&self, runnable: Runnable) {
|
||||
// Take the lock.
|
||||
let state = self.state.swap(Self::LOCKED, Acquire);
|
||||
|
||||
// Expect the initial state to be 0. Otherwise, there is already a
|
||||
// stored `Runnable` or one is being stored or taken, which should not
|
||||
// happen since a task can have at most 1 `Runnable` at a time.
|
||||
if state != 0 {
|
||||
panic!("Error: there are several live `Runnable`s for the same task");
|
||||
}
|
||||
|
||||
// Store the `Runnable`.
|
||||
unsafe { self.runnable.with_mut(|r| *r = Some(runnable)) };
|
||||
|
||||
// Release the lock and signal that the slot is populated.
|
||||
self.state.store(Self::POPULATED, Release);
|
||||
}
|
||||
}
|
||||
|
||||
// An asynchronous count-down counter.
|
||||
//
|
||||
// The implementation is intentionally naive and wakes the `CountWatcher` each
|
||||
// time the count is decremented, even though the future actually only completes
|
||||
// when the count reaches 0.
|
||||
//
|
||||
// Note that for simplicity, the waker may not be changed once set; this is not
|
||||
// an issue since the tested task implementation never changes the waker.
|
||||
fn count_down(init_count: usize) -> (CountController, CountWatcher) {
|
||||
let inner = Arc::new(CounterInner::new(init_count));
|
||||
|
||||
(
|
||||
CountController {
|
||||
inner: inner.clone(),
|
||||
},
|
||||
CountWatcher { inner },
|
||||
)
|
||||
}
|
||||
|
||||
// The counter inner type.
|
||||
struct CounterInner {
|
||||
waker: UnsafeCell<Option<Waker>>,
|
||||
state: AtomicUsize,
|
||||
}
|
||||
impl CounterInner {
|
||||
const HAS_WAKER: usize = 1 << 0;
|
||||
const INCREMENT: usize = 1 << 1;
|
||||
|
||||
fn new(init_count: usize) -> Self {
|
||||
Self {
|
||||
waker: UnsafeCell::new(None),
|
||||
state: AtomicUsize::new(init_count * Self::INCREMENT),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A `Clone` and `Sync` entity that can decrement the counter.
|
||||
#[derive(Clone)]
|
||||
struct CountController {
|
||||
inner: Arc<CounterInner>,
|
||||
}
|
||||
impl CountController {
|
||||
// Decrements the count and notifies the watcher if a waker is registered.
|
||||
//
|
||||
// This will panic if the counter is decremented too many times.
|
||||
fn decrement(&self) {
|
||||
let state = self.inner.state.fetch_sub(CounterInner::INCREMENT, Acquire);
|
||||
|
||||
if state / CounterInner::INCREMENT == 0 {
|
||||
panic!("The count-down counter has wrapped around");
|
||||
}
|
||||
|
||||
if state & CounterInner::HAS_WAKER != 0 {
|
||||
unsafe {
|
||||
self.inner
|
||||
.waker
|
||||
.with(|w| (&*w).as_ref().map(Waker::wake_by_ref))
|
||||
};
|
||||
}
|
||||
}
|
||||
}
|
||||
unsafe impl Send for CountController {}
|
||||
unsafe impl Sync for CountController {}
|
||||
|
||||
// An entity notified by the controller each time the count is decremented.
|
||||
struct CountWatcher {
|
||||
inner: Arc<CounterInner>,
|
||||
}
|
||||
impl Future for CountWatcher {
|
||||
type Output = ();
|
||||
|
||||
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let state = self.inner.state.load(Relaxed);
|
||||
|
||||
if state / CounterInner::INCREMENT == 0 {
|
||||
return Poll::Ready(());
|
||||
}
|
||||
if state & CounterInner::HAS_WAKER == CounterInner::HAS_WAKER {
|
||||
// Changes of the waker are not supported, so check that the waker
|
||||
// indeed hasn't changed.
|
||||
assert!(
|
||||
unsafe {
|
||||
self.inner
|
||||
.waker
|
||||
.with(|w| cx.waker().will_wake((*w).as_ref().unwrap()))
|
||||
},
|
||||
"This testing primitive does not support changes of waker"
|
||||
);
|
||||
|
||||
return Poll::Pending;
|
||||
}
|
||||
|
||||
unsafe { self.inner.waker.with_mut(|w| *w = Some(cx.waker().clone())) };
|
||||
|
||||
let state = self.inner.state.fetch_or(CounterInner::HAS_WAKER, Release);
|
||||
if state / CounterInner::INCREMENT == 0 {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
unsafe impl Send for CountWatcher {}
|
||||
|
||||
#[test]
|
||||
fn loom_task_schedule() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
lazy_static! {
|
||||
static ref READY: AtomicBool = AtomicBool::new(false);
|
||||
}
|
||||
|
||||
let (promise, runnable, _cancel_token) = spawn(async move { 42 }, schedule_task, ());
|
||||
|
||||
let t = thread::spawn(move || {
|
||||
// The task should complete immediately when run.
|
||||
runnable.run();
|
||||
READY.store(true, Release);
|
||||
});
|
||||
|
||||
if READY.load(Acquire) {
|
||||
assert_eq!(promise.poll(), Stage::Ready(42));
|
||||
}
|
||||
|
||||
t.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_cancel() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
lazy_static! {
|
||||
static ref IS_CANCELLED: AtomicBool = AtomicBool::new(false);
|
||||
}
|
||||
|
||||
let (count_controller, count_watcher) = count_down(1);
|
||||
|
||||
let (promise, runnable, cancel_token) =
|
||||
spawn(async move { count_watcher.await }, schedule_task, ());
|
||||
runnable.run();
|
||||
|
||||
let waker_thread = thread::spawn(move || {
|
||||
count_controller.decrement();
|
||||
});
|
||||
let scheduler_thread = thread::spawn(|| {
|
||||
try_poll_task();
|
||||
});
|
||||
let cancel_thread = thread::spawn(move || {
|
||||
cancel_token.cancel();
|
||||
IS_CANCELLED.store(true, Release);
|
||||
});
|
||||
|
||||
if IS_CANCELLED.load(Acquire) {
|
||||
assert!(promise.poll() != Stage::Pending);
|
||||
}
|
||||
|
||||
waker_thread.join().unwrap();
|
||||
scheduler_thread.join().unwrap();
|
||||
cancel_thread.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_run_and_drop() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
|
||||
let (count_controller, count_watcher) = count_down(1);
|
||||
|
||||
let (runnable, cancel_token) =
|
||||
spawn_and_forget(async move { count_watcher.await }, schedule_task, ());
|
||||
runnable.run();
|
||||
|
||||
let waker_thread = thread::spawn(move || {
|
||||
count_controller.decrement();
|
||||
});
|
||||
let runnable_thread = thread::spawn(|| {
|
||||
try_poll_task();
|
||||
});
|
||||
drop(cancel_token);
|
||||
|
||||
waker_thread.join().unwrap();
|
||||
runnable_thread.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_run_and_cancel() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
|
||||
let (count_controller, count_watcher) = count_down(1);
|
||||
|
||||
let (runnable, cancel_token) =
|
||||
spawn_and_forget(async move { count_watcher.await }, schedule_task, ());
|
||||
runnable.run();
|
||||
|
||||
let waker_thread = thread::spawn(move || {
|
||||
count_controller.decrement();
|
||||
});
|
||||
let runnable_thread = thread::spawn(|| {
|
||||
try_poll_task();
|
||||
});
|
||||
cancel_token.cancel();
|
||||
|
||||
waker_thread.join().unwrap();
|
||||
runnable_thread.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_drop_all() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
|
||||
let (promise, runnable, cancel_token) = spawn(async move {}, schedule_task, ());
|
||||
|
||||
let promise_thread = thread::spawn(move || {
|
||||
drop(promise);
|
||||
});
|
||||
let runnable_thread = thread::spawn(move || {
|
||||
drop(runnable);
|
||||
});
|
||||
drop(cancel_token);
|
||||
|
||||
promise_thread.join().unwrap();
|
||||
runnable_thread.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_drop_with_waker() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
||||
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
||||
}
|
||||
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
|
||||
let (count_controller, count_watcher) = count_down(1);
|
||||
|
||||
let (promise, runnable, cancel_token) =
|
||||
spawn(async move { count_watcher.await }, schedule_task, ());
|
||||
runnable.run();
|
||||
|
||||
let waker_thread = thread::spawn(move || {
|
||||
count_controller.decrement();
|
||||
});
|
||||
|
||||
let promise_thread = thread::spawn(move || {
|
||||
drop(promise);
|
||||
});
|
||||
let runnable_thread = thread::spawn(|| {
|
||||
try_cancel_task(); // drop the runnable if available
|
||||
});
|
||||
drop(cancel_token);
|
||||
|
||||
waker_thread.join().unwrap();
|
||||
promise_thread.join().unwrap();
|
||||
runnable_thread.join().unwrap();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_wake_single_thread() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 3;
|
||||
const TICK_COUNT1: usize = 4;
|
||||
const TICK_COUNT2: usize = 0;
|
||||
|
||||
loom_task_wake(DEFAULT_PREEMPTION_BOUND, TICK_COUNT1, TICK_COUNT2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn loom_task_wake_multi_thread() {
|
||||
const DEFAULT_PREEMPTION_BOUND: usize = 3;
|
||||
const TICK_COUNT1: usize = 1;
|
||||
const TICK_COUNT2: usize = 2;
|
||||
|
||||
loom_task_wake(DEFAULT_PREEMPTION_BOUND, TICK_COUNT1, TICK_COUNT2);
|
||||
}
|
||||
|
||||
// Test task waking from one or two threads.
|
||||
fn loom_task_wake(preemption_bound: usize, tick_count1: usize, tick_count2: usize) {
|
||||
let mut builder = Builder::new();
|
||||
if builder.preemption_bound.is_none() {
|
||||
builder.preemption_bound = Some(preemption_bound);
|
||||
}
|
||||
|
||||
let total_tick_count = tick_count1 + tick_count2;
|
||||
builder.check(move || {
|
||||
test_prelude!();
|
||||
lazy_static! {
|
||||
static ref POLL_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
}
|
||||
|
||||
let (count_controller1, count_watcher) = count_down(total_tick_count);
|
||||
let count_controller2 = count_controller1.clone();
|
||||
|
||||
let (promise, runnable, _cancel_token) =
|
||||
spawn(async move { count_watcher.await }, schedule_task, ());
|
||||
runnable.run();
|
||||
|
||||
let waker_thread1 = if tick_count1 != 0 {
|
||||
Some(thread::spawn(move || {
|
||||
for _ in 0..tick_count1 {
|
||||
count_controller1.decrement();
|
||||
}
|
||||
}))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let waker_thread2 = if tick_count2 != 0 {
|
||||
Some(thread::spawn(move || {
|
||||
for _ in 0..tick_count2 {
|
||||
count_controller2.decrement();
|
||||
}
|
||||
}))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let scheduler_thread = thread::spawn(move || {
|
||||
// Try to run scheduled runnables.
|
||||
for _ in 0..total_tick_count {
|
||||
if try_poll_task() {
|
||||
POLL_COUNT.fetch_add(1, Release);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let poll_count = POLL_COUNT.load(Acquire);
|
||||
let has_completed = poll_count == total_tick_count;
|
||||
|
||||
// Check that the promise is available if the task has been polled
|
||||
// `total_tick_count` times.
|
||||
if has_completed {
|
||||
assert_eq!(promise.poll(), Stage::Ready(()));
|
||||
}
|
||||
|
||||
scheduler_thread.join().unwrap();
|
||||
waker_thread1.map(|t| t.join().unwrap());
|
||||
waker_thread2.map(|t| t.join().unwrap());
|
||||
|
||||
// If the promise has not been retrieved yet, retrieve it now. It may be
|
||||
// necessary to poll the task one last time.
|
||||
if !has_completed {
|
||||
if POLL_COUNT.load(Acquire) != total_tick_count {
|
||||
try_poll_task();
|
||||
}
|
||||
|
||||
assert_eq!(promise.poll(), Stage::Ready(()));
|
||||
}
|
||||
});
|
||||
}
|
23
asynchronix/src/executor/task/util.rs
Normal file
@ -0,0 +1,23 @@
|
||||
use super::{CLOSED, POLLING, WAKE_MASK};
|
||||
|
||||
/// An object that runs an arbitrary closure when dropped.
|
||||
pub(crate) struct RunOnDrop<F: FnMut()> {
|
||||
drop_fn: F,
|
||||
}
|
||||
impl<F: FnMut()> RunOnDrop<F> {
|
||||
/// Creates a new `RunOnDrop`.
|
||||
pub(crate) fn new(drop_fn: F) -> Self {
|
||||
Self { drop_fn }
|
||||
}
|
||||
}
|
||||
impl<F: FnMut()> Drop for RunOnDrop<F> {
|
||||
fn drop(&mut self) {
|
||||
(self.drop_fn)();
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks whether a `Runnable` exists based on the state.
|
||||
#[inline(always)]
|
||||
pub(crate) fn runnable_exists(state: u64) -> bool {
|
||||
state & POLLING != 0 && state & (WAKE_MASK | CLOSED) != 0
|
||||
}
|
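`RunOnDrop` is the panic guard used throughout `run` and `cancel` above: the cleanup closure fires only if the guarded code unwinds, because the guard is forgotten on the success path. Below is a minimal sketch of that pattern, illustrative only; `fallible_step` is a hypothetical placeholder.

use std::mem;

fn guarded_section() {
    // Cleanup that must run only if the code below panics.
    let panic_guard = RunOnDrop::new(|| eprintln!("unwinding: cleaning up"));

    fallible_step(); // hypothetical operation that may panic

    // Success path: defuse the guard so the cleanup closure never runs.
    mem::forget(panic_guard);
}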
142
asynchronix/src/executor/tests.rs
Normal file
@ -0,0 +1,142 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use futures_channel::{mpsc, oneshot};
|
||||
use futures_util::StreamExt;
|
||||
|
||||
use super::*;
|
||||
|
||||
/// An object that runs an arbitrary closure when dropped.
|
||||
struct RunOnDrop<F: FnOnce()> {
|
||||
drop_fn: Option<F>,
|
||||
}
|
||||
impl<F: FnOnce()> RunOnDrop<F> {
|
||||
/// Creates a new `RunOnDrop`.
|
||||
fn new(drop_fn: F) -> Self {
|
||||
Self {
|
||||
drop_fn: Some(drop_fn),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<F: FnOnce()> Drop for RunOnDrop<F> {
|
||||
fn drop(&mut self) {
|
||||
self.drop_fn.take().map(|f| f());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn executor_deadlock() {
|
||||
const NUM_THREADS: usize = 3;
|
||||
|
||||
let (_sender1, receiver1) = oneshot::channel::<()>();
|
||||
let (_sender2, receiver2) = oneshot::channel::<()>();
|
||||
|
||||
let mut executor = Executor::new(NUM_THREADS);
|
||||
static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
executor.spawn_and_forget(async move {
|
||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = receiver2.await;
|
||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
executor.spawn_and_forget(async move {
|
||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = receiver1.await;
|
||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
|
||||
executor.run();
|
||||
// Check that the executor returns on deadlock, i.e. none of the tasks has
|
||||
// completed.
|
||||
assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
|
||||
assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn executor_deadlock_st() {
|
||||
const NUM_THREADS: usize = 1;
|
||||
|
||||
let (_sender1, receiver1) = oneshot::channel::<()>();
|
||||
let (_sender2, receiver2) = oneshot::channel::<()>();
|
||||
|
||||
let mut executor = Executor::new(NUM_THREADS);
|
||||
static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
executor.spawn_and_forget(async move {
|
||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = receiver2.await;
|
||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
executor.spawn_and_forget(async move {
|
||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
let _ = receiver1.await;
|
||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
|
||||
executor.run();
|
||||
// Check that the executor returns on deadlock, i.e. none of the tasks has
|
||||
// completed.
|
||||
assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
|
||||
assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn executor_drop_cycle() {
|
||||
const NUM_THREADS: usize = 3;
|
||||
|
||||
let (sender1, mut receiver1) = mpsc::channel(2);
|
||||
let (sender2, mut receiver2) = mpsc::channel(2);
|
||||
let (sender3, mut receiver3) = mpsc::channel(2);
|
||||
|
||||
let mut executor = Executor::new(NUM_THREADS);
|
||||
static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
// Spawn 3 tasks that wake one another when dropped.
|
||||
executor.spawn_and_forget({
|
||||
let mut sender2 = sender2.clone();
|
||||
let mut sender3 = sender3.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender2.try_send(());
|
||||
let _ = sender3.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver1.next().await;
|
||||
}
|
||||
});
|
||||
executor.spawn_and_forget({
|
||||
let mut sender1 = sender1.clone();
|
||||
let mut sender3 = sender3.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender1.try_send(());
|
||||
let _ = sender3.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver2.next().await;
|
||||
}
|
||||
});
|
||||
executor.spawn_and_forget({
|
||||
let mut sender1 = sender1.clone();
|
||||
let mut sender2 = sender2.clone();
|
||||
|
||||
async move {
|
||||
let _guard = RunOnDrop::new(move || {
|
||||
let _ = sender1.try_send(());
|
||||
let _ = sender2.try_send(());
|
||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
||||
});
|
||||
let _ = receiver3.next().await;
|
||||
}
|
||||
});
|
||||
|
||||
executor.run();
|
||||
|
||||
// Make sure that all tasks are eventually dropped even though each task
|
||||
// wakes the others when dropped.
|
||||
drop(executor);
|
||||
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
|
||||
}
|
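For contrast with the deadlock and drop-cycle scenarios above, the nominal usage pattern implied by these tests reduces to the sketch below (illustrative only, reusing the `Executor` API exercised by the tests).

// Illustrative only: spawn detached tasks and drive them to completion.
fn run_detached_work() {
    let mut executor = Executor::new(2); // two worker threads

    executor.spawn_and_forget(async {
        // Work that completes without waiting on another spawned task.
    });

    // `run` drives the spawned tasks and returns once no more progress can be
    // made, which is what the deadlock tests above rely on.
    executor.run();
}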
25
asynchronix/src/executor/worker.rs
Normal file
@ -0,0 +1,25 @@
|
||||
use std::cell::Cell;
|
||||
use std::sync::Arc;
|
||||
|
||||
use super::task::Runnable;
|
||||
|
||||
use super::ExecutorContext;
|
||||
use super::LocalQueue;
|
||||
|
||||
/// A local worker with access to global executor resources.
|
||||
pub(crate) struct Worker {
|
||||
pub(super) local_queue: LocalQueue,
|
||||
pub(super) fast_slot: Cell<Option<Runnable>>,
|
||||
pub(super) executor_context: Arc<ExecutorContext>,
|
||||
}
|
||||
|
||||
impl Worker {
|
||||
/// Creates a new worker.
|
||||
pub(super) fn new(local_queue: LocalQueue, executor_context: Arc<ExecutorContext>) -> Self {
|
||||
Self {
|
||||
local_queue,
|
||||
fast_slot: Cell::new(None),
|
||||
executor_context,
|
||||
}
|
||||
}
|
||||
}
|