forked from ROMEO/nexosim
First release candidate for v0.1.0
This commit is contained in:
37
.github/workflows/ci.yml
vendored
37
.github/workflows/ci.yml
vendored
@ -5,9 +5,8 @@ on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
|
||||
# Uncomment before first release.
|
||||
#env:
|
||||
# RUSTFLAGS: -Dwarnings
|
||||
env:
|
||||
RUSTFLAGS: -Dwarnings
|
||||
|
||||
jobs:
|
||||
check:
|
||||
@ -59,6 +58,38 @@ jobs:
|
||||
env:
|
||||
RUSTFLAGS: --cfg asynchronix_loom
|
||||
|
||||
miri:
|
||||
name: Miri
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout sources
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Install toolchain
|
||||
uses: dtolnay/rust-toolchain@nightly
|
||||
with:
|
||||
components: miri
|
||||
|
||||
- name: Run cargo miri tests
|
||||
run: cargo miri test --tests --lib
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance
|
||||
|
||||
- name: Run cargo miri example1
|
||||
run: cargo miri run --example espresso_machine
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||
|
||||
- name: Run cargo miri example2
|
||||
run: cargo miri run --example power_supply
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||
|
||||
- name: Run cargo miri example3
|
||||
run: cargo miri run --example stepper_motor
|
||||
env:
|
||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||
|
||||
lints:
|
||||
name: Lints
|
||||
runs-on: ubuntu-latest
|
||||
|
14
.github/workflows/loom.yml
vendored
14
.github/workflows/loom.yml
vendored
@ -5,10 +5,16 @@ on:
|
||||
push:
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- 'asynchronix/src/runtime/executor/queue.rs'
|
||||
- 'asynchronix/src/runtime/executor/queue/**'
|
||||
- 'asynchronix/src/runtime/executor/task.rs'
|
||||
- 'asynchronix/src/runtime/executor/task/**'
|
||||
- 'asynchronix/src/channel.rs'
|
||||
- 'asynchronix/src/channel/**'
|
||||
- 'asynchronix/src/executor/task.rs'
|
||||
- 'asynchronix/src/executor/task/**'
|
||||
- 'asynchronix/src/loom_exports.rs'
|
||||
- 'asynchronix/src/model/ports/broadcaster.rs'
|
||||
- 'asynchronix/src/model/ports/broadcaster/**'
|
||||
- 'asynchronix/src/util/slot.rs'
|
||||
- 'asynchronix/src/util/spsc_queue.rs'
|
||||
- 'asynchronix/src/util/sync_cell.rs'
|
||||
|
||||
jobs:
|
||||
loom:
|
||||
|
3
CHANGELOG.md
Normal file
3
CHANGELOG.md
Normal file
@ -0,0 +1,3 @@
|
||||
# 0.1.0 (2023-01-16)
|
||||
|
||||
Initial release
|
@ -1,6 +1,6 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2022 Serge Barral
|
||||
Copyright (c) 2023 Serge Barral
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
219
README.md
219
README.md
@ -1,82 +1,169 @@
|
||||
# Asynchronix
|
||||
|
||||
A high-performance asynchronous computation framework for system simulation.
|
||||
Asynchronix is a developer-friendly, highly optimized discrete-event simulation
|
||||
framework written in Rust. It is meant to scale from small, simple simulations
|
||||
to very large simulation benches with complex time-driven state machines.
|
||||
|
||||
## What is this?
|
||||
|
||||
> **Warning**: this page is at the moment mostly addressed at interested
|
||||
> contributors, but resources for users will be added soon.
|
||||
|
||||
In a nutshell, Asynchronix is an effort to develop a framework for
|
||||
discrete-event system simulation, with a particular focus on cyberphysical
|
||||
systems. In this context, a system might be something as large as a spacecraft,
|
||||
or as small as an IoT device.
|
||||
|
||||
Asynchronix draws from experience in the space industry but differs from
|
||||
existing tools in a number of respects, including:
|
||||
|
||||
1) *open-source license*: it is distributed under the very permissive MIT and
|
||||
Apache 2 licenses, with the intent to foster an ecosystem where models can be
|
||||
easily exchanged without reliance on proprietary APIs,
|
||||
2) *developer-friendly technology*: Rust's support for algebraic types and its
|
||||
powerful type system make it ideal for the "cyber" part in cyberphysical,
|
||||
i.e. for modelling digital devices with state machines,
|
||||
3) *very fast*: by leveraging Rust's excellent support for multithreading and
|
||||
async programming, simulation models can run efficiently in parallel with all
|
||||
required synchronization being transparently handled by the simulator.
|
||||
[](https://crates.io/crates/asynchronix)
|
||||
[](https://docs.rs/asynchronix)
|
||||
[](https://github.com/asynchronics/asynchronix#license)
|
||||
|
||||
|
||||
## General design
|
||||
## Overview
|
||||
|
||||
Asynchronix is an async compute framework for time-based discrete event
|
||||
simulation.
|
||||
Asynchronix is a simulator that leverages asynchronous programming to
|
||||
transparently and efficiently auto-parallelize simulations by means of a custom
|
||||
multi-threaded executor.
|
||||
|
||||
From the perspective of simulation model implementers and users, it closely
|
||||
resembles a flow-based programming framework: a model is essentially an isolated
|
||||
entity with a fixed set of typed inputs and outputs, communicating with other
|
||||
models and with the scheduler through message passing. Unlike in conventional
|
||||
flow-based programming, however, request-response patterns are also possible.
|
||||
It promotes a component-oriented architecture that is familiar to system
|
||||
engineers and closely resembles [flow-based programming][FBP]: a model is
|
||||
essentially an isolated entity with a fixed set of typed inputs and outputs,
|
||||
communicating with other models through message passing via connections defined
|
||||
during bench assembly.
|
||||
|
||||
Under the hood, Asynchronix' implementation is based on async Rust and the actor
|
||||
model. All inputs are forwarded to a single "mailbox" (an async channel),
|
||||
preserving the relative order of arrival of input messages.
|
||||
Although the main impetus for its development was the need for simulators able
|
||||
to handle large cyberphysical systems, Asynchronix is a general-purpose
|
||||
discrete-event simulator expected to be suitable for a wide range of simulation
|
||||
activities. It draws from experience on spacecraft real-time simulators but
|
||||
differs from existing tools in the space industry in a number of respects,
|
||||
including:
|
||||
|
||||
Computations proceed at discrete times. When executed, models can post events
|
||||
for the future, i.e. request the delayed activation of an input. Whenever the
|
||||
computation at a given time completes, the scheduler selects the nearest future
|
||||
time at which one or several events are scheduled, thus triggering another set
|
||||
of computations.
|
||||
1) *performance*: by taking advantage of Rust's excellent support for
|
||||
multithreading and asynchronous programming, simulation models can run
|
||||
efficiently in parallel with all required synchronization being transparently
|
||||
handled by the simulator,
|
||||
2) *developer-friendliness*: an ergonomic API and Rust's support for algebraic
|
||||
types make it ideal for the "cyber" part in cyberphysical, i.e. for modelling
|
||||
digital devices with even very complex state machines,
|
||||
3) *open-source*: last but not least, Asynchronix is distributed under the very
|
||||
permissive MIT and Apache 2 licenses, with the explicit intent to foster an
|
||||
ecosystem where models can be easily exchanged without reliance on
|
||||
proprietary APIs.
|
||||
|
||||
This computational process makes it difficult to use general-purpose runtimes
|
||||
such as Tokio, because the end of a set of computations is technically a
|
||||
deadlock: the computation completes when all models have nothing left to do and
|
||||
are blocked on an empty mailbox. Also, instead of managing a conventional
|
||||
reactor, the runtime manages a priority queue containing the posted events. For
|
||||
these reasons, it was necessary for Asynchronix to develop a fully custom
|
||||
[FBP]: https://en.wikipedia.org/wiki/Flow-based_programming
|
||||
|
||||
|
||||
## Documentation
|
||||
|
||||
The [API] documentation is relatively exhaustive and includes a practical
|
||||
overview which should provide all necessary information to get started.
|
||||
|
||||
More fleshed out examples can also be found in the dedicated
|
||||
[directory](examples).
|
||||
|
||||
[API]: https://docs.rs/asynchronix
|
||||
|
||||
|
||||
## Usage
|
||||
|
||||
Add this to your `Cargo.toml`:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
asynchronix = "0.1.0"
|
||||
```
|
||||
|
||||
|
||||
## Example
|
||||
|
||||
```rust
|
||||
// A system made of 2 identical models.
|
||||
// Each model is a 2× multiplier with an output delayed by 1s.
|
||||
//
|
||||
// ┌──────────────┐ ┌──────────────┐
|
||||
// │ │ │ │
|
||||
// Input ●─────▶│ multiplier 1 ├─────▶│ multiplier 2 ├─────▶ Output
|
||||
// │ │ │ │
|
||||
// └──────────────┘ └──────────────┘
|
||||
use asynchronix::model::{Model, Output};
|
||||
use asynchronix::simulation::{Mailbox, SimInit};
|
||||
use asynchronix::time::{MonotonicTime, Scheduler};
|
||||
use std::time::Duration;
|
||||
|
||||
// A model that doubles its input and forwards it with a 1s delay.
|
||||
#[derive(Default)]
|
||||
pub struct DelayedMultiplier {
|
||||
pub output: Output<f64>,
|
||||
}
|
||||
impl DelayedMultiplier {
|
||||
pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
||||
scheduler
|
||||
.schedule_in(Duration::from_secs(1), Self::send, 2.0 * value)
|
||||
.unwrap();
|
||||
}
|
||||
async fn send(&mut self, value: f64) {
|
||||
self.output.send(value).await;
|
||||
}
|
||||
}
|
||||
impl Model for DelayedMultiplier {}
|
||||
|
||||
// Instantiate models and their mailboxes.
|
||||
let mut multiplier1 = DelayedMultiplier::default();
|
||||
let mut multiplier2 = DelayedMultiplier::default();
|
||||
let multiplier1_mbox = Mailbox::new();
|
||||
let multiplier2_mbox = Mailbox::new();
|
||||
|
||||
// Connect the output of `multiplier1` to the input of `multiplier2`.
|
||||
multiplier1
|
||||
.output
|
||||
.connect(DelayedMultiplier::input, &multiplier2_mbox);
|
||||
|
||||
// Keep handles to the main input and output.
|
||||
let mut output_slot = multiplier2.output.connect_slot().0;
|
||||
let input_address = multiplier1_mbox.address();
|
||||
|
||||
// Instantiate the simulator
|
||||
let t0 = MonotonicTime::EPOCH; // arbitrary start time
|
||||
let mut simu = SimInit::new()
|
||||
.add_model(multiplier1, multiplier1_mbox)
|
||||
.add_model(multiplier2, multiplier2_mbox)
|
||||
.init(t0);
|
||||
|
||||
// Send a value to the first multiplier.
|
||||
simu.send_event(DelayedMultiplier::input, 3.5, &input_address);
|
||||
|
||||
// Advance time to the next event.
|
||||
simu.step();
|
||||
assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
||||
assert_eq!(output_slot.take(), None);
|
||||
|
||||
// Advance time to the next event.
|
||||
simu.step();
|
||||
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||
assert_eq!(output_slot.take(), Some(14.0));
|
||||
```
|
||||
|
||||
# Implementation notes
|
||||
|
||||
Under the hood, Asynchronix is based on an asynchronous implementation of the
|
||||
actor model, where each simulation model is an actor. The messages actually
|
||||
exchanged between models are `async` closures which capture the event's or
|
||||
request's value and take the model as `&mut self` argument. The mailbox
|
||||
associated to a model and to which closures are forwarded is the receiver of an
|
||||
async, bounded MPSC channel.
|
||||
|
||||
Computations proceed at discrete times. When executed, models can request the
|
||||
scheduler to send an event (or rather, a closure capturing such event) at a
|
||||
certain simulation time. Whenever computations for the current time complete,
|
||||
the scheduler selects the nearest future time at which one or several events are
|
||||
scheduled (*next event increment*), thus triggering another set of computations.
|
||||
|
||||
This computational process makes it difficult to use general-purpose
|
||||
asynchronous runtimes such as [Tokio][tokio], because the end of a set of
|
||||
computations is technically a deadlock: the computation completes when all models
|
||||
have nothing left to do and are blocked on an empty mailbox. Also, instead of
|
||||
managing a conventional reactor, the runtime manages a priority queue containing
|
||||
the posted events. For these reasons, Asynchronix relies on a fully custom
|
||||
runtime.
|
||||
|
||||
Another crucial aspect of async compute is message-passing efficiency:
|
||||
oftentimes the processing of an input is a simple action, making inter-thread
|
||||
message-passing the bottleneck. This in turns calls for a very efficient
|
||||
channel implementation, heavily optimized for the case of starved receivers
|
||||
since models are most of the time waiting for an input to become available.
|
||||
Even though the runtime was largely influenced by Tokio, it features additional
|
||||
optimizations that make it faster than any other multi-threaded Rust executor on
|
||||
the typically message-passing-heavy workloads seen in discrete-event simulation
|
||||
(see [benchmark]). Asynchronix also improves over the state of the art with a
|
||||
very fast custom MPSC channel, whose performance has been demonstrated through
|
||||
[Tachyonix][tachyonix], a general-purpose offshoot of this channel.
|
||||
|
||||
|
||||
## Current state
|
||||
|
||||
The simulator is rapidly approaching MVP completion and has achieved 2 major
|
||||
milestones:
|
||||
|
||||
* completion of an extremely fast asynchronous multi-threaded channel,
|
||||
demonstrated in the [Tachyonix][tachyonix] project; this channel is the
|
||||
backbone of the actor model,
|
||||
* completion of a custom `async` executor optimized for message-passing and
|
||||
deadlock detection, which has demonstrated even better performance than Tokio
|
||||
for message-passing; this executor is already in the main branch and can be
|
||||
tested against other executors using the Tachyonix [benchmark].
|
||||
|
||||
Before it becomes usable, however, further work is required to implement the
|
||||
priority queue, implement model inputs and outputs and adapt the channel.
|
||||
[tokio]: https://github.com/tokio-rs/tokio
|
||||
|
||||
[tachyonix]: https://github.com/asynchronics/tachyonix
|
||||
|
||||
|
@ -1,5 +1,10 @@
|
||||
[package]
|
||||
name = "asynchronix"
|
||||
# When incrementing version and releasing to crates.io:
|
||||
# - Update crate version in README.md
|
||||
# - Update CHANGELOG.md
|
||||
# - Update if necessary copyright notice in LICENSE-MIT
|
||||
# - Create a "vX.Y.Z" git tag
|
||||
authors = ["Serge Barral <serge.barral@asynchronics.com>"]
|
||||
version = "0.1.0"
|
||||
edition = "2021"
|
||||
@ -21,13 +26,19 @@ dev-logs = []
|
||||
|
||||
[dependencies]
|
||||
crossbeam-utils = "0.8"
|
||||
slab = "0.4"
|
||||
cache-padded = "1.1"
|
||||
diatomic-waker = "0.1"
|
||||
futures-task = "0.3"
|
||||
multishot = "0.3"
|
||||
num_cpus = "1.13"
|
||||
recycle-box = "0.2"
|
||||
slab = "0.4"
|
||||
st3 = "0.4"
|
||||
|
||||
[target.'cfg(asynchronix_loom)'.dependencies]
|
||||
loom = "0.5"
|
||||
waker-fn = "1.1"
|
||||
|
||||
[dev-dependencies]
|
||||
futures-channel = "0.3"
|
||||
futures-util = "0.3"
|
||||
futures-channel = "0.3"
|
||||
futures-executor = "0.3"
|
||||
|
480
asynchronix/examples/espresso_machine.rs
Normal file
480
asynchronix/examples/espresso_machine.rs
Normal file
@ -0,0 +1,480 @@
|
||||
//! Example: espresso coffee machine.
|
||||
//!
|
||||
//! This example demonstrates in particular:
|
||||
//!
|
||||
//! * non-trivial state machines,
|
||||
//! * cancellation of calls scheduled at the current time step using epochs,
|
||||
//! * model initialization,
|
||||
//! * simulation monitoring with event slots.
|
||||
//!
|
||||
//! ```text
|
||||
//! flow rate
|
||||
//! ┌─────────────────────────────────────────────┐
|
||||
//! │ (≥0) │
|
||||
//! │ ┌────────────┐ │
|
||||
//! └───▶│ │ │
|
||||
//! added volume │ Water tank ├────┐ │
|
||||
//! Water fill ●───────────────────▶│ │ │ │
|
||||
//! (>0) └────────────┘ │ │
|
||||
//! │ │
|
||||
//! water sense │ │
|
||||
//! ┌──────────────────────┘ │
|
||||
//! │ (empty|not empty) │
|
||||
//! │ │
|
||||
//! │ ┌────────────┐ ┌────────────┐ │
|
||||
//! brew time └───▶│ │ command │ │ │
|
||||
//! Brew time dial ●───────────────────▶│ Controller ├─────────▶│ Water pump ├───┘
|
||||
//! (>0) ┌───▶│ │ (on|off) │ │
|
||||
//! │ └────────────┘ └────────────┘
|
||||
//! trigger │
|
||||
//! Brew command ●───────────────┘
|
||||
//! (-)
|
||||
//! ```
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::time::Duration;
|
||||
|
||||
use asynchronix::model::{InitializedModel, Model, Output};
|
||||
use asynchronix::simulation::{Mailbox, SimInit};
|
||||
use asynchronix::time::{MonotonicTime, Scheduler, SchedulerKey};
|
||||
|
||||
/// Water pump.
|
||||
pub struct Pump {
|
||||
/// Actual volumetric flow rate [m³·s⁻¹] -- output port.
|
||||
pub flow_rate: Output<f64>,
|
||||
|
||||
/// Nominal volumetric flow rate in operation [m³·s⁻¹] -- constant.
|
||||
nominal_flow_rate: f64,
|
||||
}
|
||||
|
||||
impl Pump {
|
||||
/// Creates a pump with the specified nominal flow rate [m³·s⁻¹].
|
||||
pub fn new(nominal_flow_rate: f64) -> Self {
|
||||
Self {
|
||||
nominal_flow_rate,
|
||||
flow_rate: Output::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Main ON/OFF command -- input port.
|
||||
pub async fn command(&mut self, cmd: PumpCommand) {
|
||||
let flow_rate = match cmd {
|
||||
PumpCommand::On => self.nominal_flow_rate,
|
||||
PumpCommand::Off => 0.0,
|
||||
};
|
||||
|
||||
self.flow_rate.send(flow_rate).await;
|
||||
}
|
||||
}
|
||||
|
||||
impl Model for Pump {}
|
||||
|
||||
/// Espresso machine controller.
|
||||
pub struct Controller {
|
||||
/// Pump command -- output port.
|
||||
pub pump_cmd: Output<PumpCommand>,
|
||||
|
||||
/// Brew time setting [s] -- internal state.
|
||||
brew_time: Duration,
|
||||
/// Current water sense state.
|
||||
water_sense: WaterSenseState,
|
||||
/// Scheduler key, which if present indicates that the machine is currently
|
||||
/// brewing -- internal state.
|
||||
stop_brew_key: Option<SchedulerKey>,
|
||||
/// An epoch incremented when the scheduled 'stop_brew` callback must be
|
||||
/// ignored -- internal state.
|
||||
stop_brew_epoch: u64,
|
||||
}
|
||||
|
||||
impl Controller {
|
||||
/// Default brew time [s].
|
||||
const DEFAULT_BREW_TIME: Duration = Duration::new(25, 0);
|
||||
|
||||
/// Creates an espresso machine controller.
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
brew_time: Self::DEFAULT_BREW_TIME,
|
||||
pump_cmd: Output::default(),
|
||||
stop_brew_key: None,
|
||||
water_sense: WaterSenseState::Empty, // will be overridden during init
|
||||
stop_brew_epoch: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Signals a change in the water sensing state -- input port.
|
||||
pub async fn water_sense(&mut self, state: WaterSenseState, scheduler: &Scheduler<Self>) {
|
||||
// Check if the tank just got empty.
|
||||
if state == WaterSenseState::Empty && self.water_sense == WaterSenseState::NotEmpty {
|
||||
// If a brew was ongoing, we must cancel it.
|
||||
if let Some(key) = self.stop_brew_key.take() {
|
||||
// Try to abort the scheduled call to `stop_brew()`. If this
|
||||
// fails, increment the epoch so that the call is ignored.
|
||||
if scheduler.cancel(key).is_err() {
|
||||
self.stop_brew_epoch = self.stop_brew_epoch.wrapping_add(1);
|
||||
};
|
||||
|
||||
self.pump_cmd.send(PumpCommand::Off).await;
|
||||
}
|
||||
}
|
||||
|
||||
self.water_sense = state;
|
||||
}
|
||||
|
||||
/// Sets the timing for the next brews [s] -- input port.
|
||||
pub async fn brew_time(&mut self, brew_time: Duration) {
|
||||
// Panic if the duration is null.
|
||||
assert!(!brew_time.is_zero());
|
||||
|
||||
self.brew_time = brew_time;
|
||||
}
|
||||
|
||||
/// Starts brewing or cancels the current brew -- input port.
|
||||
pub async fn brew_cmd(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
||||
// If a brew was ongoing, sending the brew command is interpreted as a
|
||||
// request to cancel it.
|
||||
if let Some(key) = self.stop_brew_key.take() {
|
||||
self.pump_cmd.send(PumpCommand::Off).await;
|
||||
|
||||
// Try to abort the scheduled call to `stop_brew()`. If this
|
||||
// fails, increment the epoch so that the call is ignored.
|
||||
if scheduler.cancel(key).is_err() {
|
||||
self.stop_brew_epoch = self.stop_brew_epoch.wrapping_add(1);
|
||||
};
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// If there is no water, do nothing.
|
||||
if self.water_sense == WaterSenseState::Empty {
|
||||
return;
|
||||
}
|
||||
|
||||
// Schedule the `stop_brew()` method and turn on the pump.
|
||||
self.stop_brew_key = Some(
|
||||
scheduler
|
||||
.schedule_in(self.brew_time, Self::stop_brew, self.stop_brew_epoch)
|
||||
.unwrap(),
|
||||
);
|
||||
self.pump_cmd.send(PumpCommand::On).await;
|
||||
}
|
||||
|
||||
/// Stops brewing.
|
||||
async fn stop_brew(&mut self, epoch: u64) {
|
||||
// Ignore this call if the epoch has been incremented.
|
||||
if self.stop_brew_epoch != epoch {
|
||||
return;
|
||||
}
|
||||
|
||||
if self.stop_brew_key.take().is_some() {
|
||||
self.pump_cmd.send(PumpCommand::Off).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Model for Controller {}
|
||||
|
||||
/// ON/OFF pump command.
|
||||
#[derive(Copy, Clone, Eq, PartialEq)]
|
||||
pub enum PumpCommand {
|
||||
On,
|
||||
Off,
|
||||
}
|
||||
|
||||
/// Water tank.
|
||||
pub struct Tank {
|
||||
/// Water sensor -- output port.
|
||||
pub water_sense: Output<WaterSenseState>,
|
||||
|
||||
/// Volume of water [m³] -- internal state.
|
||||
volume: f64,
|
||||
/// State that exists when the mass flow rate is non-zero -- internal state.
|
||||
dynamic_state: Option<TankDynamicState>,
|
||||
/// An epoch incremented when the pending call to `set_empty()` must be
|
||||
/// ignored -- internal state.
|
||||
set_empty_epoch: u64,
|
||||
}
|
||||
impl Tank {
|
||||
/// Creates a new tank with the specified amount of water [m³].
|
||||
///
|
||||
/// The initial flow rate is assumed to be zero.
|
||||
pub fn new(water_volume: f64) -> Self {
|
||||
assert!(water_volume >= 0.0);
|
||||
|
||||
Self {
|
||||
volume: water_volume,
|
||||
dynamic_state: None,
|
||||
set_empty_epoch: 0,
|
||||
water_sense: Output::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Water volume added [m³] -- input port.
|
||||
pub async fn fill(&mut self, added_volume: f64, scheduler: &Scheduler<Self>) {
|
||||
// Ignore zero and negative values. We could also impose a maximum based
|
||||
// on tank capacity.
|
||||
if added_volume <= 0.0 {
|
||||
return;
|
||||
}
|
||||
let was_empty = self.volume == 0.0;
|
||||
|
||||
// Account for the added water.
|
||||
self.volume += added_volume;
|
||||
|
||||
// If the current flow rate is non-zero, compute the current volume and
|
||||
// schedule a new update.
|
||||
if let Some(state) = self.dynamic_state.take() {
|
||||
// Try to abort the scheduled call to `set_empty()`. If this
|
||||
// fails, increment the epoch so that the call is ignored.
|
||||
if scheduler.cancel(state.set_empty_key).is_err() {
|
||||
self.set_empty_epoch = self.set_empty_epoch.wrapping_add(1);
|
||||
}
|
||||
|
||||
// Update the volume, saturating at 0 in case of rounding errors.
|
||||
let time = scheduler.time();
|
||||
let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64();
|
||||
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
||||
|
||||
self.schedule_empty(state.flow_rate, time, scheduler).await;
|
||||
|
||||
// There is no need to broadcast the state of the water sense since
|
||||
// it could not be previously `Empty` (otherwise the dynamic state
|
||||
// would not exist).
|
||||
return;
|
||||
}
|
||||
|
||||
if was_empty {
|
||||
self.water_sense.send(WaterSenseState::NotEmpty).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Flow rate [m³·s⁻¹] -- input port.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This method will panic if the flow rate is negative.
|
||||
pub async fn set_flow_rate(&mut self, flow_rate: f64, scheduler: &Scheduler<Self>) {
|
||||
assert!(flow_rate >= 0.0);
|
||||
|
||||
let time = scheduler.time();
|
||||
|
||||
// If the flow rate was non-zero up to now, update the volume.
|
||||
if let Some(state) = self.dynamic_state.take() {
|
||||
// Try to abort the scheduled call to `set_empty()`. If this
|
||||
// fails, increment the epoch so that the call is ignored.
|
||||
if scheduler.cancel(state.set_empty_key).is_err() {
|
||||
self.set_empty_epoch = self.set_empty_epoch.wrapping_add(1);
|
||||
}
|
||||
|
||||
// Update the volume, saturating at 0 in case of rounding errors.
|
||||
let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64();
|
||||
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
||||
}
|
||||
|
||||
self.schedule_empty(flow_rate, time, scheduler).await;
|
||||
}
|
||||
|
||||
/// Schedules a callback for when the tank becomes empty.
|
||||
///
|
||||
/// Pre-conditions:
|
||||
/// - `flow_rate` cannot be negative.
|
||||
/// - `self.volume` should be up to date,
|
||||
/// - `self.dynamic_state` should be `None`.
|
||||
async fn schedule_empty(
|
||||
&mut self,
|
||||
flow_rate: f64,
|
||||
time: MonotonicTime,
|
||||
scheduler: &Scheduler<Self>,
|
||||
) {
|
||||
// Determine when the tank will be empty at the current flow rate.
|
||||
let duration_until_empty = if self.volume == 0.0 {
|
||||
0.0
|
||||
} else {
|
||||
self.volume / flow_rate
|
||||
};
|
||||
if duration_until_empty.is_infinite() {
|
||||
// The flow rate is zero or very close to zero, so there is no
|
||||
// need to plan an update since the tank will never become
|
||||
// empty.
|
||||
return;
|
||||
}
|
||||
let duration_until_empty = Duration::from_secs_f64(duration_until_empty);
|
||||
|
||||
// Schedule the next update.
|
||||
match scheduler.schedule_in(duration_until_empty, Self::set_empty, self.set_empty_epoch) {
|
||||
Ok(set_empty_key) => {
|
||||
let state = TankDynamicState {
|
||||
last_volume_update: time,
|
||||
set_empty_key,
|
||||
flow_rate,
|
||||
};
|
||||
self.dynamic_state = Some(state);
|
||||
}
|
||||
Err(_) => {
|
||||
// The duration was null so the tank is already empty.
|
||||
self.volume = 0.0;
|
||||
self.water_sense.send(WaterSenseState::Empty).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Updates the state of the tank to indicate that there is no more water.
|
||||
async fn set_empty(&mut self, epoch: u64) {
|
||||
// Ignore this call if the epoch has been incremented.
|
||||
if epoch != self.set_empty_epoch {
|
||||
return;
|
||||
}
|
||||
|
||||
self.volume = 0.0;
|
||||
self.dynamic_state = None;
|
||||
self.water_sense.send(WaterSenseState::Empty).await;
|
||||
}
|
||||
}
|
||||
|
||||
impl Model for Tank {
|
||||
/// Broadcasts the initial state of the water sense.
|
||||
fn init(
|
||||
mut self,
|
||||
_scheduler: &Scheduler<Self>,
|
||||
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
|
||||
Box::pin(async move {
|
||||
self.water_sense
|
||||
.send(if self.volume == 0.0 {
|
||||
WaterSenseState::Empty
|
||||
} else {
|
||||
WaterSenseState::NotEmpty
|
||||
})
|
||||
.await;
|
||||
|
||||
self.into()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Dynamic state of the tank that exists when and only when the mass flow rate
|
||||
/// is non-zero.
|
||||
struct TankDynamicState {
|
||||
last_volume_update: MonotonicTime,
|
||||
set_empty_key: SchedulerKey,
|
||||
flow_rate: f64,
|
||||
}
|
||||
|
||||
/// Water level in the tank.
|
||||
#[derive(Copy, Clone, Eq, PartialEq)]
|
||||
pub enum WaterSenseState {
|
||||
Empty,
|
||||
NotEmpty,
|
||||
}
|
||||
|
||||
fn main() {
|
||||
// ---------------
|
||||
// Bench assembly.
|
||||
// ---------------
|
||||
|
||||
// Models.
|
||||
|
||||
// The constant mass flow rate assumption is of course a gross
|
||||
// simplification, so the flow rate is set to an expected average over the
|
||||
// whole extraction [m³·s⁻¹].
|
||||
let pump_flow_rate = 4.5e-6;
|
||||
// Start with 1.5l in the tank [m³].
|
||||
let init_tank_volume = 1.5e-3;
|
||||
|
||||
let mut pump = Pump::new(pump_flow_rate);
|
||||
let mut controller = Controller::new();
|
||||
let mut tank = Tank::new(init_tank_volume);
|
||||
|
||||
// Mailboxes.
|
||||
let pump_mbox = Mailbox::new();
|
||||
let controller_mbox = Mailbox::new();
|
||||
let tank_mbox = Mailbox::new();
|
||||
|
||||
// Connections.
|
||||
controller.pump_cmd.connect(Pump::command, &pump_mbox);
|
||||
tank.water_sense
|
||||
.connect(Controller::water_sense, &controller_mbox);
|
||||
pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox);
|
||||
|
||||
// Model handles for simulation.
|
||||
let mut flow_rate = pump.flow_rate.connect_slot().0;
|
||||
let controller_addr = controller_mbox.address();
|
||||
let tank_addr = tank_mbox.address();
|
||||
|
||||
// Start time (arbitrary since models do not depend on absolute time).
|
||||
let t0 = MonotonicTime::EPOCH;
|
||||
|
||||
// Assembly and initialization.
|
||||
let mut simu = SimInit::new()
|
||||
.add_model(controller, controller_mbox)
|
||||
.add_model(pump, pump_mbox)
|
||||
.add_model(tank, tank_mbox)
|
||||
.init(t0);
|
||||
|
||||
// ----------
|
||||
// Simulation.
|
||||
// ----------
|
||||
|
||||
// Check initial conditions.
|
||||
let mut t = t0;
|
||||
assert_eq!(simu.time(), t);
|
||||
|
||||
// Brew one espresso shot with the default brew time.
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
||||
|
||||
simu.step();
|
||||
t += Controller::DEFAULT_BREW_TIME;
|
||||
assert_eq!(simu.time(), t);
|
||||
assert_eq!(flow_rate.take(), Some(0.0));
|
||||
|
||||
// Drink too much coffee.
|
||||
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64();
|
||||
let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who care about floating-point rounding errors?
|
||||
for _ in 0..(shots_per_tank - 1) {
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
||||
simu.step();
|
||||
t += Controller::DEFAULT_BREW_TIME;
|
||||
assert_eq!(simu.time(), t);
|
||||
assert_eq!(flow_rate.take(), Some(0.0));
|
||||
}
|
||||
|
||||
// Check that the tank becomes empty before the completion of the next shot.
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
simu.step();
|
||||
assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME);
|
||||
t = simu.time();
|
||||
assert_eq!(flow_rate.take(), Some(0.0));
|
||||
|
||||
// Try to brew another shot while the tank is still empty.
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
assert!(flow_rate.take().is_none());
|
||||
|
||||
// Change the brew time and fill up the tank.
|
||||
let brew_time = Duration::new(30, 0);
|
||||
simu.send_event(Controller::brew_time, brew_time, &controller_addr);
|
||||
simu.send_event(Tank::fill, 1.0e-3, tank_addr);
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
||||
|
||||
simu.step();
|
||||
t += brew_time;
|
||||
assert_eq!(simu.time(), t);
|
||||
assert_eq!(flow_rate.take(), Some(0.0));
|
||||
|
||||
// Interrupt the brew after 15s by pressing again the brew button.
|
||||
simu.schedule_in(
|
||||
Duration::from_secs(15),
|
||||
Controller::brew_cmd,
|
||||
(),
|
||||
&controller_addr,
|
||||
)
|
||||
.unwrap();
|
||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
||||
|
||||
simu.step();
|
||||
t += Duration::from_secs(15);
|
||||
assert_eq!(simu.time(), t);
|
||||
assert_eq!(flow_rate.take(), Some(0.0));
|
||||
}
|
167
asynchronix/examples/power_supply.rs
Normal file
167
asynchronix/examples/power_supply.rs
Normal file
@ -0,0 +1,167 @@
|
||||
//! Example: power supply with parallel resistive loads.
|
||||
//!
|
||||
//! This example demonstrates in particular:
|
||||
//!
|
||||
//! * the use of requestor and replier ports,
|
||||
//! * simulation monitoring with event slots.
|
||||
//!
|
||||
//! ```text
|
||||
//! ┌────────┐
|
||||
//! │ │
|
||||
//! ┌───▶│ Load ├───▶ Power
|
||||
//! │ │ │
|
||||
//! │ └────────┘
|
||||
//! │
|
||||
//! │ ┌────────┐
|
||||
//! │ │ │
|
||||
//! ├───▶│ Load ├───▶ Power
|
||||
//! │ │ │
|
||||
//! │ └────────┘
|
||||
//! │
|
||||
//! │ ┌────────┐
|
||||
//! ┌──────────┐ voltage▶ │ │ │
|
||||
//! Voltage setting ●────▶│ │◀────────────┴───▶│ Load ├───▶ Power
|
||||
//! │ Power │ ◀current │ │
|
||||
//! │ supply │ └────────┘
|
||||
//! │ ├───────────────────────────────▶ Total power
|
||||
//! └──────────┘
|
||||
//! ```
|
||||
use asynchronix::model::{Model, Output, Requestor};
|
||||
use asynchronix::simulation::{Mailbox, SimInit};
|
||||
use asynchronix::time::MonotonicTime;
|
||||
|
||||
/// Power supply.
|
||||
pub struct PowerSupply {
|
||||
/// Electrical output [V → A] -- requestor port.
|
||||
pub pwr_out: Requestor<f64, f64>,
|
||||
/// Power consumption [W] -- output port.
|
||||
pub power: Output<f64>,
|
||||
}
|
||||
|
||||
impl PowerSupply {
|
||||
/// Creates a power supply.
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
pwr_out: Default::default(),
|
||||
power: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Voltage setting [V] -- input port.
|
||||
pub async fn voltage_setting(&mut self, voltage: f64) {
|
||||
// Ignore negative values.
|
||||
if voltage < 0.0 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Sum all load currents.
|
||||
let mut total_current = 0.0;
|
||||
for current in self.pwr_out.send(voltage).await {
|
||||
total_current += current;
|
||||
}
|
||||
|
||||
self.power.send(voltage * total_current).await;
|
||||
}
|
||||
}
|
||||
|
||||
impl Model for PowerSupply {}
|
||||
|
||||
/// Resistive load.
pub struct Load {
    /// Power consumption [W] -- output port.
    pub power: Output<f64>,

    /// Load conductance [S] -- internal state.
    conductance: f64,
}

impl Load {
    /// Creates a load with the specified resistance [Ω].
    ///
    /// # Panics
    ///
    /// Panics if the resistance is not strictly positive.
    fn new(resistance: f64) -> Self {
        assert!(resistance > 0.0);
        Self {
            power: Default::default(),
            // Store the conductance (1/R) so the current is a simple product.
            conductance: 1.0 / resistance,
        }
    }

    /// Electrical input [V → A] -- replier port.
    ///
    /// This port receives the applied voltage and returns the load current
    /// (I = V·G). It also broadcasts the dissipated power (P = V·I) on the
    /// `power` output port.
    pub async fn pwr_in(&mut self, voltage: f64) -> f64 {
        let current = voltage * self.conductance;
        self.power.send(voltage * current).await;

        current
    }
}

impl Model for Load {}
|
||||
|
||||
/// Assembles a power supply feeding three parallel resistive loads, then
/// sweeps the supply voltage and checks the dissipated powers.
fn main() {
    // ---------------
    // Bench assembly.
    // ---------------

    // Models: one supply and three loads of 5, 10 and 20 Ω.
    let r1 = 5.0;
    let r2 = 10.0;
    let r3 = 20.0;
    let mut psu = PowerSupply::new();
    let mut load1 = Load::new(r1);
    let mut load2 = Load::new(r2);
    let mut load3 = Load::new(r3);

    // Mailboxes.
    let psu_mbox = Mailbox::new();
    let load1_mbox = Mailbox::new();
    let load2_mbox = Mailbox::new();
    let load3_mbox = Mailbox::new();

    // Connections: the supply's requestor port queries each load's replier.
    psu.pwr_out.connect(Load::pwr_in, &load1_mbox);
    psu.pwr_out.connect(Load::pwr_in, &load2_mbox);
    psu.pwr_out.connect(Load::pwr_in, &load3_mbox);

    // Model handles for simulation: event slots keep only the last sent value.
    let mut psu_power = psu.power.connect_slot().0;
    let mut load1_power = load1.power.connect_slot().0;
    let mut load2_power = load2.power.connect_slot().0;
    let mut load3_power = load3.power.connect_slot().0;
    let psu_addr = psu_mbox.address();

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let mut simu = SimInit::new()
        .add_model(psu, psu_mbox)
        .add_model(load1, load1_mbox)
        .add_model(load2, load2_mbox)
        .add_model(load3, load3_mbox)
        .init(t0);

    // ----------
    // Simulation.
    // ----------

    // Compare two electrical powers for equality [W].
    fn same_power(a: f64, b: f64) -> bool {
        // Use an absolute floating-point epsilon of 1 pW.
        (a - b).abs() < 1e-12
    }

    // Vary the supply voltage, check the load and power supply consumptions.
    for voltage in [10.0, 15.0, 20.0] {
        simu.send_event(PowerSupply::voltage_setting, voltage, &psu_addr);

        // Each load dissipates P = V²/R; the supply delivers the sum.
        let v_square = voltage * voltage;
        assert!(same_power(load1_power.take().unwrap(), v_square / r1));
        assert!(same_power(load2_power.take().unwrap(), v_square / r2));
        assert!(same_power(load3_power.take().unwrap(), v_square / r3));
        assert!(same_power(
            psu_power.take().unwrap(),
            v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3)
        ));
    }
}
|
315
asynchronix/examples/stepper_motor.rs
Normal file
315
asynchronix/examples/stepper_motor.rs
Normal file
@ -0,0 +1,315 @@
|
||||
//! Example: current-controlled stepper motor and its driver.
|
||||
//!
|
||||
//! This example demonstrates in particular:
|
||||
//!
|
||||
//! * self-scheduling methods,
|
||||
//! * model initialization,
|
||||
//! * simulation monitoring with event streams.
|
||||
//!
|
||||
//! ```text
|
||||
//! ┌──────────┐ ┌──────────┐
|
||||
//! PPS │ │ coil currents │ │ position
|
||||
//! Pulse rate ●─────────▶│ Driver ├───────────────▶│ Motor ├──────────▶
|
||||
//! (±freq) │ │ (IA, IB) │ │ (0:199)
|
||||
//! └──────────┘ └──────────┘
|
||||
//! ```
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::time::Duration;
|
||||
|
||||
use asynchronix::model::{InitializedModel, Model, Output};
|
||||
use asynchronix::simulation::{Mailbox, SimInit};
|
||||
use asynchronix::time::{MonotonicTime, Scheduler};
|
||||
|
||||
/// Stepper motor.
pub struct Motor {
    /// Position [-] -- output port.
    pub position: Output<u16>,

    /// Position [-] -- internal state (always in `0..STEPS_PER_REV`).
    pos: u16,
    /// Torque applied by the load [N·m] -- internal state.
    torque: f64,
}

impl Motor {
    /// Number of steps per revolution.
    pub const STEPS_PER_REV: u16 = 200;
    /// Torque constant of the motor [N·m·A⁻¹].
    pub const TORQUE_CONSTANT: f64 = 1.0;

    /// Creates a motor with the specified initial position.
    fn new(position: u16) -> Self {
        Self {
            position: Default::default(),
            // Wrap the initial position into the valid step range.
            pos: position % Self::STEPS_PER_REV,
            torque: 0.0,
        }
    }

    /// Coil currents [A] -- input port.
    ///
    /// For the sake of simplicity, we do as if the rotor rotates
    /// instantaneously. If the current is too weak to overcome the load or when
    /// attempting to move to an opposite phase, the position remains unchanged.
    ///
    /// # Panics
    ///
    /// Panics if either current is NaN or if both coils carry current at once.
    pub async fn current_in(&mut self, current: (f64, f64)) {
        assert!(!current.0.is_nan() && !current.1.is_nan());

        // Map the energized coil and the sign of its current to one of the 4
        // electrical phases (0..=3); a fully de-energized motor is a no-op.
        let (target_phase, abs_current) = match (current.0 != 0.0, current.1 != 0.0) {
            (false, false) => return,
            (true, false) => (if current.0 > 0.0 { 0 } else { 2 }, current.0.abs()),
            (false, true) => (if current.1 > 0.0 { 1 } else { 3 }, current.1.abs()),
            _ => panic!("current detected in both coils"),
        };

        // The rotor stalls if the current cannot overcome the load torque.
        //
        // NOTE(review): this compares the current against
        // `TORQUE_CONSTANT * torque` whereas the stall current is
        // dimensionally `torque / TORQUE_CONSTANT`; the two only coincide
        // because `TORQUE_CONSTANT == 1.0` -- worth confirming if the
        // constant ever changes.
        if abs_current < Self::TORQUE_CONSTANT * self.torque {
            return;
        }
        // Phase delta of 0 keeps the rotor in place; ±2 is the opposite phase
        // (no defined direction); ±1 (mod 4) moves one step forward/backward.
        let pos_delta = match target_phase - (self.pos % 4) as i8 {
            0 | 2 | -2 => return,
            1 | -3 => 1,
            // Step backward: add STEPS_PER_REV - 1 to avoid unsigned underflow.
            -1 | 3 => Self::STEPS_PER_REV - 1,
            _ => unreachable!(),
        };

        self.pos = (self.pos + pos_delta) % Self::STEPS_PER_REV;
        self.position.send(self.pos).await;
    }

    /// Torque applied by the load [N·m] -- input port.
    ///
    /// # Panics
    ///
    /// Panics if the torque is negative.
    pub fn load(&mut self, torque: f64) {
        assert!(torque >= 0.0);

        self.torque = torque;
    }
}

impl Model for Motor {
    /// Broadcasts the initial position of the motor.
    fn init(
        mut self,
        _scheduler: &Scheduler<Self>,
    ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
        Box::pin(async move {
            self.position.send(self.pos).await;

            self.into()
        })
    }
}
|
||||
|
||||
/// Stepper motor driver.
pub struct Driver {
    /// Coil A and coil B currents [A] -- output port.
    pub current_out: Output<(f64, f64)>,

    /// Requested pulse rate (pulse per second) [Hz] -- internal state.
    pps: f64,
    /// Phase for the next pulse (= 0, 1, 2 or 3) -- internal state.
    next_phase: u8,
    /// Nominal coil current (absolute value) [A] -- constant.
    current: f64,
}

impl Driver {
    /// Minimum supported pulse rate [Hz].
    const MIN_PPS: f64 = 1.0;
    /// Maximum supported pulse rate [Hz].
    const MAX_PPS: f64 = 1_000.0;

    /// Creates a new driver with the specified nominal current.
    pub fn new(nominal_current: f64) -> Self {
        Self {
            current_out: Default::default(),
            pps: 0.0,
            next_phase: 0,
            current: nominal_current,
        }
    }

    /// Sets the pulse rate (sign = direction) [Hz] -- input port.
    ///
    /// NOTE(review): a requested rate of 0.0 is clamped up to `MIN_PPS`
    /// because `(+0.0).signum()` is `1.0` in Rust, so the motor cannot be
    /// stopped through this port -- confirm this is intended.
    pub async fn pulse_rate(&mut self, pps: f64, scheduler: &Scheduler<Self>) {
        // Clamp the magnitude while preserving the requested direction.
        let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS);
        if pps == self.pps {
            return;
        }

        let is_idle = self.pps == 0.0;
        self.pps = pps;

        // Trigger the rotation if the motor is currently idle. Otherwise the
        // new value will be accounted for at the next pulse.
        if is_idle {
            self.send_pulse((), scheduler).await;
        }
    }

    /// Sends a pulse and schedules the next one.
    ///
    /// Note: self-scheduling async methods must be for now defined with an
    /// explicit signature instead of `async fn` due to a rustc issue.
    fn send_pulse<'a>(
        &'a mut self,
        _: (),
        scheduler: &'a Scheduler<Self>,
    ) -> impl Future<Output = ()> + Send + 'a {
        async move {
            // Energize the coil matching the current phase.
            let current_out = match self.next_phase {
                0 => (self.current, 0.0),
                1 => (0.0, self.current),
                2 => (-self.current, 0.0),
                3 => (0.0, -self.current),
                _ => unreachable!(),
            };
            self.current_out.send(current_out).await;

            // Stop the pulse train if the rate was reset to zero meanwhile.
            if self.pps == 0.0 {
                return;
            }

            // Advance the phase by ±1 modulo 4: `signum() + 4.0` yields 5
            // (≡ +1 mod 4) when rotating forward and 3 (≡ -1 mod 4) when
            // rotating backward.
            self.next_phase = (self.next_phase + (self.pps.signum() + 4.0) as u8) % 4;

            let pulse_duration = Duration::from_secs_f64(1.0 / self.pps.abs());

            // Schedule the next pulse.
            scheduler
                .schedule_in(pulse_duration, Self::send_pulse, ())
                .unwrap();
        }
    }
}

impl Model for Driver {}
|
||||
|
||||
/// Assembles a driver and a stepper motor, then exercises start, stall,
/// recovery and direction reversal while tracking every position update.
fn main() {
    // ---------------
    // Bench assembly.
    // ---------------

    // Models.
    let init_pos = 123;
    let mut motor = Motor::new(init_pos);
    let mut driver = Driver::new(1.0);

    // Mailboxes.
    let motor_mbox = Mailbox::new();
    let driver_mbox = Mailbox::new();

    // Connections.
    driver.current_out.connect(Motor::current_in, &motor_mbox);

    // Model handles for simulation: an event stream captures every position
    // update (unlike a slot, which keeps only the last one).
    let mut position = motor.position.connect_stream().0;
    let motor_addr = motor_mbox.address();
    let driver_addr = driver_mbox.address();

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let mut simu = SimInit::new()
        .add_model(driver, driver_mbox)
        .add_model(motor, motor_mbox)
        .init(t0);

    // ----------
    // Simulation.
    // ----------

    // Check initial conditions: `Motor::init` broadcasts the initial position.
    let mut t = t0;
    assert_eq!(simu.time(), t);
    assert_eq!(position.next(), Some(init_pos));
    assert!(position.next().is_none());

    // Start the motor in 2s with a PPS of 10Hz.
    simu.schedule_in(
        Duration::from_secs(2),
        Driver::pulse_rate,
        10.0,
        &driver_addr,
    )
    .unwrap();

    // Advance simulation time to two next events.
    simu.step();
    t += Duration::new(2, 0);
    assert_eq!(simu.time(), t);
    simu.step();
    t += Duration::new(0, 100_000_000);
    assert_eq!(simu.time(), t);

    // Whichever the starting position, after two phase increments from the
    // driver the rotor should have synchronized with the driver, with a
    // position given by this beautiful formula.
    let mut pos = (((init_pos + 1) / 4) * 4 + 1) % Motor::STEPS_PER_REV;
    assert_eq!(position.by_ref().last().unwrap(), pos);

    // Advance simulation time by 0.9s, which with a 10Hz PPS should correspond
    // to 9 position increments.
    simu.step_by(Duration::new(0, 900_000_000));
    t += Duration::new(0, 900_000_000);
    assert_eq!(simu.time(), t);
    for _ in 0..9 {
        pos = (pos + 1) % Motor::STEPS_PER_REV;
        assert_eq!(position.next(), Some(pos));
    }
    assert!(position.next().is_none());

    // Increase the load beyond the torque limit for a 1A driver current.
    simu.send_event(Motor::load, 2.0, &motor_addr);

    // Advance simulation time and check that the motor is blocked.
    simu.step();
    t += Duration::new(0, 100_000_000);
    assert_eq!(simu.time(), t);
    assert!(position.next().is_none());

    // Do it again.
    simu.step();
    t += Duration::new(0, 100_000_000);
    assert_eq!(simu.time(), t);
    assert!(position.next().is_none());

    // Decrease the load below the torque limit for a 1A driver current and
    // advance simulation time.
    simu.send_event(Motor::load, 0.5, &motor_addr);
    simu.step();
    t += Duration::new(0, 100_000_000);

    // The motor should start moving again, but since the phase was incremented
    // 3 times (out of 4 phases) while the motor was blocked, the motor actually
    // makes a step backward before it moves forward again.
    assert_eq!(simu.time(), t);
    pos = (pos + Motor::STEPS_PER_REV - 1) % Motor::STEPS_PER_REV;
    assert_eq!(position.next(), Some(pos));

    // Advance simulation time by 0.7s, which with a 10Hz PPS should correspond
    // to 7 position increments.
    simu.step_by(Duration::new(0, 700_000_000));
    t += Duration::new(0, 700_000_000);
    assert_eq!(simu.time(), t);
    for _ in 0..7 {
        pos = (pos + 1) % Motor::STEPS_PER_REV;
        assert_eq!(position.next(), Some(pos));
    }
    assert!(position.next().is_none());

    // Now make the motor rotate in the opposite direction. Note that this
    // driver only accounts for a new PPS at the next pulse.
    simu.send_event(Driver::pulse_rate, -10.0, &driver_addr);
    simu.step();
    t += Duration::new(0, 100_000_000);
    assert_eq!(simu.time(), t);
    pos = (pos + 1) % Motor::STEPS_PER_REV;
    assert_eq!(position.next(), Some(pos));

    // Advance simulation time by 1.9s, which with a -10Hz PPS should correspond
    // to 19 position decrements.
    simu.step_by(Duration::new(1, 900_000_000));
    t += Duration::new(1, 900_000_000);
    assert_eq!(simu.time(), t);
    pos = (pos + Motor::STEPS_PER_REV - 19) % Motor::STEPS_PER_REV;
    assert_eq!(position.by_ref().last(), Some(pos));
}
|
397
asynchronix/src/channel.rs
Normal file
397
asynchronix/src/channel.rs
Normal file
@ -0,0 +1,397 @@
|
||||
//! Multiple-producer single-consumer Channel for communication between
|
||||
//! simulation models.
|
||||
#![warn(missing_docs, missing_debug_implementations, unreachable_pub)]
|
||||
|
||||
mod event;
|
||||
mod queue;
|
||||
|
||||
use std::error;
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::num::NonZeroUsize;
|
||||
use std::sync::atomic::{self, AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
|
||||
use diatomic_waker::primitives::DiatomicWaker;
|
||||
use recycle_box::RecycleBox;
|
||||
|
||||
use event::Event;
|
||||
use queue::{PopError, PushError, Queue};
|
||||
use recycle_box::coerce_box;
|
||||
|
||||
use crate::model::Model;
|
||||
use crate::time::Scheduler;
|
||||
|
||||
/// Data shared between the receiver and the senders.
struct Inner<M> {
    /// Non-blocking internal queue.
    queue: Queue<dyn MessageFn<M>>,
    /// Signalling primitive used to notify the receiver.
    receiver_signal: DiatomicWaker,
    /// Signalling primitive used to notify one or several senders.
    sender_signal: Event,
    /// Current count of live senders.
    sender_count: AtomicUsize,
}

impl<M: 'static> Inner<M> {
    /// Creates the shared state for a channel with the specified queue
    /// capacity; the sender count starts at zero.
    fn new(capacity: usize) -> Self {
        Self {
            queue: Queue::new(capacity),
            receiver_signal: DiatomicWaker::new(),
            sender_signal: Event::new(),
            sender_count: AtomicUsize::new(0),
        }
    }
}
|
||||
|
||||
/// A receiver which can asynchronously execute `async` messages that take an
/// argument of type `&mut M` and an optional `&Scheduler<M>` argument.
pub(crate) struct Receiver<M> {
    /// Shared data.
    inner: Arc<Inner<M>>,
    /// A recyclable box to temporarily store the `async` closure to be
    /// executed; it is taken while a message's future runs in `recv` and put
    /// back once the future completes.
    future_box: Option<RecycleBox<()>>,
}
|
||||
|
||||
impl<M: Model> Receiver<M> {
    /// Creates a new receiver with the specified capacity.
    ///
    /// # Panic
    ///
    /// The constructor will panic if the requested capacity is 0 or is greater
    /// than `usize::MAX/2 + 1`.
    pub(crate) fn new(capacity: usize) -> Self {
        let inner = Arc::new(Inner::new(capacity));

        Receiver {
            inner,
            future_box: Some(RecycleBox::new(())),
        }
    }

    /// Creates a new sender.
    pub(crate) fn sender(&self) -> Sender<M> {
        // Increase the reference count of senders.
        //
        // Ordering: Relaxed ordering is sufficient here for the same reason it
        // is sufficient for an `Arc` reference count increment: synchronization
        // is only necessary when decrementing the counter since all what is
        // needed is to ensure that all operations until the drop handler is
        // called are visible once the reference count drops to 0.
        self.inner.sender_count.fetch_add(1, Ordering::Relaxed);

        Sender {
            inner: self.inner.clone(),
        }
    }

    /// Receives and executes a message asynchronously, if necessary waiting
    /// until one becomes available.
    ///
    /// Returns `Err(RecvError)` once the channel is closed and no messages
    /// remain.
    pub(crate) async fn recv(
        &mut self,
        model: &mut M,
        scheduler: &Scheduler<M>,
    ) -> Result<(), RecvError> {
        // Wait until a message can be popped or the channel is closed.
        //
        // SAFETY: `DiatomicWaker::wait_until` is unsafe, presumably because it
        // must not be polled concurrently from several tasks; the exclusive
        // `&mut self` borrow appears to guarantee a single concurrent waiter
        // here -- confirm against the `diatomic-waker` safety contract.
        let msg = unsafe {
            self.inner
                .receiver_signal
                .wait_until(|| match self.inner.queue.pop() {
                    Ok(msg) => Some(Some(msg)),
                    Err(PopError::Empty) => None,
                    Err(PopError::Closed) => Some(None),
                })
                .await
        };

        match msg {
            Some(mut msg) => {
                // Consume the message to obtain a boxed future.
                let fut = msg.call_once(model, scheduler, self.future_box.take().unwrap());

                // Now that `msg` was consumed and its slot in the queue was
                // freed, signal to one awaiting sender that one slot is
                // available for sending.
                self.inner.sender_signal.notify(1);

                // Await the future provided by the message.
                let mut fut = RecycleBox::into_pin(fut);
                fut.as_mut().await;

                // Recycle the box so the next message reuses the allocation.
                self.future_box = Some(RecycleBox::vacate_pinned(fut));

                Ok(())
            }
            None => Err(RecvError),
        }
    }

    /// Closes the channel.
    ///
    /// This prevents any further messages from being sent to the channel.
    /// Messages that were already sent can still be received, however, which is
    /// why a call to this method should typically be followed by a loop
    /// receiving all remaining messages.
    ///
    /// For this reason, no counterpart to [`Sender::is_closed`] is exposed by
    /// the receiver as such method could easily be misused and lead to lost
    /// messages. Instead, messages should be received until a [`RecvError`] is
    /// returned.
    #[allow(unused)]
    pub(crate) fn close(&self) {
        if !self.inner.queue.is_closed() {
            self.inner.queue.close();

            // Notify all blocked senders that the channel is closed.
            self.inner.sender_signal.notify(usize::MAX);
        }
    }

    /// Returns a unique identifier for the channel.
    ///
    /// All channels are guaranteed to have different identifiers at any given
    /// time, but an identifier may be reused after all handles to a channel
    /// have been dropped.
    pub(crate) fn channel_id(&self) -> ChannelId {
        // The address of the shared `Inner` allocation is non-null and stable
        // for the channel's lifetime, making it a suitable unique id.
        ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
    }
}
|
||||
|
||||
impl<M> Drop for Receiver<M> {
    /// Dropping the sole receiver closes the channel for good.
    fn drop(&mut self) {
        self.inner.queue.close();

        // Notify all blocked senders that the channel is closed.
        self.inner.sender_signal.notify(usize::MAX);
    }
}

impl<M> fmt::Debug for Receiver<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Receiver").finish_non_exhaustive()
    }
}
|
||||
|
||||
/// A handle to a channel that can send messages.
///
/// Multiple [`Sender`] handles can be created using the [`Receiver::sender`]
/// method or via cloning.
pub(crate) struct Sender<M: 'static> {
    /// Shared data; the live-sender count in it is kept up to date by
    /// `Clone` and `Drop`.
    inner: Arc<Inner<M>>,
}
|
||||
|
||||
impl<M: Model> Sender<M> {
    /// Sends a message, if necessary waiting until enough capacity becomes
    /// available in the channel.
    ///
    /// Returns `Err(SendError)` if the channel was closed before the message
    /// could be enqueued.
    pub(crate) async fn send<F>(&self, msg_fn: F) -> Result<(), SendError>
    where
        F: for<'a> FnOnce(
                &'a mut M,
                &'a Scheduler<M>,
                RecycleBox<()>,
            ) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
            + Send
            + 'static,
    {
        // Define a closure that boxes the argument in a type-erased
        // `RecycleBox`. It is wrapped in an `Option` so it can be recovered
        // and retried when the push fails due to a full queue.
        let mut msg_fn = Some(|vacated_box| -> RecycleBox<dyn MessageFn<M>> {
            coerce_box!(RecycleBox::recycle(vacated_box, MessageFnOnce::new(msg_fn)))
        });

        // Wait until the message is pushed or the channel is closed.
        let success = self
            .inner
            .sender_signal
            .wait_until(|| {
                match self.inner.queue.push(msg_fn.take().unwrap()) {
                    Ok(()) => Some(true),
                    Err(PushError::Full(m)) => {
                        // Recycle the message.
                        msg_fn = Some(m);

                        None
                    }
                    Err(PushError::Closed) => Some(false),
                }
            })
            .await;

        if success {
            // Let the receiver know that a message is available.
            self.inner.receiver_signal.notify();

            Ok(())
        } else {
            Err(SendError)
        }
    }

    /// Closes the channel.
    ///
    /// This prevents any further messages from being sent. Messages that were
    /// already sent can still be received.
    #[allow(unused)]
    pub(crate) fn close(&self) {
        self.inner.queue.close();

        // Notify the receiver and all blocked senders that the channel is
        // closed.
        self.inner.receiver_signal.notify();
        self.inner.sender_signal.notify(usize::MAX);
    }

    /// Checks if the channel is closed.
    ///
    /// This can happen either because the [`Receiver`] was dropped or because
    /// one of the [`Sender::close`] or [`Receiver::close`] method was called.
    #[allow(unused)]
    pub(crate) fn is_closed(&self) -> bool {
        self.inner.queue.is_closed()
    }

    /// Returns a unique identifier for the channel.
    ///
    /// All channels are guaranteed to have different identifiers at any given
    /// time, but an identifier may be reused after all handles to a channel
    /// have been dropped.
    pub(crate) fn channel_id(&self) -> ChannelId {
        // The address of the shared `Inner` allocation is non-null and stable
        // for the channel's lifetime, making it a suitable unique id.
        ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
    }
}
|
||||
|
||||
impl<M> Clone for Sender<M> {
    fn clone(&self) -> Self {
        // Increase the reference count of senders.
        //
        // Ordering: Relaxed ordering is sufficient here for the same reason it
        // is sufficient for an `Arc` reference count increment: synchronization
        // is only necessary when decrementing the counter since all what is
        // needed is to ensure that all operations until the drop handler is
        // called are visible once the reference count drops to 0.
        self.inner.sender_count.fetch_add(1, Ordering::Relaxed);

        Self {
            inner: self.inner.clone(),
        }
    }
}
|
||||
|
||||
impl<M: 'static> Drop for Sender<M> {
    /// Decrements the live-sender count and, when the last sender goes away,
    /// closes the channel and wakes the receiver.
    fn drop(&mut self) {
        // Decrease the reference count of senders.
        //
        // Ordering: Release ordering is necessary for the same reason it is
        // necessary for an `Arc` reference count decrement: it ensures that all
        // operations performed by this sender before it was dropped will be
        // visible once the sender count drops to 0.
        if self.inner.sender_count.fetch_sub(1, Ordering::Release) == 1
            && !self.inner.queue.is_closed()
        {
            // Make sure that the notified receiver sees all operations
            // performed by all dropped senders.
            //
            // Ordering: Acquire is necessary to synchronize with the Release
            // decrement operations. Note that the fence synchronizes with _all_
            // decrement operations since the chain of counter decrements forms
            // a Release sequence.
            atomic::fence(Ordering::Acquire);

            self.inner.queue.close();

            // Notify the receiver that the channel is closed.
            self.inner.receiver_signal.notify();
        }
    }
}
|
||||
|
||||
impl<M> fmt::Debug for Sender<M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // NOTE(review): prints "Address" rather than "Sender" -- presumably
        // because this type backs a public `Address` handle; confirm this is
        // intended and not a copy-paste leftover.
        f.debug_struct("Address").finish_non_exhaustive()
    }
}
|
||||
|
||||
/// A closure that can be called once to create a future boxed in a `RecycleBox`
/// from an `&mut M`, a `&Scheduler<M>` and an empty `RecycleBox`.
///
/// This is basically a workaround to emulate an `FnOnce` with the equivalent of
/// an `FnMut` so that it is possible to call it as a `dyn` trait stored in a
/// custom pointer type like `RecycleBox` (a `Box<dyn FnOnce>` would not need
/// this because it implements the magical `DerefMove` trait and therefore can
/// be used to call an `FnOnce`).
trait MessageFn<M: Model>: Send {
    /// A method that can be executed once.
    ///
    /// # Panics
    ///
    /// This method may panic if called more than once.
    fn call_once<'a>(
        &mut self,
        model: &'a mut M,
        scheduler: &'a Scheduler<M>,
        recycle_box: RecycleBox<()>,
    ) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>;
}
|
||||
|
||||
/// A `MessageFn` implementation wrapping an async `FnOnce`.
struct MessageFnOnce<F, M> {
    // The `Option` makes the one-shot call possible through `&mut self`:
    // the closure is `take`n out on the first (and only) call.
    msg_fn: Option<F>,
    // Makes the type generic over `M` without storing an `M`.
    _phantom: PhantomData<fn(&mut M)>,
}
impl<F, M> MessageFnOnce<F, M> {
    /// Wraps the provided closure.
    fn new(msg_fn: F) -> Self {
        Self {
            msg_fn: Some(msg_fn),
            _phantom: PhantomData,
        }
    }
}
impl<F, M: Model> MessageFn<M> for MessageFnOnce<F, M>
where
    F: for<'a> FnOnce(
            &'a mut M,
            &'a Scheduler<M>,
            RecycleBox<()>,
        ) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
        + Send,
{
    /// Calls the wrapped closure.
    ///
    /// # Panics
    ///
    /// Panics if called a second time (the closure was already taken).
    fn call_once<'a>(
        &mut self,
        model: &'a mut M,
        scheduler: &'a Scheduler<M>,
        recycle_box: RecycleBox<()>,
    ) -> RecycleBox<dyn Future<Output = ()> + Send + 'a> {
        let closure = self.msg_fn.take().unwrap();

        (closure)(model, scheduler, recycle_box)
    }
}
|
||||
|
||||
/// Unique identifier for a channel.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct ChannelId(NonZeroUsize);

impl fmt::Display for ChannelId {
    /// Formats the identifier as its raw integer value.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0.get())
    }
}
|
||||
|
||||
/// An error returned when an attempt to send a message asynchronously is
/// unsuccessful.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct SendError;

// `SendError` now implements `Error`/`Display` for consistency with
// `RecvError`, so send failures can be propagated with `?` into boxed errors.
impl error::Error for SendError {}

impl fmt::Display for SendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        "sending into a closed channel".fmt(f)
    }
}

/// An error returned when an attempt to receive a message asynchronously is
/// unsuccessful.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) struct RecvError;

impl error::Error for RecvError {}

impl fmt::Display for RecvError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        "receiving from a closed channel".fmt(f)
    }
}
|
1075
asynchronix/src/channel/event.rs
Normal file
1075
asynchronix/src/channel/event.rs
Normal file
File diff suppressed because it is too large
Load Diff
826
asynchronix/src/channel/queue.rs
Normal file
826
asynchronix/src/channel/queue.rs
Normal file
@ -0,0 +1,826 @@
|
||||
//! A bounded MPSC queue, based on Dmitry Vyukov's MPMC queue.
|
||||
//!
|
||||
//! The messages stored in the queue are async closures that can be called with
|
||||
//! a `&mut M` argument and an empty `RecycleBox` to generate a boxed future.
|
||||
|
||||
use std::cmp;
|
||||
use std::fmt;
|
||||
use std::mem::{self, ManuallyDrop};
|
||||
use std::ops::Deref;
|
||||
use std::ops::DerefMut;
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crossbeam_utils::CachePadded;
|
||||
use recycle_box::RecycleBox;
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
use crate::loom_exports::debug_or_loom_assert_eq;
|
||||
use crate::loom_exports::sync::atomic::AtomicUsize;
|
||||
|
||||
/// A message borrowed from the queue.
///
/// The borrowed message should be dropped as soon as possible because its slot
/// in the queue cannot be re-used until then.
///
/// # Leaks
///
/// Leaking this borrow will eventually prevent more messages to be pushed to
/// the queue.
pub(super) struct MessageBorrow<'a, T: ?Sized> {
    /// Queue the message was popped from; used on drop to return the box.
    queue: &'a Queue<T>,
    /// The borrowed message; wrapped in `ManuallyDrop` so the box can be
    /// vacated and recycled on drop instead of deallocated.
    msg: ManuallyDrop<RecycleBox<T>>,
    /// Index of the slot the message occupied.
    index: usize,
    /// Stamp to store in the slot once the box has been returned.
    stamp: usize,
}

impl<'a, T: ?Sized> Deref for MessageBorrow<'a, T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        &self.msg
    }
}

impl<'a, T: ?Sized> DerefMut for MessageBorrow<'a, T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.msg
    }
}

impl<'a, T: ?Sized> Drop for MessageBorrow<'a, T> {
    /// Returns the vacated box to its queue slot and marks the slot as empty.
    fn drop(&mut self) {
        let slot = &self.queue.buffer[self.index];

        // Safety: the content of the `ManuallyDrop` will not be accessed anymore.
        let recycle_box = RecycleBox::vacate(unsafe { ManuallyDrop::take(&mut self.msg) });

        // Give the box back to the queue.
        //
        // Safety: the slot can be safely accessed because it has not yet been
        // marked as empty.
        unsafe {
            slot.message
                .with_mut(|p| *p = MessageBox::Vacated(recycle_box));
        }

        // Mark the slot as empty; `Release` publishes the box write above to
        // the producer that will next claim this slot.
        slot.stamp.store(self.stamp, Ordering::Release);
    }
}
impl<'a, M> fmt::Debug for MessageBorrow<'a, M> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("MessageBorrow").finish_non_exhaustive()
    }
}
|
||||
|
||||
/// Content of a queue slot's message box, tracking its population state.
enum MessageBox<T: ?Sized> {
    /// The slot holds a message.
    Populated(RecycleBox<T>),
    /// The slot is empty but keeps its allocation for reuse.
    Vacated(RecycleBox<()>),
    /// Transient placeholder used while the box is moved out during a push or
    /// pop (see the `mem::replace` calls in `Queue::push`/`Queue::pop`).
    None,
}
|
||||
|
||||
/// A queue slot that holds a stamp and either a boxed message or an empty box.
struct Slot<T: ?Sized> {
    /// Synchronization stamp; see the `Queue` documentation for its layout.
    stamp: AtomicUsize,
    /// The slot's message box, either populated or vacated.
    message: UnsafeCell<MessageBox<T>>,
}
|
||||
|
||||
/// A fast MPSC queue that stores its items in recyclable boxes.
///
/// The item may be unsized.
///
/// The enqueue position, dequeue position and the slot stamps are all stored as
/// `usize` and share the following layout:
///
/// ```text
///
/// | <- MSB                                         LSB -> |
/// | Sequence count | flag (1 bit) | Buffer index           |
///
/// ```
///
/// The purpose of the flag differs depending on the field:
///
/// - enqueue position: if set, the flag signals that the queue has been closed
///   by either the consumer or a producer,
/// - dequeue position: the flag is not used (always 0),
/// - slot stamp: the flag de-facto extends the mantissa of the buffer index,
///   which makes it in particular possible to support queues with a capacity of
///   1 without special-casing.
///
pub(super) struct Queue<T: ?Sized> {
    /// Buffer position of the slot to which the next closure will be written.
    ///
    /// The position stores the buffer index in the least significant bits and a
    /// sequence counter in the most significant bits.
    enqueue_pos: CachePadded<AtomicUsize>,

    /// Buffer position of the slot from which the next closure will be read.
    ///
    /// This is only ever mutated from a single thread but it must be stored in
    /// an atomic or an `UnsafeCell` since it is shared between the consumers
    /// and the producer. The reason it is shared is that the drop handler of
    /// the last `Inner` owner (which may be a producer) needs access to the
    /// dequeue position.
    dequeue_pos: CachePadded<UnsafeCell<usize>>,

    /// Buffer holding the closures and their stamps.
    buffer: Box<[Slot<T>]>,

    /// Bit mask covering both the buffer index and the 1-bit flag.
    right_mask: usize,

    /// Bit mask for the 1-bit flag, used as closed-channel flag in the enqueue
    /// position.
    closed_channel_mask: usize,
}
|
||||
|
||||
impl<T: ?Sized> Queue<T> {
    /// Creates a new queue with the specified capacity.
    ///
    /// # Panics
    ///
    /// Panics if the capacity is zero or greater than `1 << (usize::BITS - 1)`.
    pub(super) fn new(capacity: usize) -> Self {
        assert!(capacity >= 1, "the capacity must be 1 or greater");

        assert!(
            capacity <= (1 << (usize::BITS - 1)),
            "the capacity may not exceed {}",
            1usize << (usize::BITS - 1)
        );

        // Allocate a buffer initialized with linearly increasing stamps.
        let mut buffer = Vec::with_capacity(capacity);
        for i in 0..capacity {
            buffer.push(Slot {
                stamp: AtomicUsize::new(i),
                message: UnsafeCell::new(MessageBox::Vacated(RecycleBox::new(()))),
            });
        }

        // The flag occupies the lowest bit position able to address all buffer
        // indices, i.e. the smallest power of two not less than the capacity.
        let closed_channel_mask = capacity.next_power_of_two();
        let right_mask = (closed_channel_mask << 1).wrapping_sub(1);

        Queue {
            enqueue_pos: CachePadded::new(AtomicUsize::new(0)),
            dequeue_pos: CachePadded::new(UnsafeCell::new(0)),
            buffer: buffer.into(),
            right_mask,
            closed_channel_mask,
        }
    }

    /// Attempts to push an item in the queue.
    ///
    /// The `msg_fn` closure is only invoked once a slot has been successfully
    /// claimed; it receives the slot's vacated box and must return it
    /// re-populated with the message. On `PushError::Full` the closure is
    /// returned unused to the caller.
    pub(super) fn push<F>(&self, msg_fn: F) -> Result<(), PushError<F>>
    where
        F: FnOnce(RecycleBox<()>) -> RecycleBox<T>,
    {
        let mut enqueue_pos = self.enqueue_pos.load(Ordering::Relaxed);

        loop {
            // The closed flag is folded into the enqueue position, so a single
            // load covers both the position and the channel state.
            if enqueue_pos & self.closed_channel_mask != 0 {
                return Err(PushError::Closed);
            }

            let slot = &self.buffer[enqueue_pos & self.right_mask];
            let stamp = slot.stamp.load(Ordering::Acquire);

            // Interpreted as a signed difference so that wrap-around of the
            // sequence count is handled transparently.
            let stamp_delta = stamp.wrapping_sub(enqueue_pos) as isize;

            match stamp_delta.cmp(&0) {
                cmp::Ordering::Equal => {
                    // The enqueue position matches the stamp: a push can be
                    // attempted.

                    // Try incrementing the enqueue position.
                    match self.enqueue_pos.compare_exchange_weak(
                        enqueue_pos,
                        self.next_queue_pos(enqueue_pos),
                        Ordering::Relaxed,
                        Ordering::Relaxed,
                    ) {
                        Ok(_) => {
                            // Write the closure into the slot and update the stamp.
                            unsafe {
                                slot.message.with_mut(|msg_fn_box| {
                                    let vacated_box =
                                        match mem::replace(&mut *msg_fn_box, MessageBox::None) {
                                            MessageBox::Vacated(b) => b,
                                            _ => unreachable!(),
                                        };

                                    *msg_fn_box = MessageBox::Populated(msg_fn(vacated_box))
                                });
                            };
                            // Publish the write: `Release` makes the populated
                            // box visible to the consumer that observes the
                            // incremented stamp.
                            slot.stamp.store(stamp.wrapping_add(1), Ordering::Release);

                            return Ok(());
                        }
                        Err(pos) => {
                            enqueue_pos = pos;
                        }
                    }
                }
                cmp::Ordering::Less => {
                    // The sequence count of the stamp is smaller than that of the
                    // enqueue position: the closure it contains has not been popped
                    // yet, so report a full queue.
                    return Err(PushError::Full(msg_fn));
                }
                cmp::Ordering::Greater => {
                    // The stamp is greater than the enqueue position: this means we
                    // raced with a concurrent producer which has already (i)
                    // incremented the enqueue position and (ii) written a closure to
                    // this slot. A retry is required.
                    enqueue_pos = self.enqueue_pos.load(Ordering::Relaxed);
                }
            }
        }
    }

    /// Attempts to pop an item from the queue.
    ///
    /// # Safety
    ///
    /// This method may not be called concurrently from multiple threads.
    pub(super) unsafe fn pop(&self) -> Result<MessageBorrow<'_, T>, PopError> {
        let dequeue_pos = self.dequeue_pos.with(|p| *p);
        let index = dequeue_pos & self.right_mask;
        let slot = &self.buffer[index];
        let stamp = slot.stamp.load(Ordering::Acquire);

        if dequeue_pos != stamp {
            // The stamp is ahead of the dequeue position by 1 increment: the
            // closure can be popped.
            debug_or_loom_assert_eq!(stamp, dequeue_pos + 1);

            // Only this thread can access the dequeue position so there is no
            // need to increment the position atomically with a `fetch_add`.
            self.dequeue_pos
                .with_mut(|p| *p = self.next_queue_pos(dequeue_pos));

            // Extract the closure from the slot. The stamp that marks the slot
            // as empty again (dequeue position + one sequence increment) is
            // stored lazily, by the `MessageBorrow` drop handler.
            slot.message.with_mut(
                |msg_box| match mem::replace(&mut *msg_box, MessageBox::None) {
                    MessageBox::Populated(msg) => {
                        let borrow = MessageBorrow {
                            queue: self,
                            msg: ManuallyDrop::new(msg),
                            index,
                            stamp: stamp.wrapping_add(self.right_mask),
                        };

                        Ok(borrow)
                    }
                    _ => unreachable!(),
                },
            )
        } else {
            // Check whether the queue was closed. Even if the closed flag is
            // set and the slot is empty, there might still be a producer that
            // started a push before the channel was closed but has not yet
            // updated the stamp. For this reason, before returning
            // `PopError::Closed` it is necessary to check as well that the
            // enqueue position matches the dequeue position.
            //
            // Ordering: Relaxed ordering is enough since no closure will be read.
            if self.enqueue_pos.load(Ordering::Relaxed) == (dequeue_pos | self.closed_channel_mask)
            {
                Err(PopError::Closed)
            } else {
                Err(PopError::Empty)
            }
        }
    }

    /// Closes the queue.
    pub(super) fn close(&self) {
        // Set the closed-channel flag.
        //
        // Ordering: Relaxed ordering is enough here since neither the producers
        // nor the consumer rely on this flag for synchronizing reads and
        // writes.
        self.enqueue_pos
            .fetch_or(self.closed_channel_mask, Ordering::Relaxed);
    }

    /// Checks if the channel has been closed.
    ///
    /// Note that even if the channel is closed, some messages may still be
    /// present in the queue so further calls to `pop` may still succeed.
    pub(super) fn is_closed(&self) -> bool {
        // Read the closed-channel flag.
        //
        // Ordering: Relaxed ordering is enough here since this is merely an
        // informational function and cannot lead to any unsafety. If the load
        // is stale, the worse that can happen is that the queue is seen as open
        // when it is in fact already closed, which is OK since the caller must
        // anyway be resilient to the case where the channel closes right after
        // `is_closed` returns `false`.
        self.enqueue_pos.load(Ordering::Relaxed) & self.closed_channel_mask != 0
    }

    /// Increment the queue position, incrementing the sequence count as well if
    /// the index wraps to 0.
    ///
    /// Precondition when used with enqueue positions: the closed-channel flag
    /// should be cleared.
    #[inline]
    fn next_queue_pos(&self, queue_pos: usize) -> usize {
        debug_or_loom_assert_eq!(queue_pos & self.closed_channel_mask, 0);

        // The queue position cannot wrap around: in the worst case it will
        // overflow the flag bit.
        let new_queue_pos = queue_pos + 1;

        let new_index = new_queue_pos & self.right_mask;

        if new_index < self.buffer.len() {
            new_queue_pos
        } else {
            // The buffer index must wrap to 0 and the sequence count
            // must be incremented.
            let sequence_increment = self.right_mask + 1;
            let sequence_count = queue_pos & !self.right_mask;

            sequence_count.wrapping_add(sequence_increment)
        }
    }
}
|
||||
|
||||
// SAFETY: slot accesses are synchronized through the per-slot stamps
// (acquire/release), producers coordinate through the atomic enqueue position,
// and `pop` is itself `unsafe` with a single-consumer contract; the queue can
// therefore be sent and shared across threads whenever `T` is `Send`.
unsafe impl<T: ?Sized + Send> Send for Queue<T> {}
unsafe impl<T: ?Sized + Send> Sync for Queue<T> {}
|
||||
|
||||
/// Error occurring when pushing into a queue is unsuccessful.
pub(super) enum PushError<F> {
    /// The queue is full.
    ///
    /// The unused message-producing closure is returned to the caller so the
    /// push can be retried later.
    Full(F),
    /// The receiver has been dropped.
    Closed,
}
|
||||
|
||||
/// Error occurring when popping from a queue is unsuccessful.
#[derive(Debug)]
pub(super) enum PopError {
    /// The queue is empty.
    Empty,
    /// All senders have been dropped and the queue is empty.
    Closed,
}
|
||||
|
||||
/// Queue producer.
///
/// This is a safe queue producer proxy used for testing purposes only.
#[cfg(test)]
struct Producer<T: ?Sized> {
    // Shared queue; cloning the producer clones this handle.
    inner: crate::loom_exports::sync::Arc<Queue<T>>,
}
#[cfg(test)]
impl<T: ?Sized> Producer<T> {
    /// Attempts to push an item into the queue.
    fn push<F>(&self, msg_fn: F) -> Result<(), PushError<F>>
    where
        F: FnOnce(RecycleBox<()>) -> RecycleBox<T>,
    {
        self.inner.push(msg_fn)
    }

    /// Closes the queue.
    pub(super) fn close(&self) {
        self.inner.close();
    }

    /// Checks if the queue is closed.
    // Only compiled for regular (non-loom) tests, where it is used.
    #[cfg(not(asynchronix_loom))]
    fn is_closed(&self) -> bool {
        self.inner.is_closed()
    }
}
|
||||
// Generalized from `impl<T>` to `impl<T: ?Sized>` for consistency with the
// `Producer<T: ?Sized>` declaration: without the relaxed bound, producers of
// unsized-message queues could not be cloned.
#[cfg(test)]
impl<T: ?Sized> Clone for Producer<T> {
    /// Clones the producer, creating another handle to the same queue.
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
        }
    }
}
|
||||
|
||||
/// Queue consumer.
///
/// This is a safe queue consumer proxy used for testing purposes only.
#[cfg(test)]
struct Consumer<T: ?Sized> {
    // Shared queue; the consumer is deliberately not `Clone` so that the
    // single-consumer contract of `Queue::pop` holds.
    inner: crate::loom_exports::sync::Arc<Queue<T>>,
}
#[cfg(test)]
impl<T: ?Sized> Consumer<T> {
    /// Attempts to pop an item from the queue.
    fn pop(&mut self) -> Result<MessageBorrow<'_, T>, PopError> {
        // Safety: single-thread access is guaranteed since the consumer does
        // not implement `Clone` and `pop` requires exclusive ownership.
        unsafe { self.inner.pop() }
    }

    /// Closes the queue.
    fn close(&self) {
        self.inner.close();
    }
}
|
||||
|
||||
/// Creates a connected producer/consumer pair over a new queue with the given
/// capacity (testing helper).
#[cfg(test)]
fn queue<T: ?Sized>(capacity: usize) -> (Producer<T>, Consumer<T>) {
    let inner = crate::loom_exports::sync::Arc::new(Queue::new(capacity));

    let producer = Producer {
        inner: inner.clone(),
    };
    // Last use of `inner`: move it into the consumer rather than cloning
    // (the original `inner.clone()` here was redundant).
    let consumer = Consumer { inner };

    (producer, consumer)
}
|
||||
|
||||
/// Regular tests.
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;

    use std::thread;

    // Closing from the producer side: buffered messages remain poppable, then
    // the queue reports `Closed`.
    #[test]
    fn queue_closed_by_sender() {
        let (p, mut c) = queue(3);

        assert!(matches!(c.pop(), Err(PopError::Empty)));

        assert!(matches!(p.push(|b| RecycleBox::recycle(b, 42)), Ok(_)));
        p.close();

        assert_eq!(*c.pop().unwrap(), 42);
        assert!(matches!(c.pop(), Err(PopError::Closed)));
    }

    // Closing from the consumer side: further pushes fail, but buffered
    // messages remain poppable.
    #[test]
    fn queue_closed_by_consumer() {
        let (p, mut c) = queue(3);

        assert_eq!(p.is_closed(), false);
        assert!(matches!(p.push(|b| RecycleBox::recycle(b, 42)), Ok(_)));

        c.close();

        assert_eq!(p.is_closed(), true);
        assert!(matches!(
            p.push(|b| RecycleBox::recycle(b, 13)),
            Err(PushError::Closed)
        ));

        assert_eq!(*c.pop().unwrap(), 42);
        assert!(matches!(c.pop(), Err(PopError::Closed)));
    }

    // Single-producer/single-consumer stress test: checks strict FIFO order
    // under concurrent push and pop. Item count is reduced under Miri.
    fn queue_spsc(capacity: usize) {
        const COUNT: usize = if cfg!(miri) { 50 } else { 100_000 };

        let (p, mut c) = queue(capacity);

        let th_pop = thread::spawn(move || {
            for i in 0..COUNT {
                // Spin until the next message arrives; it must be `i`.
                loop {
                    if let Ok(msg) = c.pop() {
                        assert_eq!(*msg, i);
                        break;
                    }
                }
            }
            assert!(c.pop().is_err());
        });

        let th_push = thread::spawn(move || {
            for i in 0..COUNT {
                // Spin on a full queue until the push succeeds.
                while p.push(|b| RecycleBox::recycle(b, i)).is_err() {}
            }
        });

        th_pop.join().unwrap();
        th_push.join().unwrap();
    }

    #[test]
    fn queue_spsc_capacity_one() {
        queue_spsc(1);
    }
    #[test]
    fn queue_spsc_capacity_two() {
        queue_spsc(2);
    }
    #[test]
    fn queue_spsc_capacity_three() {
        queue_spsc(3);
    }

    // Multi-producer stress test: each of the 4 producers pushes 0..COUNT, so
    // every value must be popped exactly PRODUCER_THREADS times.
    fn queue_mpsc(capacity: usize) {
        const COUNT: usize = if cfg!(miri) { 20 } else { 25_000 };
        const PRODUCER_THREADS: usize = 4;

        let (p, mut c) = queue(capacity);
        let mut push_count = Vec::<usize>::new();
        push_count.resize_with(COUNT, Default::default);

        let th_push: Vec<_> = (0..PRODUCER_THREADS)
            .map(|_| {
                let p = p.clone();

                thread::spawn(move || {
                    for i in 0..COUNT {
                        while p.push(|b| RecycleBox::recycle(b, i)).is_err() {}
                    }
                })
            })
            .collect();

        // Pop every message, tallying how many times each value was seen.
        for _ in 0..COUNT * PRODUCER_THREADS {
            let n = loop {
                if let Ok(x) = c.pop() {
                    break *x;
                }
            };

            push_count[n] += 1;
        }

        for c in push_count {
            assert_eq!(c, PRODUCER_THREADS);
        }

        for th in th_push {
            th.join().unwrap();
        }
    }

    #[test]
    fn queue_mpsc_capacity_one() {
        queue_mpsc(1);
    }
    #[test]
    fn queue_mpsc_capacity_two() {
        queue_mpsc(2);
    }
    #[test]
    fn queue_mpsc_capacity_three() {
        queue_mpsc(3);
    }
}
|
||||
|
||||
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::model::Builder;
    use loom::sync::atomic::AtomicUsize;
    use loom::sync::Arc;
    use loom::thread;

    // Model-checks concurrent pushes against a concurrently popping consumer:
    // the number of successful pushes must equal the number of pops once all
    // producers have completed.
    fn loom_queue_push_pop(
        max_push_per_thread: usize,
        producer_thread_count: usize,
        capacity: usize,
        preemption_bound: usize,
    ) {
        let mut builder = Builder::new();
        // Respect a preemption bound forced through the environment, if any.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(preemption_bound);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(capacity);

            let push_count = Arc::new(AtomicUsize::new(0));

            let producer_threads: Vec<_> = (0..producer_thread_count)
                .map(|_| {
                    let producer = producer.clone();
                    let push_count = push_count.clone();

                    thread::spawn(move || {
                        for i in 0..max_push_per_thread {
                            match producer.push(|b| RecycleBox::recycle(b, i)) {
                                Ok(()) => {}
                                Err(PushError::Full(_)) => {
                                    // A push can fail only if there is not enough capacity.
                                    assert!(capacity < max_push_per_thread * producer_thread_count);

                                    break;
                                }
                                Err(PushError::Closed) => panic!(),
                            }
                            push_count.fetch_add(1, Ordering::Relaxed);
                        }
                    })
                })
                .collect();

            // Pop concurrently with the producers...
            let mut pop_count = 0;
            while consumer.pop().is_ok() {
                pop_count += 1;
            }

            for th in producer_threads {
                th.join().unwrap();
            }

            // ...then drain whatever was pushed after the first drain stopped.
            while consumer.pop().is_ok() {
                pop_count += 1;
            }

            assert_eq!(push_count.load(Ordering::Relaxed), pop_count);
        });
    }

    #[test]
    fn loom_queue_push_pop_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 3, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_no_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 5, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_power_of_two_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(3, 2, 4, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_one_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 1, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_capacity_power_of_two_no_overflow() {
        const DEFAULT_PREEMPTION_BOUND: usize = 5;
        loom_queue_push_pop(2, 2, 4, DEFAULT_PREEMPTION_BOUND);
    }
    #[test]
    fn loom_queue_push_pop_three_producers() {
        const DEFAULT_PREEMPTION_BOUND: usize = 2;
        loom_queue_push_pop(2, 3, 3, DEFAULT_PREEMPTION_BOUND);
    }

    // Checks that dropping the queue drops all items still buffered inside it:
    // once producer, consumer and queue are gone, the `Arc` payload must have
    // a single remaining reference.
    #[test]
    fn loom_queue_drop_items() {
        const CAPACITY: usize = 3;
        const PRODUCER_THREAD_COUNT: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = queue(CAPACITY);
            let item = std::sync::Arc::new(()); // loom does not implement `strong_count()`

            let producer_threads: Vec<_> = (0..PRODUCER_THREAD_COUNT)
                .map(|_| {
                    thread::spawn({
                        let item = item.clone();
                        let producer = producer.clone();

                        move || {
                            assert!(matches!(
                                producer.push(|b| RecycleBox::recycle(b, item)),
                                Ok(_)
                            ));
                        }
                    })
                })
                .collect();

            for th in producer_threads {
                th.join().unwrap();
            }
            drop(producer);
            drop(consumer);

            assert_eq!(std::sync::Arc::strong_count(&item), 1);
        });
    }

    // Model-checks closing from the producer side while another producer may
    // race its own push against the close.
    #[test]
    fn loom_queue_closed_by_producer() {
        const CAPACITY: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(CAPACITY);

            let th_push_close = thread::spawn({
                let producer = producer.clone();

                move || {
                    assert!(matches!(
                        producer.push(|b| RecycleBox::recycle(b, 7)),
                        Ok(_)
                    ));
                    producer.close();
                }
            });

            let th_try_push = thread::spawn({
                let producer = producer.clone();

                // Returns whether the racing push won against the close.
                move || match producer.push(|b| RecycleBox::recycle(b, 13)) {
                    Ok(()) => true,
                    Err(PushError::Closed) => false,
                    _ => panic!(),
                }
            });

            // Drain until the queue reports `Closed`, summing the payloads.
            let mut sum = 0;
            loop {
                match consumer.pop() {
                    Ok(n) => {
                        sum += *n;
                    }
                    Err(PopError::Closed) => break,
                    Err(PopError::Empty) => {}
                };
                thread::yield_now();
            }

            th_push_close.join().unwrap();
            let try_push_success = th_try_push.join().unwrap();
            if try_push_success {
                assert_eq!(sum, 7 + 13);
            } else {
                assert_eq!(sum, 7);
            }
        });
    }

    // Model-checks closing from the consumer side while two producers race
    // their pushes against the close.
    #[test]
    fn loom_queue_closed_by_consumer() {
        const CAPACITY: usize = 3;
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, mut consumer) = queue(CAPACITY);

            let th_try_push1 = thread::spawn({
                let producer = producer.clone();

                move || match producer.push(|b| RecycleBox::recycle(b, 7)) {
                    Ok(()) => true,
                    Err(PushError::Closed) => false,
                    _ => panic!(),
                }
            });

            let th_try_push2 = thread::spawn({
                let producer = producer.clone();

                move || match producer.push(|b| RecycleBox::recycle(b, 13)) {
                    Ok(()) => true,
                    Err(PushError::Closed) => false,
                    _ => panic!(),
                }
            });

            let mut sum = 0;
            consumer.close();

            loop {
                match consumer.pop() {
                    Ok(n) => {
                        sum += *n;
                    }
                    Err(PopError::Closed) => break,
                    Err(PopError::Empty) => {}
                };
                thread::yield_now();
            }

            let try_push1_success = th_try_push1.join().unwrap();
            let try_push2_success = th_try_push2.join().unwrap();
            // The popped sum must reflect exactly the set of pushes that won
            // their race against the close.
            match (try_push1_success, try_push2_success) {
                (true, true) => assert_eq!(sum, 7 + 13),
                (true, false) => assert_eq!(sum, 7),
                (false, true) => assert_eq!(sum, 13),
                (false, false) => {}
            }
        });
    }
}
|
@ -4,7 +4,7 @@
|
||||
|
||||
use std::future::Future;
|
||||
|
||||
use crate::runtime::executor;
|
||||
use crate::executor;
|
||||
|
||||
/// A multi-threaded `async` executor.
|
||||
#[derive(Debug)]
|
||||
|
@ -13,10 +13,10 @@
|
||||
//! treats deadlocking as a normal occurrence. This is because in a
|
||||
//! discrete-time simulator, the simulation of a system at a given time step
|
||||
//! will make as much progress as possible until it technically reaches a
|
||||
//! deadlock. Only then does the simulator advance the simulated time until the
|
||||
//! next "event" extracted from a time-sorted priority queue.
|
||||
//! deadlock. Only then does the simulator advance the simulated time to that of
|
||||
//! the next "event" extracted from a time-sorted priority queue.
|
||||
//!
|
||||
//! The design of the executor is largely influenced by the tokio and go
|
||||
//! The design of the executor is largely influenced by the tokio and Go
|
||||
//! schedulers, both of which are optimized for message-passing applications. In
|
||||
//! particular, it uses fast, fixed-size thread-local work-stealing queues with
|
||||
//! a non-stealable LIFO slot in combination with an injector queue, which
|
||||
@ -32,9 +32,8 @@
|
||||
//! active worker threads is stored in a single atomic variable. This makes it
|
||||
//! possible to rapidly identify free worker threads for stealing operations,
|
||||
//! with the downside that the maximum number of worker threads is currently
|
||||
//! limited to `usize::BITS`. This is unlikely to constitute a limitation in
|
||||
//! practice though since system simulation is not typically embarrassingly
|
||||
//! parallel.
|
||||
//! limited to `usize::BITS`. This is not expected to constitute a limitation in
|
||||
//! practice since system simulation is not typically embarrassingly parallel.
|
||||
//!
|
||||
//! Probably the largest difference with tokio is the task system, which has
|
||||
//! better throughput due to less need for synchronization. This mainly results
|
||||
@ -42,6 +41,7 @@
|
||||
//! notification flag, thus alleviating the need to reset the notification flag
|
||||
//! before polling a future.
|
||||
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::panic::{self, AssertUnwindSafe};
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
@ -52,34 +52,35 @@ use std::time::{Duration, Instant};
|
||||
use crossbeam_utils::sync::{Parker, Unparker};
|
||||
use slab::Slab;
|
||||
|
||||
mod find_bit;
|
||||
mod injector;
|
||||
mod pool_manager;
|
||||
mod queue;
|
||||
mod rng;
|
||||
mod task;
|
||||
mod worker;
|
||||
|
||||
#[cfg(all(test, not(asynchronix_loom)))]
|
||||
mod tests;
|
||||
|
||||
use crate::macros::scoped_thread_local::scoped_thread_local;
|
||||
use crate::util::rng::Rng;
|
||||
|
||||
use self::pool_manager::PoolManager;
|
||||
use self::rng::Rng;
|
||||
use self::task::{CancelToken, Promise, Runnable};
|
||||
use self::worker::Worker;
|
||||
use crate::macros::scoped_local_key::scoped_thread_local;
|
||||
|
||||
type Bucket = injector::Bucket<Runnable, 128>;
|
||||
type Injector = injector::Injector<Runnable, 128>;
|
||||
type LocalQueue = queue::Worker<Runnable, queue::B256>;
|
||||
type Stealer = queue::Stealer<Runnable, queue::B256>;
|
||||
const BUCKET_SIZE: usize = 128;
|
||||
const QUEUE_SIZE: usize = BUCKET_SIZE * 2;
|
||||
|
||||
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
type Bucket = injector::Bucket<Runnable, BUCKET_SIZE>;
|
||||
type Injector = injector::Injector<Runnable, BUCKET_SIZE>;
|
||||
type LocalQueue = st3::fifo::Worker<Runnable>;
|
||||
type Stealer = st3::fifo::Stealer<Runnable>;
|
||||
|
||||
scoped_thread_local!(static LOCAL_WORKER: Worker);
|
||||
scoped_thread_local!(static ACTIVE_TASKS: Mutex<Slab<CancelToken>>);
|
||||
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
/// A multi-threaded `async` executor.
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct Executor {
|
||||
/// Shared executor data.
|
||||
context: Arc<ExecutorContext>,
|
||||
@ -95,15 +96,20 @@ impl Executor {
|
||||
/// Creates an executor that runs futures on a thread pool.
|
||||
///
|
||||
/// The maximum number of threads is set with the `num_threads` parameter.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This will panic if the specified number of threads is zero or is more
|
||||
/// than `usize::BITS`.
|
||||
pub(crate) fn new(num_threads: usize) -> Self {
|
||||
let parker = Parker::new();
|
||||
let unparker = parker.unparker().clone();
|
||||
|
||||
let (local_data, shared_data): (Vec<_>, Vec<_>) = (0..num_threads)
|
||||
let (local_queues_and_parkers, stealers_and_unparkers): (Vec<_>, Vec<_>) = (0..num_threads)
|
||||
.map(|_| {
|
||||
let parker = Parker::new();
|
||||
let unparker = parker.unparker().clone();
|
||||
let local_queue = LocalQueue::new();
|
||||
let local_queue = LocalQueue::new(QUEUE_SIZE);
|
||||
let stealer = local_queue.stealer();
|
||||
|
||||
((local_queue, parker), (stealer, unparker))
|
||||
@ -115,14 +121,13 @@ impl Executor {
|
||||
let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed);
|
||||
assert!(
|
||||
executor_id <= usize::MAX / 2,
|
||||
"{} executors have been instantiated: this is most probably a bug.",
|
||||
usize::MAX / 2
|
||||
"too many executors have been instantiated"
|
||||
);
|
||||
|
||||
let context = Arc::new(ExecutorContext::new(
|
||||
executor_id,
|
||||
unparker,
|
||||
shared_data.into_iter(),
|
||||
stealers_and_unparkers.into_iter(),
|
||||
));
|
||||
let active_tasks = Arc::new(Mutex::new(Slab::new()));
|
||||
|
||||
@ -132,7 +137,7 @@ impl Executor {
|
||||
context.pool_manager.set_all_workers_active();
|
||||
|
||||
// Spawn all worker threads.
|
||||
let worker_handles: Vec<_> = local_data
|
||||
let worker_handles: Vec<_> = local_queues_and_parkers
|
||||
.into_iter()
|
||||
.enumerate()
|
||||
.into_iter()
|
||||
@ -169,6 +174,10 @@ impl Executor {
|
||||
|
||||
/// Spawns a task and returns a promise that can be polled to retrieve the
|
||||
/// task's output.
|
||||
///
|
||||
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||
/// is called.
|
||||
#[allow(unused)]
|
||||
pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
|
||||
where
|
||||
T: Future + Send + 'static,
|
||||
@ -188,8 +197,6 @@ impl Executor {
|
||||
task_entry.insert(cancel_token);
|
||||
self.context.injector.insert_task(runnable);
|
||||
|
||||
self.context.pool_manager.activate_worker();
|
||||
|
||||
promise
|
||||
}
|
||||
|
||||
@ -197,6 +204,9 @@ impl Executor {
|
||||
///
|
||||
/// This is mostly useful to avoid undue reference counting for futures that
|
||||
/// return a `()` type.
|
||||
///
|
||||
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||
/// is called.
|
||||
pub(crate) fn spawn_and_forget<T>(&self, future: T)
|
||||
where
|
||||
T: Future + Send + 'static,
|
||||
@ -215,13 +225,13 @@ impl Executor {
|
||||
|
||||
task_entry.insert(cancel_token);
|
||||
self.context.injector.insert_task(runnable);
|
||||
|
||||
self.context.pool_manager.activate_worker();
|
||||
}
|
||||
|
||||
/// Let the executor run, blocking until all futures have completed or until
|
||||
/// the executor deadlocks.
|
||||
/// Execute spawned tasks, blocking until all futures have completed or
|
||||
/// until the executor reaches a deadlock.
|
||||
pub(crate) fn run(&mut self) {
|
||||
self.context.pool_manager.activate_worker();
|
||||
|
||||
loop {
|
||||
if let Some(worker_panic) = self.context.pool_manager.take_panic() {
|
||||
panic::resume_unwind(worker_panic);
|
||||
@ -247,7 +257,7 @@ impl Drop for Executor {
|
||||
//
|
||||
// A local worker must be set because some tasks may schedule other
|
||||
// tasks when dropped, which requires that a local worker be available.
|
||||
let worker = Worker::new(LocalQueue::new(), self.context.clone());
|
||||
let worker = Worker::new(LocalQueue::new(QUEUE_SIZE), self.context.clone());
|
||||
LOCAL_WORKER.set(&worker, || {
|
||||
// Cancel all pending futures.
|
||||
//
|
||||
@ -274,10 +284,15 @@ impl Drop for Executor {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for Executor {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Executor").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
/// Shared executor context.
|
||||
///
|
||||
/// This contains all executor resources that can be shared between threads.
|
||||
#[derive(Debug)]
|
||||
struct ExecutorContext {
|
||||
/// Injector queue.
|
||||
injector: Injector,
|
||||
@ -294,9 +309,10 @@ impl ExecutorContext {
|
||||
pub(super) fn new(
|
||||
executor_id: usize,
|
||||
executor_unparker: Unparker,
|
||||
shared_data: impl Iterator<Item = (Stealer, Unparker)>,
|
||||
stealers_and_unparkers: impl Iterator<Item = (Stealer, Unparker)>,
|
||||
) -> Self {
|
||||
let (stealers, worker_unparkers): (Vec<_>, Vec<_>) = shared_data.into_iter().unzip();
|
||||
let (stealers, worker_unparkers): (Vec<_>, Vec<_>) =
|
||||
stealers_and_unparkers.into_iter().unzip();
|
||||
let worker_unparkers = worker_unparkers.into_boxed_slice();
|
||||
|
||||
Self {
|
||||
@ -504,7 +520,7 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
|
||||
if stealers.all(|stealer| {
|
||||
stealer
|
||||
.steal_and_pop(local_queue, |n| n - n / 2)
|
||||
.map(|task| {
|
||||
.map(|(task, _)| {
|
||||
let prev_task = fast_slot.replace(Some(task));
|
||||
assert!(prev_task.is_none());
|
||||
})
|
@ -4,14 +4,13 @@ use std::sync::Mutex;
|
||||
|
||||
use crossbeam_utils::sync::Unparker;
|
||||
|
||||
use super::find_bit;
|
||||
use super::rng;
|
||||
use super::Stealer;
|
||||
use crate::util::bit;
|
||||
use crate::util::rng;
|
||||
|
||||
/// Manager of worker threads.
|
||||
///
|
||||
/// The manager currently only supports up to `usize::BITS` threads.
|
||||
#[derive(Debug)]
|
||||
pub(super) struct PoolManager {
|
||||
/// Number of worker threads.
|
||||
pool_size: usize,
|
||||
@ -297,7 +296,7 @@ impl<'a> ShuffledStealers<'a> {
|
||||
let (candidates, next_candidate) = if candidates == 0 {
|
||||
(0, 0)
|
||||
} else {
|
||||
let next_candidate = find_bit::find_bit(candidates, |count| {
|
||||
let next_candidate = bit::find_bit(candidates, |count| {
|
||||
rng.gen_bounded(count as u64) as usize + 1
|
||||
});
|
||||
|
@ -17,7 +17,7 @@ mod util;
|
||||
mod tests;
|
||||
|
||||
pub(crate) use cancel_token::CancelToken;
|
||||
pub(crate) use promise::{Promise, Stage};
|
||||
pub(crate) use promise::Promise;
|
||||
pub(crate) use runnable::Runnable;
|
||||
|
||||
use self::util::{runnable_exists, RunOnDrop};
|
@ -76,10 +76,12 @@ where
|
||||
// Deallocate the task if this was the last reference.
|
||||
if state & REF_MASK == REF_INC {
|
||||
// Ensure that all atomic accesses to the state are visible.
|
||||
|
||||
// FIXME: the fence does not seem necessary since the fetch_update
|
||||
// uses AcqRel.
|
||||
//
|
||||
// Ordering: this Acquire fence synchronizes with all Release
|
||||
// operations that decrement the number of references to the
|
||||
// task.
|
||||
// operations that decrement the number of references to the task.
|
||||
atomic::fence(Ordering::Acquire);
|
||||
|
||||
// Set a drop guard to ensure that the task is deallocated,
|
@ -117,7 +117,8 @@ pub(crate) enum Stage<T> {
|
||||
}
|
||||
|
||||
impl<U> Stage<U> {
|
||||
/// Maps a `Stage<T>` to `Stage<U>` by applying a function to a contained value.
|
||||
/// Maps a `Stage<U>` to `Stage<V>` by applying a function to a contained value.
|
||||
#[allow(unused)]
|
||||
pub(crate) fn map<V, F>(self, f: F) -> Stage<V>
|
||||
where
|
||||
F: FnOnce(U) -> V,
|
||||
@ -130,25 +131,28 @@ impl<U> Stage<U> {
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Ready`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_ready(&self) -> bool {
|
||||
matches!(*self, Stage::Ready(_))
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Pending`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_pending(&self) -> bool {
|
||||
matches!(*self, Stage::Pending)
|
||||
}
|
||||
|
||||
/// Returns `true` if the promise is a [`Stage::Cancelled`] value.
|
||||
#[allow(unused)]
|
||||
#[inline]
|
||||
pub(crate) fn is_cancelled(&self) -> bool {
|
||||
matches!(*self, Stage::Cancelled)
|
||||
}
|
||||
}
|
||||
|
||||
/// A promise that can poll a task's output of type `T`.
|
||||
/// A promise that can poll a task's output of type `U`.
|
||||
///
|
||||
/// Note that dropping a promise does not cancel the task.
|
||||
#[derive(Debug)]
|
||||
@ -182,6 +186,7 @@ impl<U: Send + 'static> Promise<U> {
|
||||
}
|
||||
|
||||
/// Retrieves the output of the task if ready.
|
||||
#[allow(unused)]
|
||||
pub(crate) fn poll(&self) -> Stage<U> {
|
||||
unsafe { (self.vtable.poll)(self.task) }
|
||||
}
|
@ -8,7 +8,7 @@ use std::pin::Pin;
|
||||
use std::task::{Context, Poll, RawWaker, Waker};
|
||||
|
||||
use crate::loom_exports::debug_or_loom_assert;
|
||||
use crate::loom_exports::sync::atomic::{self, Ordering};
|
||||
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
|
||||
|
||||
use super::util::RunOnDrop;
|
||||
use super::Task;
|
||||
@ -91,8 +91,11 @@ where
|
||||
mem::forget(panic_guard);
|
||||
|
||||
if let Poll::Ready(output) = poll_state {
|
||||
// Set a panic guard to close the task if the future or the
|
||||
// output panic when dropped.
|
||||
// Set a panic guard to close the task if the future or the output
|
||||
// panic when dropped. Miri complains if a reference to `this` is
|
||||
// captured and `mem::forget` is called on the guard after
|
||||
// deallocation, which is why the state is taken by pointer.
|
||||
let state_ptr = &this.state as *const AtomicU64;
|
||||
let panic_guard = RunOnDrop::new(|| {
|
||||
// Clear the `POLLING` flag while setting the `CLOSED` flag
|
||||
// to enter the `Closed` phase.
|
||||
@ -101,8 +104,7 @@ where
|
||||
// ensure that all memory operations on the future or the
|
||||
// output are visible when the last reference deallocates
|
||||
// the task.
|
||||
let state = this
|
||||
.state
|
||||
let state = (*state_ptr)
|
||||
.fetch_update(Ordering::Release, Ordering::Relaxed, |s| {
|
||||
Some((s | CLOSED) & !POLLING)
|
||||
})
|
@ -9,6 +9,7 @@ use std::thread;
|
||||
use futures_channel::{mpsc, oneshot};
|
||||
use futures_util::StreamExt;
|
||||
|
||||
use super::super::promise::Stage;
|
||||
use super::*;
|
||||
|
||||
// Test prelude to simulates a single-slot scheduler queue.
|
@ -12,6 +12,7 @@ use ::loom::sync::atomic::Ordering::*;
|
||||
use ::loom::sync::Arc;
|
||||
use ::loom::{lazy_static, thread};
|
||||
|
||||
use super::promise::Stage;
|
||||
use super::*;
|
||||
|
||||
// Test prelude to simulates a single-slot scheduler queue.
|
@ -1,11 +1,406 @@
|
||||
//! Asynchronix: a high-performance asynchronous computation framework for
|
||||
//! system simulation.
|
||||
|
||||
//! A high-performance, discrete-event computation framework for system
|
||||
//! simulation.
|
||||
//!
|
||||
//! Asynchronix is a developer-friendly, yet highly optimized software simulator
|
||||
//! able to scale to very large simulation with complex time-driven state
|
||||
//! machines.
|
||||
//!
|
||||
//! It promotes a component-oriented architecture that is familiar to system
|
||||
//! engineers and closely resembles [flow-based programming][FBP]: a model is
|
||||
//! essentially an isolated entity with a fixed set of typed inputs and outputs,
|
||||
//! communicating with other models through message passing via connections
|
||||
//! defined during bench assembly. Unlike in conventional flow-based
|
||||
//! programming, request-reply patterns are also possible.
|
||||
//!
|
||||
//! Asynchronix leverages asynchronous programming to perform
|
||||
//! auto-parallelization in a manner that is fully transparent to model authors
|
||||
//! and users, achieving high computational throughput on large simulation
|
||||
//! benches by means of a custom multi-threaded executor.
|
||||
//!
|
||||
//!
|
||||
//! [FBP]: https://en.wikipedia.org/wiki/Flow-based_programming
|
||||
//!
|
||||
//! # A practical overview
|
||||
//!
|
||||
//! Simulating a system typically involves three distinct activities:
|
||||
//!
|
||||
//! 1. the design of simulation models for each sub-system,
|
||||
//! 2. the assembly of a simulation bench from a set of models, performed by
|
||||
//! inter-connecting model ports,
|
||||
//! 3. the execution of the simulation, managed through periodical increments of
|
||||
//! the simulation time and by exchange of messages with simulation models.
|
||||
//!
|
||||
//! The following sections go through each of these activities in more details.
|
||||
//!
|
||||
//! ## Authoring models
|
||||
//!
|
||||
//! Models can contain four kinds of ports:
|
||||
//!
|
||||
//! * _output ports_, which are instances of the [`Output`](model::Output) type
|
||||
//! and can be used to broadcast a message,
|
||||
//! * _requestor ports_, which are instances of the
|
||||
//! [`Requestor`](model::Requestor) type and can be used to broadcast a
|
||||
//! message and receive an iterator yielding the replies from all connected
|
||||
//! replier ports,
|
||||
//! * _input ports_, which are synchronous or asynchronous methods that
|
||||
//! implement the [`InputFn`](model::InputFn) trait and take an `&mut self`
|
||||
//! argument, a message argument, and an optional
|
||||
//! [`&Scheduler`](time::Scheduler) argument,
|
||||
//! * _replier ports_, which are similar to input ports but implement the
|
||||
//! [`ReplierFn`](model::ReplierFn) trait and return a reply.
|
||||
//!
|
||||
//! Messages that are broadcast by an output port to an input port are referred
|
||||
//! to as *events*, while messages exchanged between requestor and replier ports
|
||||
//! are referred to as *requests* and *replies*.
|
||||
//!
|
||||
//! Models must implement the [`Model`](model::Model) trait. The main purpose of
|
||||
//! this trait is to allow models to specify an `init()` method that is
|
||||
//! guaranteed to run once and only once when the simulation is initialized,
|
||||
//! _i.e._ after all models have been connected but before the simulation
|
||||
//! starts. The `init()` method has a default implementation, so models that do
|
||||
//! not require initialization can simply implement the trait with a one-liner
|
||||
//! such as `impl Model for MyModel {}`.
|
||||
//!
|
||||
//! #### A simple model
|
||||
//!
|
||||
//! Let us consider for illustration a simple model that forwards its input
|
||||
//! after multiplying it by 2. This model has only one input and one output
|
||||
//! port:
|
||||
//!
|
||||
//! ```text
|
||||
//! ┌────────────┐
|
||||
//! │ │
|
||||
//! Input ●───────▶│ Multiplier ├───────▶ Output
|
||||
//! f64 │ │ f64
|
||||
//! └────────────┘
|
||||
//! ```
|
||||
//!
|
||||
//! `Multiplier` could be implemented as follows:
|
||||
//!
|
||||
//! ```
|
||||
//! use asynchronix::model::{Model, Output};
|
||||
//!
|
||||
//! #[derive(Default)]
|
||||
//! pub struct Multiplier {
|
||||
//! pub output: Output<f64>,
|
||||
//! }
|
||||
//! impl Multiplier {
|
||||
//! pub async fn input(&mut self, value: f64) {
|
||||
//! self.output.send(2.0 * value).await;
|
||||
//! }
|
||||
//! }
|
||||
//! impl Model for Multiplier {}
|
||||
//! ```
|
||||
//!
|
||||
//! #### A model using the local scheduler
|
||||
//!
|
||||
//! Models frequently need to schedule actions at a future time or simply get
|
||||
//! access to the current simulation time. To do so, input and replier methods
|
||||
//! can take an optional argument that gives them access to a local scheduler.
|
||||
//!
|
||||
//! To show how the local scheduler can be used in practice, let us implement
|
||||
//! `Delay`, a model which simply forwards its input unmodified after a 1s
|
||||
//! delay:
|
||||
//!
|
||||
//! ```
|
||||
//! use std::time::Duration;
|
||||
//! use asynchronix::model::{Model, Output};
|
||||
//! use asynchronix::time::Scheduler;
|
||||
//!
|
||||
//! #[derive(Default)]
|
||||
//! pub struct Delay {
|
||||
//! pub output: Output<f64>,
|
||||
//! }
|
||||
//! impl Delay {
|
||||
//! pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
||||
//! scheduler.schedule_in(Duration::from_secs(1), Self::send, value).unwrap();
|
||||
//! }
|
||||
//!
|
||||
//! async fn send(&mut self, value: f64) {
|
||||
//! self.output.send(value).await;
|
||||
//! }
|
||||
//! }
|
||||
//! impl Model for Delay {}
|
||||
//! ```
|
||||
//!
|
||||
//! ## Assembling simulation benches
|
||||
//!
|
||||
//! A simulation bench is a system of inter-connected models that have been
|
||||
//! migrated to a simulation.
|
||||
//!
|
||||
//! The assembly process usually starts with the instantiation of models and the
|
||||
//! creation of a [`Mailbox`](simulation::Mailbox) for each model. A mailbox is
|
||||
//! essentially a fixed-capacity buffer for events and requests. While each
|
||||
//! model has only one mailbox, it is possible to create an arbitrary number of
|
||||
//! [`Address`](simulation::Mailbox)es pointing to that mailbox.
|
||||
//!
|
||||
//! Addresses are used among others to connect models: each output or requestor
|
||||
//! ports has a `connect()` method that takes as argument a function pointer to
|
||||
//! the corresponding input or replier port method and the address of the
|
||||
//! targeted model.
|
||||
//!
|
||||
//! Once all models are connected, they are added to a
|
||||
//! [`SimInit`](simulation::SimInit) instance, which is a builder type for the
|
||||
//! final [`Simulation`](simulation::Simulation).
|
||||
//!
|
||||
//! The easiest way to understand the assembly step is with a short example. Say
|
||||
//! that we want to assemble the following system from the models implemented
|
||||
//! above:
|
||||
//!
|
||||
//! ```text
|
||||
//! ┌────────────┐
|
||||
//! │ │
|
||||
//! ┌──▶│ Delay ├──┐
|
||||
//! ┌────────────┐ │ │ │ │ ┌────────────┐
|
||||
//! │ │ │ └────────────┘ │ │ │
|
||||
//! Input ●──▶│ Multiplier ├───┤ ├──▶│ Delay ├──▶ Output
|
||||
//! │ │ │ ┌────────────┐ │ │ │
|
||||
//! └────────────┘ │ │ │ │ └────────────┘
|
||||
//! └──▶│ Multiplier ├──┘
|
||||
//! │ │
|
||||
//! └────────────┘
|
||||
//! ```
|
||||
//!
|
||||
//! Here is how this could be done:
|
||||
//!
|
||||
//! ```
|
||||
//! # mod models {
|
||||
//! # use std::time::Duration;
|
||||
//! # use asynchronix::model::{Model, Output};
|
||||
//! # use asynchronix::time::Scheduler;
|
||||
//! # #[derive(Default)]
|
||||
//! # pub struct Multiplier {
|
||||
//! # pub output: Output<f64>,
|
||||
//! # }
|
||||
//! # impl Multiplier {
|
||||
//! # pub async fn input(&mut self, value: f64) {
|
||||
//! # self.output.send(2.0 * value).await;
|
||||
//! # }
|
||||
//! # }
|
||||
//! # impl Model for Multiplier {}
|
||||
//! # #[derive(Default)]
|
||||
//! # pub struct Delay {
|
||||
//! # pub output: Output<f64>,
|
||||
//! # }
|
||||
//! # impl Delay {
|
||||
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
||||
//! # scheduler.schedule_in(Duration::from_secs(1), Self::send, value).unwrap();
|
||||
//! # }
|
||||
//! # async fn send(&mut self, value: f64) { // this method can be private
|
||||
//! # self.output.send(value).await;
|
||||
//! # }
|
||||
//! # }
|
||||
//! # impl Model for Delay {}
|
||||
//! # }
|
||||
//! use std::time::Duration;
|
||||
//! use asynchronix::simulation::{Mailbox, SimInit};
|
||||
//! use asynchronix::time::MonotonicTime;
|
||||
//!
|
||||
//! use models::{Delay, Multiplier};
|
||||
//!
|
||||
//! // Instantiate models.
|
||||
//! let mut multiplier1 = Multiplier::default();
|
||||
//! let mut multiplier2 = Multiplier::default();
|
||||
//! let mut delay1 = Delay::default();
|
||||
//! let mut delay2 = Delay::default();
|
||||
//!
|
||||
//! // Instantiate mailboxes.
|
||||
//! let multiplier1_mbox = Mailbox::new();
|
||||
//! let multiplier2_mbox = Mailbox::new();
|
||||
//! let delay1_mbox = Mailbox::new();
|
||||
//! let delay2_mbox = Mailbox::new();
|
||||
//!
|
||||
//! // Connect the models.
|
||||
//! multiplier1.output.connect(Delay::input, &delay1_mbox);
|
||||
//! multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
|
||||
//! multiplier2.output.connect(Delay::input, &delay2_mbox);
|
||||
//! delay1.output.connect(Delay::input, &delay2_mbox);
|
||||
//!
|
||||
//! // Keep handles to the system input and output for the simulation.
|
||||
//! let mut output_slot = delay2.output.connect_slot().0;
|
||||
//! let input_address = multiplier1_mbox.address();
|
||||
//!
|
||||
//! // Pick an arbitrary simulation start time and build the simulation.
|
||||
//! let t0 = MonotonicTime::EPOCH;
|
||||
//! let mut simu = SimInit::new()
|
||||
//! .add_model(multiplier1, multiplier1_mbox)
|
||||
//! .add_model(multiplier2, multiplier2_mbox)
|
||||
//! .add_model(delay1, delay1_mbox)
|
||||
//! .add_model(delay2, delay2_mbox)
|
||||
//! .init(t0);
|
||||
//! ```
|
||||
//!
|
||||
//! ## Running simulations
|
||||
//!
|
||||
//! The simulation can be controlled in several ways:
|
||||
//!
|
||||
//! 1. by advancing time, either until the next scheduled event with
|
||||
//! [`Simulation::step()`](simulation::Simulation::step), or by a specific
|
||||
//! duration using for instance
|
||||
//! [`Simulation::step_by()`](simulation::Simulation::step_by).
|
||||
//! 2. by sending events or queries without advancing simulation time, using
|
||||
//! [`Simulation::send_event()`](simulation::Simulation::send_event) or
|
||||
//! [`Simulation::send_query()`](simulation::Simulation::send_query),
|
||||
//! 3. by scheduling events, using for instance
|
||||
//! [`Simulation::schedule_in()`](simulation::Simulation::schedule_in).
|
||||
//!
|
||||
//! Simulation outputs can be monitored using
|
||||
//! [`EventSlot`](simulation::EventSlot)s and
|
||||
//! [`EventStream`](simulation::EventStream)s, which can be connected to any
|
||||
//! model's output port. While an event slot only gives access to the last value
|
||||
//! sent from a port, an event stream is an iterator that yields all events that
|
||||
//! were sent in first-in-first-out order.
|
||||
//!
|
||||
//! This is an example of simulation that could be performed using the above
|
||||
//! bench assembly:
|
||||
//!
|
||||
//! ```
|
||||
//! # mod models {
|
||||
//! # use std::time::Duration;
|
||||
//! # use asynchronix::model::{Model, Output};
|
||||
//! # use asynchronix::time::Scheduler;
|
||||
//! # #[derive(Default)]
|
||||
//! # pub struct Multiplier {
|
||||
//! # pub output: Output<f64>,
|
||||
//! # }
|
||||
//! # impl Multiplier {
|
||||
//! # pub async fn input(&mut self, value: f64) {
|
||||
//! # self.output.send(2.0 * value).await;
|
||||
//! # }
|
||||
//! # }
|
||||
//! # impl Model for Multiplier {}
|
||||
//! # #[derive(Default)]
|
||||
//! # pub struct Delay {
|
||||
//! # pub output: Output<f64>,
|
||||
//! # }
|
||||
//! # impl Delay {
|
||||
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
||||
//! # scheduler.schedule_in(Duration::from_secs(1), Self::send, value).unwrap();
|
||||
//! # }
|
||||
//! # async fn send(&mut self, value: f64) { // this method can be private
|
||||
//! # self.output.send(value).await;
|
||||
//! # }
|
||||
//! # }
|
||||
//! # impl Model for Delay {}
|
||||
//! # }
|
||||
//! # use std::time::Duration;
|
||||
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
||||
//! # use asynchronix::time::MonotonicTime;
|
||||
//! # use models::{Delay, Multiplier};
|
||||
//! # let mut multiplier1 = Multiplier::default();
|
||||
//! # let mut multiplier2 = Multiplier::default();
|
||||
//! # let mut delay1 = Delay::default();
|
||||
//! # let mut delay2 = Delay::default();
|
||||
//! # let multiplier1_mbox = Mailbox::new();
|
||||
//! # let multiplier2_mbox = Mailbox::new();
|
||||
//! # let delay1_mbox = Mailbox::new();
|
||||
//! # let delay2_mbox = Mailbox::new();
|
||||
//! # multiplier1.output.connect(Delay::input, &delay1_mbox);
|
||||
//! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
|
||||
//! # multiplier2.output.connect(Delay::input, &delay2_mbox);
|
||||
//! # delay1.output.connect(Delay::input, &delay2_mbox);
|
||||
//! # let mut output_slot = delay2.output.connect_slot().0;
|
||||
//! # let input_address = multiplier1_mbox.address();
|
||||
//! # let t0 = MonotonicTime::EPOCH;
|
||||
//! # let mut simu = SimInit::new()
|
||||
//! # .add_model(multiplier1, multiplier1_mbox)
|
||||
//! # .add_model(multiplier2, multiplier2_mbox)
|
||||
//! # .add_model(delay1, delay1_mbox)
|
||||
//! # .add_model(delay2, delay2_mbox)
|
||||
//! # .init(t0);
|
||||
//! // Send a value to the first multiplier.
|
||||
//! simu.send_event(Multiplier::input, 21.0, &input_address);
|
||||
//!
|
||||
//! // The simulation is still at t0 so nothing is expected at the output of the
|
||||
//! // second delay gate.
|
||||
//! assert!(output_slot.take().is_none());
|
||||
//!
|
||||
//! // Advance simulation time until the next event and check the time and output.
|
||||
//! simu.step();
|
||||
//! assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
||||
//! assert_eq!(output_slot.take(), Some(84.0));
|
||||
//!
|
||||
//! // Get the answer to the ultimate question of life, the universe & everything.
|
||||
//! simu.step();
|
||||
//! assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||
//! assert_eq!(output_slot.take(), Some(42.0));
|
||||
//! ```
|
||||
//!
|
||||
//! # Message ordering guarantees
|
||||
//!
|
||||
//! The Asynchronix runtime is based on the [actor model][actor_model], meaning
|
||||
//! that every simulation model can be thought of as an isolated entity running
|
||||
//! in its own thread. While in practice the runtime will actually multiplex and
|
||||
//! migrate models over a fixed set of kernel threads, models will indeed run in
|
||||
//! parallel whenever possible.
|
||||
//!
|
||||
//! Since Asynchronix is a time-based simulator, the runtime will always execute
|
||||
//! tasks in chronological order, thus eliminating most ordering ambiguities
|
||||
//! that could result from parallel execution. Nevertheless, it is sometimes
|
||||
//! possible for events and queries generated in the same time slice to lead to
|
||||
//! ambiguous execution orders. In order to make it easier to reason about such
|
||||
//! situations, Asynchronix provides a set of guarantees about message delivery
|
||||
//! order. Borrowing from the [Pony][pony] programming language, we refer to
|
||||
//! this contract as *causal messaging*, a property that can be summarized by
|
||||
//! these two rules:
|
||||
//!
|
||||
//! 1. *one-to-one message ordering guarantee*: if model `A` sends two events or
|
||||
//! queries `M1` and then `M2` to model `B`, then `B` will always process
|
||||
//! `M1` before `M2`,
|
||||
//! 2. *transitivity guarantee*: if `A` sends `M1` to `B` and then `M2` to `C`
|
||||
//! which in turn sends `M3` to `B`, even though `M1` and `M2` may be
|
||||
//! processed in any order by `B` and `C`, it is guaranteed that `B` will
|
||||
//! process `M1` before `M3`.
|
||||
//!
|
||||
//! The first guarantee (and only the first) also extends to events scheduled
|
||||
//! from a simulation with
|
||||
//! [`Simulation::schedule_in()`](simulation::Simulation::schedule_in) or
|
||||
//! [`Simulation::schedule_at()`](simulation::Simulation::schedule_at): if the
|
||||
//! scheduler contains several events to be delivered at the same time to the
|
||||
//! same model, these events will always be processed in the order in which they
|
||||
//! were scheduled.
|
||||
//!
|
||||
//! [actor_model]: https://en.wikipedia.org/wiki/Actor_model
|
||||
//! [pony]: https://www.ponylang.io/
|
||||
//!
|
||||
//!
|
||||
//! # Other resources
|
||||
//!
|
||||
//! ## Other examples
|
||||
//!
|
||||
//! The [`examples`][gh_examples] directory in the main repository contains more
|
||||
//! fleshed out examples that demonstrate various capabilities of the simulation
|
||||
//! framework.
|
||||
//!
|
||||
//! [gh_examples]:
|
||||
//! https://github.com/asynchronics/asynchronix/tree/main/asynchronix/examples
|
||||
//!
|
||||
//! ## Modules documentation
|
||||
//!
|
||||
//! While the above overview does cover the basic concepts, more information is
|
||||
//! available in the documentation of the different modules:
|
||||
//!
|
||||
//! * the [`model`] module provides more details about the signatures of input
|
||||
//! and replier port methods and discusses model initialization in the
|
||||
//! documentation of [`model::Model`],
|
||||
//! * the [`simulation`] module discusses how the capacity of mailboxes may
|
||||
//! affect the simulation, how connections can be modified after the
|
||||
//! simulation was instantiated, and which pathological situations can lead to
|
||||
//! a deadlock,
|
||||
//! * the [`time`] module discusses in particular self-scheduling methods and
|
||||
//! scheduling cancellation in the documentation of [`time::Scheduler`] while
|
||||
//! the monotonic timestamp format used for simulations is documented in
|
||||
//! [`time::MonotonicTime`].
|
||||
#![warn(missing_docs, missing_debug_implementations, unreachable_pub)]
|
||||
|
||||
pub(crate) mod channel;
|
||||
pub(crate) mod executor;
|
||||
mod loom_exports;
|
||||
pub(crate) mod macros;
|
||||
pub mod runtime;
|
||||
pub mod model;
|
||||
pub mod simulation;
|
||||
pub mod time;
|
||||
pub(crate) mod util;
|
||||
|
||||
#[cfg(feature = "dev-hooks")]
|
||||
pub mod dev_hooks;
|
||||
|
@ -1,15 +1,23 @@
|
||||
#[cfg(asynchronix_loom)]
|
||||
#[allow(unused_imports)]
|
||||
pub(crate) mod sync {
|
||||
pub(crate) use loom::sync::{Arc, Mutex};
|
||||
|
||||
pub(crate) mod atomic {
|
||||
pub(crate) use loom::sync::atomic::{fence, AtomicU32, AtomicU64, AtomicUsize, Ordering};
|
||||
pub(crate) use loom::sync::atomic::{
|
||||
fence, AtomicBool, AtomicIsize, AtomicPtr, AtomicU32, AtomicU64, AtomicUsize, Ordering,
|
||||
};
|
||||
}
|
||||
}
|
||||
#[cfg(not(asynchronix_loom))]
|
||||
#[allow(unused_imports)]
|
||||
pub(crate) mod sync {
|
||||
pub(crate) use std::sync::{Arc, Mutex};
|
||||
|
||||
pub(crate) mod atomic {
|
||||
pub(crate) use std::sync::atomic::{fence, AtomicU32, AtomicU64, AtomicUsize, Ordering};
|
||||
pub(crate) use std::sync::atomic::{
|
||||
fence, AtomicBool, AtomicIsize, AtomicPtr, AtomicU32, AtomicU64, AtomicUsize, Ordering,
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1 +1 @@
|
||||
pub(crate) mod scoped_local_key;
|
||||
pub(crate) mod scoped_thread_local;
|
||||
|
@ -11,8 +11,8 @@ use std::ptr;
|
||||
macro_rules! scoped_thread_local {
|
||||
($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => (
|
||||
$(#[$attrs])*
|
||||
$vis static $name: $crate::macros::scoped_local_key::ScopedLocalKey<$ty>
|
||||
= $crate::macros::scoped_local_key::ScopedLocalKey {
|
||||
$vis static $name: $crate::macros::scoped_thread_local::ScopedLocalKey<$ty>
|
||||
= $crate::macros::scoped_thread_local::ScopedLocalKey {
|
||||
inner: {
|
||||
thread_local!(static FOO: ::std::cell::Cell<*const ()> = const {
|
||||
std::cell::Cell::new(::std::ptr::null())
|
253
asynchronix/src/model.rs
Normal file
253
asynchronix/src/model.rs
Normal file
@ -0,0 +1,253 @@
|
||||
//! Model components.
|
||||
//!
|
||||
//! # Model trait
|
||||
//!
|
||||
//! Every model must implement the [`Model`] trait. This trait defines an
|
||||
//! asynchronous initialization method, [`Model::init()`], which main purpose is
|
||||
//! to enable models to perform specific actions only once all models have been
|
||||
//! connected and migrated to the simulation, but before the simulation actually
|
||||
//! starts.
|
||||
//!
|
||||
//! #### Examples
|
||||
//!
|
||||
//! A model that does not require initialization can simply use the default
|
||||
//! implementation of the `Model` trait:
|
||||
//!
|
||||
//! ```
|
||||
//! use asynchronix::model::Model;
|
||||
//!
|
||||
//! pub struct MyModel {
|
||||
//! // ...
|
||||
//! }
|
||||
//! impl Model for MyModel {}
|
||||
//! ```
|
||||
//!
|
||||
//! Otherwise, a custom `init()` method can be implemented:
|
||||
//!
|
||||
//! ```
|
||||
//! use std::future::Future;
|
||||
//! use std::pin::Pin;
|
||||
//!
|
||||
//! use asynchronix::model::{InitializedModel, Model};
|
||||
//! use asynchronix::time::Scheduler;
|
||||
//!
|
||||
//! pub struct MyModel {
|
||||
//! // ...
|
||||
//! }
|
||||
//! impl Model for MyModel {
|
||||
//! fn init(
|
||||
//! mut self,
|
||||
//! scheduler: &Scheduler<Self>
|
||||
//! ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
|
||||
//! Box::pin(async move {
|
||||
//! println!("...initialization...");
|
||||
//!
|
||||
//! self.into()
|
||||
//! })
|
||||
//! }
|
||||
//! }
|
||||
//! ```
|
||||
//!
|
||||
//! # Events and queries
|
||||
//!
|
||||
//! Models can exchange data via *events* and *queries*.
|
||||
//!
|
||||
//! Events are send-and-forget messages that can be broadcast from an *output
|
||||
//! port* to an arbitrary number of *input ports* with a matching event type.
|
||||
//!
|
||||
//! Queries actually involve two messages: a *request* that can be broadcast
|
||||
//! from a *requestor port* to an arbitrary number of *replier ports* with a
|
||||
//! matching request type, and a *reply* sent in response to such request. The
|
||||
//! response received by a requestor port is an iterator that yields as many
|
||||
//! items (replies) as there are connected replier ports.
|
||||
//!
|
||||
//!
|
||||
//! ### Output and requestor ports
|
||||
//!
|
||||
//! Output and requestor ports can be added to a model using composition, adding
|
||||
//! [`Output`] and [`Requestor`] objects as members. They are parametrized by
|
||||
//! the event, request and reply types.
|
||||
//!
|
||||
//! Models are expected to expose their output and requestor ports as public
|
||||
//! members so they can be connected to input and replier ports when assembling
|
||||
//! the simulation bench.
|
||||
//!
|
||||
//! #### Example
|
||||
//!
|
||||
//! ```
|
||||
//! use asynchronix::model::{Model, Output, Requestor};
|
||||
//!
|
||||
//! pub struct MyModel {
|
||||
//! pub my_output: Output<String>,
|
||||
//! pub my_requestor: Requestor<u32, bool>,
|
||||
//! }
|
||||
//! impl MyModel {
|
||||
//! // ...
|
||||
//! }
|
||||
//! impl Model for MyModel {}
|
||||
//! ```
|
||||
//!
|
||||
//!
|
||||
//! ### Input and replier ports
|
||||
//!
|
||||
//! Input ports and replier ports are methods that implement the [`InputFn`] or
|
||||
//! [`ReplierFn`] traits with appropriate bounds on their argument and return
|
||||
//! types.
|
||||
//!
|
||||
//! In practice, an input port method for an event of type `T` may have any of
|
||||
//! the following signatures, where the futures returned by the `async` variants
|
||||
//! must implement `Send`:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! fn(&mut self) // argument elided, implies `T=()`
|
||||
//! fn(&mut self, T)
|
||||
//! fn(&mut self, T, &Scheduler<Self>)
|
||||
//! async fn(&mut self) // argument elided, implies `T=()`
|
||||
//! async fn(&mut self, T)
|
||||
//! async fn(&mut self, T, &Scheduler<Self>)
|
||||
//! where
|
||||
//! Self: Model,
|
||||
//! T: Clone + Send + 'static,
|
||||
//! R: Send + 'static,
|
||||
//! ```
|
||||
//!
|
||||
//! The scheduler argument is useful for methods that need access to the
|
||||
//! simulation time or that need to schedule an action at a future date.
|
||||
//!
|
||||
//! A replier port for a request of type `T` with a reply of type `R` may in
|
||||
//! turn have any of the following signatures, where the futures must implement
|
||||
//! `Send`:
|
||||
//!
|
||||
//! ```ignore
|
||||
//! async fn(&mut self) -> R // argument elided, implies `T=()`
|
||||
//! async fn(&mut self, T) -> R
|
||||
//! async fn(&mut self, T, &Scheduler<Self>) -> R
|
||||
//! where
|
||||
//! Self: Model,
|
||||
//! T: Clone + Send + 'static,
|
||||
//! R: Send + 'static,
|
||||
//! ```
|
||||
//!
|
||||
//! Input and replier ports will normally be exposed as public methods so they
//! can be connected to output and requestor ports when assembling the
//! simulation bench. However, input ports may instead be defined as private
//! methods if they are only used by the model itself to schedule future
//! actions (see the [`Scheduler`](crate::time::Scheduler) examples).
|
||||
//!
|
||||
//! Changing the signature of an input or replier port is not considered to
|
||||
//! alter the public interface of a model provided that the event, request and
|
||||
//! reply types remain the same.
|
||||
//!
|
||||
//! #### Example
|
||||
//!
|
||||
//! ```
|
||||
//! use asynchronix::model::Model;
|
||||
//! use asynchronix::time::Scheduler;
|
||||
//!
|
||||
//! pub struct MyModel {
|
||||
//! // ...
|
||||
//! }
|
||||
//! impl MyModel {
|
||||
//! pub fn my_input(&mut self, input: String, scheduler: &Scheduler<Self>) {
|
||||
//! // ...
|
||||
//! }
|
||||
//! pub async fn my_replier(&mut self, request: u32) -> bool { // scheduler argument elided
|
||||
//! // ...
|
||||
//! # unimplemented!()
|
||||
//! }
|
||||
//! }
|
||||
//! impl Model for MyModel {}
|
||||
//! ```
|
||||
//!
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
|
||||
use crate::time::Scheduler;
|
||||
|
||||
pub use model_fn::{InputFn, ReplierFn};
|
||||
pub use ports::{LineError, LineId, Output, Requestor};
|
||||
|
||||
pub mod markers;
|
||||
mod model_fn;
|
||||
mod ports;
|
||||
|
||||
/// Trait to be implemented by all models.
///
/// This trait enables models to perform specific actions in the
/// [`Model::init()`] method only once all models have been connected and
/// migrated to the simulation bench, but before the simulation actually starts.
/// A common use for `init` is to send messages to connected models at the
/// beginning of the simulation.
///
/// The `init` function converts the model to the opaque `InitializedModel` type
/// to prevent an already initialized model from being added to the simulation
/// bench.
pub trait Model: Sized + Send + 'static {
    /// Performs asynchronous model initialization.
    ///
    /// This asynchronous method is executed exactly once for all models of the
    /// simulation when the
    /// [`SimInit::init()`](crate::simulation::SimInit::init) method is called.
    ///
    /// The default implementation simply converts the model to an
    /// `InitializedModel` without any side effect.
    ///
    /// *Note*: it is currently necessary to box the returned future; this
    /// limitation will be lifted once Rust supports `async` methods in traits.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::future::Future;
    /// use std::pin::Pin;
    ///
    /// use asynchronix::model::{InitializedModel, Model};
    /// use asynchronix::time::Scheduler;
    ///
    /// pub struct MyModel {
    ///     // ...
    /// }
    ///
    /// impl Model for MyModel {
    ///     fn init(
    ///         self,
    ///         scheduler: &Scheduler<Self>
    ///     ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
    ///         Box::pin(async move {
    ///             println!("...initialization...");
    ///
    ///             self.into()
    ///         })
    ///     }
    /// }
    /// ```

    // Removing the boxing constraint requires the
    // `return_position_impl_trait_in_trait` and `async_fn_in_trait` features.
    // Tracking issue: <https://github.com/rust-lang/rust/issues/91611>.
    fn init(
        self,
        scheduler: &Scheduler<Self>,
    ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
        Box::pin(async move {
            let _ = scheduler; // suppress the unused argument warning
            self.into()
        })
    }
}
|
||||
|
||||
/// Opaque type containing an initialized model.
|
||||
///
|
||||
/// A model can be converted to an `InitializedModel` using the `Into`/`From`
|
||||
/// traits. The implementation of the simulation guarantees that the
|
||||
/// [`Model::init()`] method will never be called on a model after conversion to
|
||||
/// an `InitializedModel`.
|
||||
#[derive(Debug)]
|
||||
pub struct InitializedModel<M: Model>(pub(crate) M);
|
||||
|
||||
impl<M: Model> From<M> for InitializedModel<M> {
|
||||
fn from(model: M) -> Self {
|
||||
InitializedModel(model)
|
||||
}
|
||||
}
|
31
asynchronix/src/model/markers.rs
Normal file
31
asynchronix/src/model/markers.rs
Normal file
@ -0,0 +1,31 @@
|
||||
//! Marker types for simulation model methods.
//!
//! These types are used as the last type parameter of the `InputFn` and
//! `ReplierFn` traits to disambiguate the blanket implementations for the
//! different supported method signatures.

/// Marker type for regular simulation model methods that take a mutable
/// reference to the model, without any other argument.
#[derive(Debug)]
pub struct WithoutArguments {}

/// Marker type for regular simulation model methods that take a mutable
/// reference to the model and a message, without scheduler argument.
#[derive(Debug)]
pub struct WithoutScheduler {}

/// Marker type for regular simulation model methods that take a mutable
/// reference to the model, a message and an explicit scheduler argument.
#[derive(Debug)]
pub struct WithScheduler {}

/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model, without any other argument.
#[derive(Debug)]
pub struct AsyncWithoutArguments {}

/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model and a message, without scheduler argument.
#[derive(Debug)]
pub struct AsyncWithoutScheduler {}

/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model, a message and an explicit scheduler argument.
#[derive(Debug)]
pub struct AsyncWithScheduler {}
|
185
asynchronix/src/model/model_fn.rs
Normal file
185
asynchronix/src/model/model_fn.rs
Normal file
@ -0,0 +1,185 @@
|
||||
//! Trait for model input and replier ports.
|
||||
|
||||
use std::future::{ready, Future, Ready};
|
||||
|
||||
use crate::model::{markers, Model};
|
||||
use crate::time::Scheduler;
|
||||
|
||||
/// A function, method or closure that can be used as an *input port*.
///
/// This trait is in particular implemented for any function or method with the
/// following signature, where it is implicitly assumed that the function
/// implements `Send + 'static`:
///
/// ```ignore
/// FnOnce(&mut M, T)
/// FnOnce(&mut M, T, &Scheduler<M>)
/// async fn(&mut M, T)
/// async fn(&mut M, T, &Scheduler<M>)
/// where
///     M: Model
/// ```
///
/// It is also implemented for the following signatures when `T=()`:
///
/// ```ignore
/// FnOnce(&mut M)
/// async fn(&mut M)
/// where
///     M: Model
/// ```
pub trait InputFn<'a, M: Model, T, S>: Send + 'static {
    /// The `Future` returned by the asynchronous method.
    type Future: Future<Output = ()> + Send + 'a;

    /// Calls the method.
    fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
}
|
||||
|
||||
// `FnOnce(&mut M)`: synchronous input port without argument; `T` is fixed to
// `()`.
impl<'a, M, F> InputFn<'a, M, (), markers::WithoutArguments> for F
where
    M: Model,
    F: FnOnce(&'a mut M) + Send + 'static,
{
    // The method runs synchronously, so an already-completed future is
    // returned.
    type Future = Ready<()>;

    fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model);

        ready(())
    }
}

// `FnOnce(&mut M, T)`: synchronous input port taking a message, without
// scheduler argument.
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutScheduler> for F
where
    M: Model,
    F: FnOnce(&'a mut M, T) + Send + 'static,
{
    type Future = Ready<()>;

    fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg);

        ready(())
    }
}

// `FnOnce(&mut M, T, &Scheduler<M>)`: synchronous input port taking a message
// and a scheduler reference.
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithScheduler> for F
where
    M: Model,
    F: FnOnce(&'a mut M, T, &'a Scheduler<M>) + Send + 'static,
{
    type Future = Ready<()>;

    fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg, scheduler);

        ready(())
    }
}

// `async fn(&mut M)`: asynchronous input port without argument; `T` is fixed
// to `()`.
impl<'a, M, Fut, F> InputFn<'a, M, (), markers::AsyncWithoutArguments> for F
where
    M: Model,
    Fut: Future<Output = ()> + Send + 'a,
    F: FnOnce(&'a mut M) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model)
    }
}

// `async fn(&mut M, T)`: asynchronous input port taking a message, without
// scheduler argument.
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutScheduler> for F
where
    M: Model,
    Fut: Future<Output = ()> + Send + 'a,
    F: FnOnce(&'a mut M, T) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg)
    }
}

// `async fn(&mut M, T, &Scheduler<M>)`: asynchronous input port taking a
// message and a scheduler reference.
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithScheduler> for F
where
    M: Model,
    Fut: Future<Output = ()> + Send + 'a,
    F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg, scheduler)
    }
}
|
||||
|
||||
/// A function, method or closure that can be used as a *replier port*.
///
/// This trait is in particular implemented for any function or method with the
/// following signature, where it is implicitly assumed that the function
/// implements `Send + 'static`:
///
/// ```ignore
/// async fn(&mut M, T) -> R
/// async fn(&mut M, T, &Scheduler<M>) -> R
/// where
///     M: Model
/// ```
///
/// It is also implemented for the following signatures when `T=()`:
///
/// ```ignore
/// async fn(&mut M) -> R
/// where
///     M: Model
/// ```
pub trait ReplierFn<'a, M: Model, T, R, S>: Send + 'static {
    /// The `Future` returned by the asynchronous method.
    type Future: Future<Output = R> + Send + 'a;

    /// Calls the method.
    fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
}
|
||||
|
||||
// `async fn(&mut M) -> R`: replier port without argument; `T` is fixed to
// `()`.
impl<'a, M, R, Fut, F> ReplierFn<'a, M, (), R, markers::AsyncWithoutArguments> for F
where
    M: Model,
    Fut: Future<Output = R> + Send + 'a,
    F: FnOnce(&'a mut M) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model)
    }
}

// `async fn(&mut M, T) -> R`: replier port taking a message, without scheduler
// argument.
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutScheduler> for F
where
    M: Model,
    Fut: Future<Output = R> + Send + 'a,
    F: FnOnce(&'a mut M, T) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg)
    }
}

// `async fn(&mut M, T, &Scheduler<M>) -> R`: replier port taking a message and
// a scheduler reference.
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithScheduler> for F
where
    M: Model,
    Fut: Future<Output = R> + Send + 'a,
    F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
{
    type Future = Fut;

    fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
        self(model, arg, scheduler)
    }
}
|
218
asynchronix/src/model/ports.rs
Normal file
218
asynchronix/src/model/ports.rs
Normal file
@ -0,0 +1,218 @@
|
||||
//! Model ports for event and query broadcasting.
|
||||
//!
|
||||
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
|
||||
//! public member variables. Output ports broadcast events to all connected
|
||||
//! input ports, while requestor ports broadcast queries to, and retrieve
|
||||
//! replies from, all connected replier ports.
|
||||
//!
|
||||
//! On the surface, output and requestor ports only differ in that sending a
|
||||
//! query from a requestor port also returns an iterator over the replies from
|
||||
//! all connected ports. Sending a query is more costly, however, because of the
|
||||
//! need to wait until all connected models have processed the query. In
|
||||
//! contrast, since events are buffered in the mailbox of the target model,
|
||||
//! sending an event is a fire-and-forget operation. For this reason, output
|
||||
//! ports should generally be preferred over requestor ports when possible.
|
||||
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
mod broadcaster;
|
||||
mod sender;
|
||||
|
||||
use crate::model::{InputFn, Model, ReplierFn};
|
||||
use crate::simulation::{Address, EventSlot, EventStream};
|
||||
use crate::util::spsc_queue;
|
||||
|
||||
use broadcaster::Broadcaster;
|
||||
|
||||
use self::sender::{EventSender, EventSlotSender, EventStreamSender, QuerySender};
|
||||
|
||||
/// Unique identifier for a connection between two ports.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct LineId(u64);
|
||||
|
||||
/// An output port.
///
/// `Output` ports can be connected to input ports, i.e. to asynchronous model
/// methods that return no value. They broadcast events to all connected input
/// ports.
pub struct Output<T: Clone + Send + 'static> {
    /// Broadcasting object delivering events to all connected input ports.
    broadcaster: Broadcaster<T, ()>,
    /// Identifier assigned to the next connection; incremented by each
    /// `connect*` method.
    next_line_id: u64,
}
|
||||
|
||||
impl<T: Clone + Send + 'static> Output<T> {
|
||||
/// Creates a new, disconnected `Output` port.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Adds a connection to an input port of the model specified by the
|
||||
/// address.
|
||||
///
|
||||
/// The input port must be an asynchronous method of a model of type `M`
|
||||
/// taking as argument a value of type `T` plus, optionally, a scheduler
|
||||
/// reference.
|
||||
pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
|
||||
where
|
||||
M: Model,
|
||||
F: for<'a> InputFn<'a, M, T, S> + Copy,
|
||||
S: Send + 'static,
|
||||
{
|
||||
assert!(self.next_line_id != u64::MAX);
|
||||
let line_id = LineId(self.next_line_id);
|
||||
self.next_line_id += 1;
|
||||
let sender = Box::new(EventSender::new(input, address.into().0));
|
||||
self.broadcaster.add(sender, line_id);
|
||||
|
||||
line_id
|
||||
}
|
||||
|
||||
/// Adds a connection to an event stream iterator.
|
||||
pub fn connect_stream(&mut self) -> (EventStream<T>, LineId) {
|
||||
assert!(self.next_line_id != u64::MAX);
|
||||
let line_id = LineId(self.next_line_id);
|
||||
self.next_line_id += 1;
|
||||
|
||||
let (producer, consumer) = spsc_queue::spsc_queue();
|
||||
let sender = Box::new(EventStreamSender::new(producer));
|
||||
let event_stream = EventStream::new(consumer);
|
||||
|
||||
self.broadcaster.add(sender, line_id);
|
||||
|
||||
(event_stream, line_id)
|
||||
}
|
||||
|
||||
/// Adds a connection to an event slot.
|
||||
pub fn connect_slot(&mut self) -> (EventSlot<T>, LineId) {
|
||||
assert!(self.next_line_id != u64::MAX);
|
||||
let line_id = LineId(self.next_line_id);
|
||||
self.next_line_id += 1;
|
||||
|
||||
let slot = Arc::new(Mutex::new(None));
|
||||
let sender = Box::new(EventSlotSender::new(slot.clone()));
|
||||
let event_slot = EventSlot::new(slot);
|
||||
|
||||
self.broadcaster.add(sender, line_id);
|
||||
|
||||
(event_slot, line_id)
|
||||
}
|
||||
|
||||
/// Removes the connection specified by the `LineId` parameter.
|
||||
///
|
||||
/// It is a logic error to specify a line identifier from another [`Output`]
|
||||
/// or [`Requestor`] instance and may result in the disconnection of an
|
||||
/// arbitrary endpoint.
|
||||
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||
if self.broadcaster.remove(line_id) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(LineError {})
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes all connections.
|
||||
pub fn disconnect_all(&mut self) {
|
||||
self.broadcaster.clear();
|
||||
}
|
||||
|
||||
/// Broadcasts an event to all connected input ports.
|
||||
pub async fn send(&mut self, arg: T) {
|
||||
self.broadcaster.broadcast_event(arg).await.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone + Send + 'static> Default for Output<T> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
broadcaster: Broadcaster::default(),
|
||||
next_line_id: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Output ({} connected ports)", self.broadcaster.len())
|
||||
}
|
||||
}
|
||||
|
||||
/// A requestor port.
///
/// `Requestor` ports can be connected to replier ports, i.e. to asynchronous
/// model methods that return a value. They broadcast queries to all connected
/// replier ports.
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
    /// Broadcasting object sending queries to, and collecting replies from,
    /// all connected replier ports.
    broadcaster: Broadcaster<T, R>,
    /// Identifier assigned to the next connection; incremented by each
    /// `connect` call.
    next_line_id: u64,
}
|
||||
|
||||
impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
|
||||
/// Creates a new, disconnected `Requestor` port.
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Adds a connection to a replier port of the model specified by the
|
||||
/// address.
|
||||
///
|
||||
/// The replier port must be an asynchronous method of a model of type `M`
|
||||
/// returning a value of type `R` and taking as argument a value of type `T`
|
||||
/// plus, optionally, a scheduler reference.
|
||||
pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
|
||||
where
|
||||
M: Model,
|
||||
F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
|
||||
S: Send + 'static,
|
||||
{
|
||||
assert!(self.next_line_id != u64::MAX);
|
||||
let line_id = LineId(self.next_line_id);
|
||||
self.next_line_id += 1;
|
||||
let sender = Box::new(QuerySender::new(replier, address.into().0));
|
||||
self.broadcaster.add(sender, line_id);
|
||||
|
||||
line_id
|
||||
}
|
||||
|
||||
/// Removes the connection specified by the `LineId` parameter.
|
||||
///
|
||||
/// It is a logic error to specify a line identifier from another [`Output`]
|
||||
/// or [`Requestor`] instance and may result in the disconnection of an
|
||||
/// arbitrary endpoint.
|
||||
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||
if self.broadcaster.remove(line_id) {
|
||||
Ok(())
|
||||
} else {
|
||||
Err(LineError {})
|
||||
}
|
||||
}
|
||||
|
||||
/// Removes all connections.
|
||||
pub fn disconnect_all(&mut self) {
|
||||
self.broadcaster.clear();
|
||||
}
|
||||
|
||||
/// Broadcasts a query to all connected replier ports.
|
||||
pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
|
||||
self.broadcaster.broadcast_query(arg).await.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
broadcaster: Broadcaster::default(),
|
||||
next_line_id: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
write!(f, "Requestor ({} connected ports)", self.broadcaster.len())
|
||||
}
|
||||
}
|
||||
|
||||
/// Error raised when the specified line cannot be found.
///
/// Returned by the `disconnect` methods of [`Output`] and [`Requestor`].
#[derive(Copy, Clone, Debug)]
pub struct LineError {}
|
746
asynchronix/src/model/ports/broadcaster.rs
Normal file
746
asynchronix/src/model/ports/broadcaster.rs
Normal file
@ -0,0 +1,746 @@
|
||||
use std::future::Future;
|
||||
use std::mem::ManuallyDrop;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use diatomic_waker::WakeSink;
|
||||
use recycle_box::{coerce_box, RecycleBox};
|
||||
|
||||
use super::sender::{SendError, Sender};
|
||||
use super::LineId;
|
||||
use task_set::TaskSet;
|
||||
|
||||
mod task_set;
|
||||
|
||||
/// An object that can efficiently broadcast messages to several addresses.
///
/// This object maintains a list of senders associated to each target address.
/// When a message is broadcast, the sender futures are awaited in parallel.
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
/// does, but with some key differences:
///
/// - tasks and future storage are reusable to avoid repeated allocation, so
///   allocation occurs only after a new sender is added,
/// - the outputs of all sender futures are returned all at once rather than
///   with an asynchronous iterator (a.k.a. async stream); the implementation
///   exploits this behavior by waking the main broadcast future only when all
///   sender futures have been woken, which strongly reduces overhead since
///   waking a sender task does not actually schedule it on the executor.
pub(super) struct Broadcaster<T: Clone + 'static, R: 'static> {
    /// The list of senders with their associated line identifier.
    senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
    /// Fields explicitly borrowed by the `BroadcastFuture`.
    shared: Shared<R>,
}
|
||||
|
||||
impl<T: Clone + 'static> Broadcaster<T, ()> {
    /// Broadcasts an event to all addresses.
    pub(super) async fn broadcast_event(&mut self, arg: T) -> Result<(), BroadcastError> {
        match self.senders.as_mut_slice() {
            // No sender: nothing to do.
            [] => Ok(()),
            // One sender: fast path that bypasses the aggregating
            // `BroadcastFuture`.
            [sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}),
            // Multiple senders: join all sender futures.
            _ => self.broadcast(arg).await,
        }
    }
}
|
||||
|
||||
impl<T: Clone + 'static, R> Broadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    // NOTE(review): no capacity check is visible in this method; the panic is
    // presumably raised by `TaskSet::resize` — confirm against `task_set.rs`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
        self.senders.push((id, sender));

        // Each sender gets a matching environment slot, kept index-aligned
        // with `senders`.
        self.shared.futures_env.push(FutureEnv {
            storage: None,
            output: None,
        });

        self.shared.task_set.resize(self.senders.len());
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        if let Some(pos) = self.senders.iter().position(|s| s.0 == id) {
            // `swap_remove` is applied to both vectors so they stay
            // index-aligned.
            self.senders.swap_remove(pos);
            self.shared.futures_env.swap_remove(pos);
            self.shared.task_set.resize(self.senders.len());

            return true;
        }

        false
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.senders.clear();
        self.shared.futures_env.clear();
        self.shared.task_set.resize(0);
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.senders.len()
    }

    /// Broadcasts a query to all addresses and collect all responses.
    pub(super) async fn broadcast_query(
        &mut self,
        arg: T,
    ) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
        match self.senders.as_mut_slice() {
            // No sender: the returned iterator will be empty.
            [] => {}
            // One sender: fast path that bypasses the aggregating future; the
            // output is stashed in the first environment slot so the common
            // collection code below applies.
            [sender] => {
                let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
                self.shared.futures_env[0].output = Some(output);
            }
            // Multiple senders.
            _ => self.broadcast(arg).await?,
        };

        // At this point all outputs should be available so `unwrap` can be
        // called on the output of each future.
        let outputs = self
            .shared
            .futures_env
            .iter_mut()
            .map(|t| t.output.take().unwrap());

        Ok(outputs)
    }

    /// Efficiently broadcasts a message or a query to multiple addresses.
    ///
    /// This method does not collect the responses from queries.
    fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> {
        let futures_count = self.senders.len();
        // Reuse the previously allocated vector of futures when available.
        let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default());

        // Broadcast the message and collect all futures.
        for (i, (sender, futures_env)) in self
            .senders
            .iter_mut()
            .zip(self.shared.futures_env.iter_mut())
            .enumerate()
        {
            // Reuse the sender's cached box allocation when available.
            let future_cache = futures_env
                .storage
                .take()
                .unwrap_or_else(|| RecycleBox::new(()));

            // Move the argument rather than clone it for the last future.
            if i + 1 == futures_count {
                let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> =
                    coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg)));

                futures.push(RecycleBox::into_pin(future));
                break;
            }

            let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> = coerce_box!(
                RecycleBox::recycle(future_cache, sender.1.send(arg.clone()))
            );

            futures.push(RecycleBox::into_pin(future));
        }

        // Generate the global future.
        BroadcastFuture::new(&mut self.shared, futures)
    }
}
|
||||
|
||||
impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
|
||||
/// Creates an empty `Broadcaster` object.
|
||||
fn default() -> Self {
|
||||
let wake_sink = WakeSink::new();
|
||||
let wake_src = wake_sink.source();
|
||||
|
||||
Self {
|
||||
senders: Vec::new(),
|
||||
shared: Shared {
|
||||
wake_sink,
|
||||
task_set: TaskSet::new(wake_src),
|
||||
futures_env: Vec::new(),
|
||||
storage: None,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Data related to a sender future.
struct FutureEnv<R> {
    /// Cached storage for the future.
    ///
    /// The box is taken while a broadcast is in flight and returned when the
    /// `BroadcastFuture` is dropped, so its allocation can be reused.
    storage: Option<RecycleBox<()>>,
    /// Output of the associated future, populated once the future completes.
    output: Option<R>,
}

/// A type-erased `Send` future wrapped in a `RecycleBox`.
type RecycleBoxFuture<'a, R> = RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + 'a>;
|
||||
|
||||
/// Fields of `Broadcaster` that are explicitly borrowed by a `BroadcastFuture`.
struct Shared<R> {
    /// Thread-safe waker handle.
    wake_sink: WakeSink,
    /// Tasks associated to the sender futures.
    task_set: TaskSet,
    /// Data related to the sender futures, index-aligned with the senders.
    futures_env: Vec<FutureEnv<R>>,
    /// Cached storage for the sender futures.
    ///
    /// When it exists, the cached storage is always an empty vector but it
    /// typically has a non-zero capacity. Its purpose is to reuse the
    /// previously allocated capacity when creating new sender futures.
    storage: Option<Vec<Pin<RecycleBoxFuture<'static, R>>>>,
}
|
||||
|
||||
/// A future aggregating the outputs of a collection of sender futures.
///
/// The idea is to join all sender futures as efficiently as possible, meaning:
///
/// - the sender futures are polled simultaneously rather than waiting for their
///   completion in a sequential manner,
/// - this future is never woken if it can be proven that at least one of the
///   individual sender tasks will still be woken,
/// - the storage allocated for the sender futures is always returned to the
///   `Broadcast` object so it can be reused by the next future,
/// - the happy path (all futures immediately ready) is very fast.
pub(super) struct BroadcastFuture<'a, R> {
    /// Reference to the shared fields of the `Broadcast` object.
    shared: &'a mut Shared<R>,
    /// List of all send futures.
    ///
    /// Wrapped in `ManuallyDrop` because the `Drop` implementation moves the
    /// vector out to recycle its allocations.
    futures: ManuallyDrop<Vec<Pin<RecycleBoxFuture<'a, R>>>>,
    /// The total count of futures that have not yet been polled to completion.
    pending_futures_count: usize,
    /// State of completion of the future.
    state: FutureState,
}
|
||||
|
||||
impl<'a, R> BroadcastFuture<'a, R> {
|
||||
/// Creates a new `BroadcastFuture`.
|
||||
fn new(shared: &'a mut Shared<R>, futures: Vec<Pin<RecycleBoxFuture<'a, R>>>) -> Self {
|
||||
let futures_count = futures.len();
|
||||
|
||||
assert!(shared.futures_env.len() == futures_count);
|
||||
|
||||
for futures_env in shared.futures_env.iter_mut() {
|
||||
// Drop the previous output if necessary.
|
||||
futures_env.output.take();
|
||||
}
|
||||
|
||||
BroadcastFuture {
|
||||
shared,
|
||||
futures: ManuallyDrop::new(futures),
|
||||
state: FutureState::Uninit,
|
||||
pending_futures_count: futures_count,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, R> Drop for BroadcastFuture<'a, R> {
    fn drop(&mut self) {
        // Safety: this is safe since `self.futures` is never accessed after it
        // is moved out (this `Drop` implementation is the only place that
        // takes it, and it runs at most once).
        let mut futures = unsafe { ManuallyDrop::take(&mut self.futures) };

        // Recycle the future-containing boxes.
        for (future, futures_env) in futures.drain(..).zip(self.shared.futures_env.iter_mut()) {
            futures_env.storage = Some(RecycleBox::vacate_pinned(future));
        }

        // Recycle the vector that contained the futures.
        self.shared.storage = Some(recycle_vec(futures));
    }
}
|
||||
|
||||
impl<'a, R> Future for BroadcastFuture<'a, R> {
    type Output = Result<(), BroadcastError>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;

        // Polling after completion is a contract violation.
        assert_ne!(this.state, FutureState::Completed);

        // Poll all sender futures once if this is the first time the broadcast
        // future is polled.
        if this.state == FutureState::Uninit {
            // Prevent spurious wake-ups.
            this.shared.task_set.discard_scheduled();

            for task_idx in 0..this.futures.len() {
                let future_env = &mut this.shared.futures_env[task_idx];
                let future = &mut this.futures[task_idx];
                // Each future is polled with its own task waker so that
                // completion can be tracked per sender.
                let task_waker_ref = this.shared.task_set.waker_of(task_idx);
                let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                match future.as_mut().poll(task_cx_ref) {
                    Poll::Ready(Ok(output)) => {
                        future_env.output = Some(output);
                        this.pending_futures_count -= 1;
                    }
                    Poll::Ready(Err(_)) => {
                        // A single failed send fails the whole broadcast.
                        this.state = FutureState::Completed;

                        return Poll::Ready(Err(BroadcastError {}));
                    }
                    Poll::Pending => {}
                }
            }

            if this.pending_futures_count == 0 {
                // Happy path: every sender future was immediately ready.
                this.state = FutureState::Completed;

                return Poll::Ready(Ok(()));
            }

            this.state = FutureState::Pending;
        }

        // Repeatedly poll the futures of all scheduled tasks until there are no
        // more scheduled tasks.
        loop {
            // Only register the waker if it is probable that we won't find any
            // scheduled task.
            if !this.shared.task_set.has_scheduled() {
                this.shared.wake_sink.register(cx.waker());
            }

            // Retrieve the indices of the scheduled tasks if any. If there are
            // no scheduled tasks, `Poll::Pending` is returned and this future
            // will be woken again when enough tasks have been scheduled.
            let scheduled_tasks = match this
                .shared
                .task_set
                .steal_scheduled(this.pending_futures_count)
            {
                Some(st) => st,
                None => return Poll::Pending,
            };

            for task_idx in scheduled_tasks {
                let future_env = &mut this.shared.futures_env[task_idx];

                // Do not poll completed futures.
                if future_env.output.is_some() {
                    continue;
                }

                let future = &mut this.futures[task_idx];
                let task_waker_ref = this.shared.task_set.waker_of(task_idx);
                let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                match future.as_mut().poll(task_cx_ref) {
                    Poll::Ready(Ok(output)) => {
                        future_env.output = Some(output);
                        this.pending_futures_count -= 1;
                    }
                    Poll::Ready(Err(_)) => {
                        // A single failed send fails the whole broadcast.
                        this.state = FutureState::Completed;

                        return Poll::Ready(Err(BroadcastError {}));
                    }
                    Poll::Pending => {}
                }
            }

            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;

                return Poll::Ready(Ok(()));
            }
        }
    }
}
|
||||
|
||||
/// Error returned when a message could not be delivered.
///
/// Produced whenever any individual sender future resolves with an error.
/// NOTE(review): in practice this appears to mean a receiving mailbox was
/// dropped before the message could be processed — confirm against the
/// sender implementations.
#[derive(Debug)]
pub(super) struct BroadcastError {}
|
||||
|
||||
/// Polling state of a `BroadcastFuture`.
#[derive(Debug, PartialEq)]
enum FutureState {
    /// The broadcast future has never been polled: the sender futures have
    /// not been polled yet either.
    Uninit,
    /// The broadcast future was polled at least once but some sender futures
    /// are still pending.
    Pending,
    /// The broadcast future has resolved (with success or error) and must not
    /// be polled again.
    Completed,
}
|
||||
|
||||
/// Converts an emptied vector into an empty vector of another element type,
/// reusing the original heap allocation.
///
/// All items of `v` are dropped first. Provided that `T` and `U` have
/// compatible layouts, the returned vector keeps the capacity of the input
/// vector without performing any new allocation.
///
/// # Panics
///
/// Panics in debug mode if the layouts of `T` and `U` are incompatible.
fn recycle_vec<T, U>(mut v: Vec<T>) -> Vec<U> {
    debug_assert_eq!(
        std::alloc::Layout::new::<T>(),
        std::alloc::Layout::new::<U>()
    );

    let original_capacity = v.capacity();
    v.clear();

    // No unsafe required: `collect` specializes `Vec::into_iter().map(...)`
    // into an in-place conversion when the layouts allow it. Since the vector
    // was just emptied, the mapping closure can never actually run.
    let recycled: Vec<U> = v.into_iter().map(|_| unreachable!()).collect();
    debug_assert_eq!(recycled.capacity(), original_capacity);

    recycled
}
|
||||
|
||||
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread;

    use futures_executor::block_on;

    use super::super::sender::QuerySender;
    use crate::channel::Receiver;
    use crate::model::Model;
    use crate::time::Scheduler;
    use crate::time::{MonotonicTime, TearableAtomicTime};
    use crate::util::priority_queue::PriorityQueue;
    use crate::util::sync_cell::SyncCell;

    use super::super::*;
    use super::*;

    /// Minimal test model wrapping a shared atomic counter.
    struct Counter {
        inner: Arc<AtomicUsize>,
    }
    impl Counter {
        fn new(counter: Arc<AtomicUsize>) -> Self {
            Self { inner: counter }
        }
        /// Input method: increments the counter by `by`.
        async fn inc(&mut self, by: usize) {
            self.inner.fetch_add(by, Ordering::Relaxed);
        }
        /// Replier method: increments the counter by `by` and returns the
        /// previous value.
        async fn fetch_inc(&mut self, by: usize) -> usize {
            let res = self.inner.fetch_add(by, Ordering::Relaxed);
            res
        }
    }
    impl Model for Counter {}

    /// Broadcasts one event to `N_RECV` models, each on its own thread, and
    /// checks that every model processed it exactly once.
    #[test]
    fn broadcast_event_smoke() {
        const N_RECV: usize = 4;

        // Wire one `EventSender` per receiver into the broadcaster.
        let mut mailboxes = Vec::new();
        let mut broadcaster = Broadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(EventSender::new(Counter::inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(broadcaster.broadcast_event(1)).unwrap();
        });

        let counter = Arc::new(AtomicUsize::new(0));

        // Each receiver thread processes a single message with a throwaway
        // scheduler (the models do not use scheduling).
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }

    /// Broadcasts one query to `N_RECV` models and checks both the gathered
    /// replies and the side effect on the shared counter.
    #[test]
    fn broadcast_query_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = Broadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(QuerySender::new(Counter::fetch_inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            let iter = block_on(broadcaster.broadcast_query(1)).unwrap();
            let sum = iter.fold(0, |acc, val| acc + val);

            assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
        });

        let counter = Arc::new(AtomicUsize::new(0));

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                        // Keep the mailbox alive briefly after processing so
                        // the reply can be retrieved by the broadcaster.
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }
}
|
||||
|
||||
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use futures_channel::mpsc;
    use futures_util::StreamExt;

    use loom::model::Builder;
    use loom::sync::atomic::{AtomicBool, Ordering};
    use loom::thread;

    use waker_fn::waker_fn;

    use super::super::sender::RecycledFuture;
    use super::*;

    // An event that may be waken spuriously.
    struct TestEvent<R> {
        // `None` items model spurious wake-ups; `Some(value)` completes the
        // send future with `value`.
        receiver: mpsc::UnboundedReceiver<Option<R>>,
        fut_storage: Option<RecycleBox<()>>,
    }
    impl<R: Send> Sender<(), R> for TestEvent<R> {
        fn send(&mut self, _arg: ()) -> RecycledFuture<'_, Result<R, SendError>> {
            let fut_storage = &mut self.fut_storage;
            let receiver = &mut self.receiver;

            RecycledFuture::new(fut_storage, async {
                // Skip `None` (spurious) items and resolve on the first
                // `Some` value.
                let mut stream = Box::pin(receiver.filter_map(|item| async { item }));

                Ok(stream.next().await.unwrap())
            })
        }
    }

    // An object that can wake a `TestEvent`.
    #[derive(Clone)]
    struct TestEventWaker<R> {
        sender: mpsc::UnboundedSender<Option<R>>,
    }
    impl<R> TestEventWaker<R> {
        // Wakes the event without completing it.
        fn wake_spurious(&self) {
            let _ = self.sender.unbounded_send(None);
        }
        // Wakes the event and completes it with `value`.
        fn wake_final(&self, value: R) {
            let _ = self.sender.unbounded_send(Some(value));
        }
    }

    /// Creates a linked test event/waker pair.
    fn test_event<R>() -> (TestEvent<R>, TestEventWaker<R>) {
        let (sender, receiver) = mpsc::unbounded();

        (
            TestEvent {
                receiver,
                fut_storage: None,
            },
            TestEventWaker { sender },
        )
    }

    /// Checks under loom that a query broadcast to three senders, each
    /// completed from its own thread, resolves with all replies and wakes the
    /// parent at most the expected number of times.
    #[test]
    fn loom_broadcast_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();
            let (test_event3, waker3) = test_event::<usize>();

            let mut broadcaster = Broadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));
            broadcaster.add(Box::new(test_event3), LineId(3));

            let mut fut = Box::pin(broadcaster.broadcast_query(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th3 = thread::spawn(move || waker3.wake_final(42));

            let mut schedule_count = 0;
            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        // Replies must be yielded in line order.
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), Some(42));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
                // The parent should be woken at most once before completion
                // since the notification countdown batches wake-ups.
                schedule_count += 1;
                assert!(schedule_count <= 1);
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th3.join().unwrap();

            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), Some(42));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }

    /// Same as `loom_broadcast_basic` but with an extra spurious wake-up on
    /// one of the senders, which may cause at most one extra parent wake-up.
    #[test]
    fn loom_broadcast_spurious() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();

            let mut broadcaster = Broadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));

            let mut fut = Box::pin(broadcaster.broadcast_query(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let spurious_waker = waker1.clone();
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());

            let mut schedule_count = 0;
            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
                // One extra wake-up is tolerated due to the spurious wake.
                schedule_count += 1;
                assert!(schedule_count <= 2);
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th_spurious.join().unwrap();

            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }
}
|
390
asynchronix/src/model/ports/broadcaster/task_set.rs
Normal file
390
asynchronix/src/model/ports/broadcaster/task_set.rs
Normal file
@ -0,0 +1,390 @@
|
||||
use std::sync::atomic::Ordering;
|
||||
use std::sync::Arc;
|
||||
|
||||
use diatomic_waker::WakeSource;
|
||||
use futures_task::{waker_ref, ArcWake, WakerRef};
|
||||
|
||||
use crate::loom_exports::sync::atomic::{AtomicU32, AtomicU64};
|
||||
|
||||
/// Special value for the `next` field of a task, indicating that the task to
/// which this field belongs is not currently in the list of scheduled tasks.
const SLEEPING: u32 = u32::MAX;
/// Special value for a task index, indicating the absence of task.
///
/// Both sentinels live in the same `u32` space as task indices, which is why
/// `TaskSet::resize` caps the task count at `u32::MAX - 1`.
const EMPTY: u32 = u32::MAX - 1;
/// Mask for the index of the task pointed to by the head of the list of
/// scheduled tasks (lower 32 bits of the head word).
const INDEX_MASK: u64 = u32::MAX as u64;
/// Mask for the scheduling countdown in the head of the list of scheduled
/// tasks (upper 32 bits of the head word).
const COUNTDOWN_MASK: u64 = !INDEX_MASK;
/// A single increment of the scheduling countdown in the head of the list of
/// scheduled tasks.
const COUNTDOWN_ONE: u64 = 1 << 32;
||||
|
||||
/// A set of tasks that may be scheduled cheaply and can be requested to wake a
/// parent task only when a given amount of tasks have been scheduled.
///
/// This object maintains both a list of all active tasks and a list of the
/// subset of active tasks currently scheduled. The latter is stored in a
/// Treiber stack which links tasks through indices rather than pointers. Using
/// indices has two advantages: (i) it enables a fully safe implementation and
/// (ii) it makes it possible to use a single CAS to simultaneously move the
/// head and decrement the outstanding amount of tasks to be scheduled before
/// the parent task is notified.
pub(super) struct TaskSet {
    /// Set of all active tasks, scheduled or not.
    ///
    /// In some rare cases, the back of the vector can also contain inactive
    /// (retired) tasks.
    tasks: Vec<Arc<Task>>,
    /// Head of the Treiber stack for scheduled tasks.
    ///
    /// The lower bits specify the index of the last scheduled task, if any,
    /// whereas the upper bits specify the countdown of tasks still to be
    /// scheduled before the parent task is notified.
    ///
    /// Shared with every `Task` so that wakers can push themselves onto the
    /// stack without holding a reference to the `TaskSet`.
    head: Arc<AtomicU64>,
    /// A notifier used to wake the parent task.
    notifier: WakeSource,
    /// Count of all active tasks, scheduled or not.
    task_count: usize,
}
|
||||
|
||||
impl TaskSet {
    /// Creates an initially empty set of tasks associated to the parent task
    /// which notifier is provided.
    #[allow(clippy::assertions_on_constants)]
    pub(super) fn new(notifier: WakeSource) -> Self {
        // Only 32-bit targets and above are supported: task indices are `u32`
        // but are used to index a `Vec`.
        assert!(usize::BITS >= u32::BITS);

        Self {
            tasks: Vec::new(),
            head: Arc::new(AtomicU64::new(EMPTY as u64)),
            notifier,
            task_count: 0,
        }
    }

    /// Steals scheduled tasks if any and returns an iterator over their
    /// indices, otherwise returns `None` and requests a notification to be sent
    /// after `notify_count` tasks have been scheduled.
    ///
    /// In all cases, the list of scheduled tasks is guaranteed to be empty
    /// after this call.
    ///
    /// If some tasks were stolen, no notification is requested.
    ///
    /// If no tasks were stolen, the notification is guaranteed to be triggered
    /// no later than after `notify_count` tasks have been scheduled, though it
    /// may in some cases be triggered earlier. If the specified `notify_count`
    /// is zero then no notification is requested.
    pub(super) fn steal_scheduled(&self, notify_count: usize) -> Option<TaskIterator<'_>> {
        let countdown = u32::try_from(notify_count).unwrap();

        let mut head = self.head.load(Ordering::Relaxed);
        loop {
            // If the stack is empty, arm the countdown; otherwise clear both
            // the index and any previously requested countdown.
            let new_head = if head & INDEX_MASK == EMPTY as u64 {
                (countdown as u64 * COUNTDOWN_ONE) | EMPTY as u64
            } else {
                EMPTY as u64
            };

            // Ordering: this Acquire operation synchronizes with all Release
            // operations in `Task::wake_by_ref` and ensures that all memory
            // operations performed during and before the tasks were scheduled
            // become visible.
            match self.head.compare_exchange_weak(
                head,
                new_head,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => break,
                Err(h) => head = h,
            }
        }

        let index = (head & INDEX_MASK) as u32;
        if index == EMPTY {
            None
        } else {
            Some(TaskIterator {
                task_list: self,
                next_index: index,
            })
        }
    }

    /// Discards all scheduled tasks and cancels any request for notification
    /// that may be set.
    ///
    /// This method is very cheap if there are no scheduled tasks and if no
    /// notification is currently requested.
    ///
    /// All discarded tasks are put in the sleeping (unscheduled) state.
    pub(super) fn discard_scheduled(&self) {
        if self.head.load(Ordering::Relaxed) != EMPTY as u64 {
            // Dropping the iterator ensures that all tasks are put in the
            // sleeping state.
            let _ = self.steal_scheduled(0);
        }
    }

    /// Modify the number of active tasks.
    ///
    /// Note that this method may discard all scheduled tasks.
    ///
    /// # Panic
    ///
    /// This method will panic if `len` is greater than `u32::MAX - 1`.
    pub(super) fn resize(&mut self, len: usize) {
        assert!(len <= EMPTY as usize && len <= SLEEPING as usize);

        self.task_count = len;

        // Add new tasks if necessary.
        if len >= self.tasks.len() {
            while len > self.tasks.len() {
                let idx = self.tasks.len() as u32;

                self.tasks.push(Arc::new(Task {
                    idx,
                    notifier: self.notifier.clone(),
                    next: AtomicU32::new(SLEEPING),
                    head: self.head.clone(),
                }));
            }

            return;
        }

        // Try to remove inactive tasks.
        //
        // The main issue when shrinking the set of active tasks is that stale
        // wakers may still be around and may at any moment be scheduled and
        // insert their index in the list of scheduled tasks. If it cannot be
        // guaranteed that this will not happen, then a reference to that task
        // must be kept or the iterator for scheduled tasks will panic when
        // indexing a stale task.
        //
        // To prevent an inactive task from being spuriously scheduled, it is
        // enough to pretend that the task is already scheduled by setting its
        // `next` field to anything else than `SLEEPING`. However, this could
        // race if the task has just set its `next` field but has not yet
        // updated the head of the list of scheduled tasks, so this can only be
        // done reliably if the task is currently sleeping.

        // All scheduled tasks are first unscheduled in case some of them are
        // now inactive.
        self.discard_scheduled();

        // The position of tasks in the set must stay consistent with their
        // associated index so tasks are popped from the back.
        while self.tasks.len() > len {
            // There is at least one task since `len()` was non-zero.
            let task = self.tasks.last().unwrap();

            // Ordering: Relaxed ordering is sufficient since the task is
            // effectively discarded.
            if task
                .next
                .compare_exchange(SLEEPING, EMPTY, Ordering::Relaxed, Ordering::Relaxed)
                .is_err()
            {
                // The task could not be removed for now so the set of tasks cannot
                // be shrunk further.
                break;
            }

            self.tasks.pop();
        }
    }

    /// Returns `true` if one or more tasks are currently scheduled.
    pub(super) fn has_scheduled(&self) -> bool {
        // Ordering: the content of the head is only used as an advisory flag so
        // Relaxed ordering is sufficient.
        self.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64
    }

    /// Returns a reference to the waker associated to the active task with the
    /// specified index.
    ///
    /// # Panics
    ///
    /// This method will panic if there is no active task with the provided
    /// index.
    pub(super) fn waker_of(&self, idx: usize) -> WakerRef {
        assert!(idx < self.task_count);

        waker_ref(&self.tasks[idx])
    }
}
|
||||
|
||||
/// An asynchronous task associated with the future of a sender.
///
/// Scheduling a task pushes its index onto the Treiber stack whose head is
/// shared with the owning `TaskSet`.
pub(super) struct Task {
    /// Index of this task.
    idx: u32,
    /// A notifier triggered once a certain number of tasks have been scheduled.
    notifier: WakeSource,
    /// Index of the next task in the list of scheduled tasks, or `SLEEPING`
    /// when this task is not scheduled.
    next: AtomicU32,
    /// Head of the list of scheduled tasks.
    head: Arc<AtomicU64>,
}
|
||||
|
||||
impl ArcWake for Task {
    fn wake(self: Arc<Self>) {
        Self::wake_by_ref(&self);
    }
    /// Schedules the task by pushing its index onto the shared Treiber stack,
    /// decrementing the notification countdown and notifying the parent task
    /// when the countdown reaches zero.
    fn wake_by_ref(arc_self: &Arc<Self>) {
        let mut next = arc_self.next.load(Ordering::Relaxed);

        // Phase 1: claim the task by moving its `next` field out of the
        // `SLEEPING` state, or bail out if it is already scheduled.
        let mut head = loop {
            if next == SLEEPING {
                // The task appears not to be scheduled yet: prepare its
                // insertion in the list of scheduled tasks by setting the next
                // task index to the index of the task currently pointed by the
                // head.
                //
                // Ordering: Relaxed ordering is sufficient since the upcoming
                // CAS on the head already ensure that all memory operations
                // that precede this call to `wake_by_ref` become visible when
                // the tasks are stolen.
                let head = arc_self.head.load(Ordering::Relaxed);
                match arc_self.next.compare_exchange_weak(
                    SLEEPING,
                    (head & INDEX_MASK) as u32,
                    Ordering::Relaxed,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => break head,
                    Err(n) => next = n,
                }
            } else {
                // The task appears to be already scheduled: confirm this and
                // establish proper memory synchronization by performing a no-op
                // RMW.
                //
                // Ordering: the Release ordering synchronizes with the Acquire
                // swap operation in `TaskIterator::next` and ensures that all
                // memory operations that precede this call to `wake_by_ref`
                // will be visible when the task index is yielded.
                match arc_self.next.compare_exchange_weak(
                    next,
                    next,
                    Ordering::Release,
                    Ordering::Relaxed,
                ) {
                    Ok(_) => return,
                    Err(n) => next = n,
                }
            }
        };

        // The index to the next task has been set to the index in the head.
        // Other concurrent calls to `wake` or `wake_by_ref` will now see the
        // task as scheduled so this thread is responsible for moving the head.
        loop {
            // Attempt a CAS which decrements the countdown if it is not already
            // cleared and which sets the head's index to this task's index.
            let countdown = head & COUNTDOWN_MASK;
            let new_countdown = countdown.wrapping_sub((countdown != 0) as u64 * COUNTDOWN_ONE);
            let new_head = new_countdown | arc_self.idx as u64;

            // Ordering: this Release operation synchronizes with the Acquire
            // operation on the head in `TaskSet::steal_scheduled` and ensures
            // that the value of the `next` field as well as all memory
            // operations that precede this call to `wake_by_ref` become visible
            // when the tasks are stolen.
            match arc_self.head.compare_exchange_weak(
                head,
                new_head,
                Ordering::Release,
                Ordering::Relaxed,
            ) {
                Ok(_) => {
                    // If the countdown has just been cleared, it is necessary
                    // to send a notification.
                    if countdown == COUNTDOWN_ONE {
                        arc_self.notifier.notify();
                    }

                    return;
                }
                Err(h) => {
                    head = h;

                    // Update the index of the next task to the new value of the
                    // head.
                    //
                    // Why use a swap instead of a simple store? This is to
                    // maintain a release sequence which includes previous
                    // atomic operation on this field, and more specifically any
                    // no-op CAS that could have been performed by a concurrent
                    // call to wake. This ensures in turn that all memory
                    // operations that precede a no-op CAS will be visible when
                    // `next` is Acquired in `TaskIterator::next`.
                    //
                    // Ordering: Relaxed ordering is sufficient since
                    // synchronization is ensured by the upcoming CAS on the
                    // head.
                    arc_self
                        .next
                        .swap((head & INDEX_MASK) as u32, Ordering::Relaxed);
                }
            }
        }
    }
}
|
||||
|
||||
/// An iterator over scheduled tasks.
///
/// Yields task indices while walking the stolen Treiber stack, putting each
/// visited task back into the sleeping state.
pub(super) struct TaskIterator<'a> {
    task_list: &'a TaskSet,
    /// Index of the next task to visit, or `EMPTY` when exhausted.
    next_index: u32,
}
|
||||
|
||||
impl<'a> Iterator for TaskIterator<'a> {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        while self.next_index != EMPTY {
            let index = self.next_index as usize;

            // Move to the next task and put the visited one back to sleep in a
            // single RMW.
            //
            // Ordering: the Acquire ordering synchronizes with any no-op CAS
            // that could have been performed in `Task::wake_by_ref`, ensuring
            // that all memory operations that precede such call to
            // `Task::wake_by_ref` become visible.
            self.next_index = self.task_list.tasks[index]
                .next
                .swap(SLEEPING, Ordering::Acquire);

            // Only yield the index if the task is indeed active. Retired tasks
            // may still linger at the back of the task vector (see
            // `TaskSet::resize`).
            if index < self.task_list.task_count {
                return Some(index);
            }
        }

        None
    }
}
|
||||
|
||||
impl<'a> Drop for TaskIterator<'a> {
    fn drop(&mut self) {
        // Put all remaining scheduled tasks in the sleeping state.
        //
        // Ordering: the task is ignored so it is not necessary to ensure that
        // memory operations performed before the task was scheduled are
        // visible. For the same reason, it is not necessary to synchronize with
        // no-op CAS operations in `Task::wake_by_ref`, which is why separate
        // load and store operations are used rather than a more expensive swap
        // operation.
        while self.next_index != EMPTY {
            let index = self.next_index as usize;
            self.next_index = self.task_list.tasks[index].next.load(Ordering::Relaxed);
            self.task_list.tasks[index]
                .next
                .store(SLEEPING, Ordering::Relaxed);
        }
    }
}
|
245
asynchronix/src/model/ports/sender.rs
Normal file
245
asynchronix/src/model/ports/sender.rs
Normal file
@ -0,0 +1,245 @@
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem::ManuallyDrop;
|
||||
use std::pin::Pin;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
use recycle_box::{coerce_box, RecycleBox};
|
||||
|
||||
use crate::channel;
|
||||
use crate::model::{InputFn, Model, ReplierFn};
|
||||
use crate::util::spsc_queue;
|
||||
|
||||
/// Abstraction over `EventSender` and `QuerySender`.
///
/// `T` is the payload type and `R` the reply type (`()` for plain events).
pub(super) trait Sender<T, R>: Send {
    /// Returns a recyclable future that delivers `arg` and resolves with the
    /// reply, or with `SendError` if delivery failed.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>>;
}
|
||||
|
||||
/// An object that can send a payload to a model.
pub(super) struct EventSender<M: 'static, F, T, S> {
    /// Input function invoked on the receiving model.
    func: F,
    /// Channel endpoint of the receiving model's mailbox.
    sender: channel::Sender<M>,
    /// Recycled allocation for the send future.
    fut_storage: Option<RecycleBox<()>>,
    // Anchors `M` and `T` without owning values of those types.
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    // Anchors the closure marker type `S`.
    _phantom_closure_marker: PhantomData<S>,
}
|
||||
|
||||
impl<M: Send, F, T, S> EventSender<M, F, T, S>
|
||||
where
|
||||
M: Model,
|
||||
F: for<'a> InputFn<'a, M, T, S>,
|
||||
T: Send + 'static,
|
||||
{
|
||||
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
|
||||
Self {
|
||||
func,
|
||||
sender,
|
||||
fut_storage: None,
|
||||
_phantom_closure: PhantomData,
|
||||
_phantom_closure_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<M: Send, F, T, S> Sender<T, ()> for EventSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Copy,
    T: Send + 'static,
    S: Send,
{
    /// Returns a future that delivers `arg` to the model's mailbox, resolving
    /// once the underlying channel send has completed.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
        // `func` is `Copy`, so it can be captured by value without borrowing
        // `self` inside the closure below.
        let func = self.func;

        let fut = self.sender.send(move |model, scheduler, recycle_box| {
            let fut = func.call(model, arg, scheduler);

            // Reuse the recycled allocation to store the input function's
            // future.
            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });

        // Map any channel failure to an opaque `SendError`.
        RecycledFuture::new(&mut self.fut_storage, async move {
            fut.await.map_err(|_| SendError {})
        })
    }
}
|
||||
|
||||
/// An object that can send a payload to a model and retrieve a response.
pub(super) struct QuerySender<M: 'static, F, T, R, S> {
    /// Replier function invoked on the receiving model.
    func: F,
    /// Channel endpoint of the receiving model's mailbox.
    sender: channel::Sender<M>,
    /// Reusable one-shot receiver for the model's reply.
    receiver: multishot::Receiver<R>,
    /// Recycled allocation for the send future.
    fut_storage: Option<RecycleBox<()>>,
    // Anchors `M`, `T` and `R` without owning values of those types.
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    // Anchors the closure marker type `S`.
    _phantom_closure_marker: PhantomData<S>,
}
|
||||
|
||||
impl<M, F, T, R, S> QuerySender<M, F, T, R, S>
|
||||
where
|
||||
M: Model,
|
||||
F: for<'a> ReplierFn<'a, M, T, R, S>,
|
||||
T: Send + 'static,
|
||||
R: Send + 'static,
|
||||
{
|
||||
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
|
||||
Self {
|
||||
func,
|
||||
sender,
|
||||
receiver: multishot::Receiver::new(),
|
||||
fut_storage: None,
|
||||
_phantom_closure: PhantomData,
|
||||
_phantom_closure_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<M, F, T, R, S> Sender<T, R> for QuerySender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
    T: Send + 'static,
    R: Send + 'static,
    S: Send,
{
    /// Returns a future that delivers `arg` to the model's mailbox and
    /// resolves with the model's reply.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>> {
        // Split the borrows of `self` so each piece can be captured
        // independently by the closures below.
        let func = self.func;
        let sender = &mut self.sender;
        let reply_receiver = &mut self.receiver;
        let fut_storage = &mut self.fut_storage;

        // The previous future generated by this method should have been polled
        // to completion so a new sender should be readily available.
        let reply_sender = reply_receiver.sender().unwrap();

        let send_fut = sender.send(move |model, scheduler, recycle_box| {
            let fut = async move {
                let reply = func.call(model, arg, scheduler).await;
                reply_sender.send(reply);
            };

            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });

        RecycledFuture::new(fut_storage, async move {
            // Send the message.
            send_fut.await.map_err(|_| SendError {})?;

            // Wait until the message is processed and the reply is sent back.
            // If an error is received, it most likely means the mailbox was
            // dropped before the message was processed.
            reply_receiver.recv().await.map_err(|_| SendError {})
        })
    }
}
|
||||
|
||||
/// An object that can send a payload to an unbounded queue.
pub(super) struct EventStreamSender<T> {
    /// Producer endpoint of the SPSC queue.
    producer: spsc_queue::Producer<T>,
    /// Recycled allocation backing the futures returned by `send`.
    fut_storage: Option<RecycleBox<()>>,
}

impl<T> EventStreamSender<T> {
    /// Creates a sender that pushes events onto the queue owned by `producer`.
    pub(super) fn new(producer: spsc_queue::Producer<T>) -> Self {
        Self {
            producer,
            // Lazily allocated by `RecycledFuture::new` on the first send.
            fut_storage: None,
        }
    }
}
|
||||
|
||||
impl<T> Sender<T, ()> for EventStreamSender<T>
|
||||
where
|
||||
T: Send + 'static,
|
||||
{
|
||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
||||
let producer = &mut self.producer;
|
||||
|
||||
RecycledFuture::new(&mut self.fut_storage, async move {
|
||||
producer.push(arg).map_err(|_| SendError {})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// An object that can send a payload to a mutex-protected slot.
pub(super) struct EventSlotSender<T> {
    /// Shared slot; each send overwrites the previous value, if any.
    slot: Arc<Mutex<Option<T>>>,
    /// Recycled allocation backing the futures returned by `send`.
    fut_storage: Option<RecycleBox<()>>,
}

impl<T> EventSlotSender<T> {
    /// Creates a sender that writes events into the shared `slot`.
    pub(super) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
        Self {
            slot,
            // Lazily allocated by `RecycledFuture::new` on the first send.
            fut_storage: None,
        }
    }
}
|
||||
|
||||
impl<T> Sender<T, ()> for EventSlotSender<T>
|
||||
where
|
||||
T: Send + 'static,
|
||||
{
|
||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
||||
let slot = &*self.slot;
|
||||
|
||||
RecycledFuture::new(&mut self.fut_storage, async move {
|
||||
let mut slot = slot.lock().unwrap();
|
||||
*slot = Some(arg);
|
||||
|
||||
Ok(())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
/// Error returned when the mailbox was closed or dropped.
pub(super) struct SendError {}

impl fmt::Display for SendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "sending message into a closed mailbox")
    }
}

// `SendError` carries no payload, so the default `Error` methods suffice.
impl Error for SendError {}
|
||||
|
||||
/// A future stored in a recycled, reusable heap allocation.
pub(super) struct RecycledFuture<'a, T> {
    /// The pinned, type-erased future. Wrapped in `ManuallyDrop` so that the
    /// allocation can be reclaimed in `Drop` instead of being freed.
    fut: ManuallyDrop<Pin<RecycleBox<dyn Future<Output = T> + Send + 'a>>>,
    /// Slot owned by the lender, to which the vacated box is returned on drop.
    lender_box: &'a mut Option<RecycleBox<()>>,
}
|
||||
impl<'a, T> RecycledFuture<'a, T> {
    /// Stores `fut` in the allocation held by `lender_box`, allocating a new
    /// box only if the slot is empty.
    pub(super) fn new<F: Future<Output = T> + Send + 'a>(
        lender_box: &'a mut Option<RecycleBox<()>>,
        fut: F,
    ) -> Self {
        // Take the lender's box, or allocate a fresh one on first use.
        let vacated_box = lender_box.take().unwrap_or_else(|| RecycleBox::new(()));
        // Recycle the allocation to hold the (type-erased) future.
        let fut: RecycleBox<dyn Future<Output = T> + Send + 'a> =
            coerce_box!(RecycleBox::recycle(vacated_box, fut));

        Self {
            fut: ManuallyDrop::new(RecycleBox::into_pin(fut)),
            lender_box,
        }
    }
}
|
||||
|
||||
impl<'a, T> Drop for RecycledFuture<'a, T> {
    /// Drops the inner future and hands the vacated allocation back to the
    /// lender so the next `RecycledFuture::new` can reuse it.
    fn drop(&mut self) {
        // Return the box to the lender.
        //
        // Safety: taking the `fut` member is safe since it is never used again.
        *self.lender_box = Some(RecycleBox::vacate_pinned(unsafe {
            ManuallyDrop::take(&mut self.fut)
        }));
    }
}
|
||||
|
||||
impl<'a, T> Future for RecycledFuture<'a, T> {
    type Output = T;

    /// Delegates polling to the inner pinned future.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.fut.as_mut().poll(cx)
    }
}
|
@ -1,3 +0,0 @@
|
||||
//! The asynchronix executor and supporting runtime.
|
||||
|
||||
pub(crate) mod executor;
|
@ -1,586 +0,0 @@
|
||||
use std::fmt;
|
||||
use std::iter::FusedIterator;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem::{drop, MaybeUninit};
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use std::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release};
|
||||
use std::sync::Arc;
|
||||
|
||||
use cache_padded::CachePadded;
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
use crate::loom_exports::sync::atomic::{AtomicU32, AtomicU64};
|
||||
use crate::loom_exports::{debug_or_loom_assert, debug_or_loom_assert_eq};
|
||||
|
||||
pub(super) use buffers::*;
|
||||
|
||||
mod buffers;
|
||||
#[cfg(test)]
|
||||
mod tests;
|
||||
|
||||
/// A double-ended FIFO work-stealing queue.
///
/// The general operation of the queue is based on tokio's worker queue, itself
/// based on the Go scheduler's worker queue.
///
/// The queue tracks its tail and head position within a ring buffer with
/// wrap-around integers, where the least significant bits specify the actual
/// buffer index. All positions have bit widths that are intentionally larger
/// than necessary for buffer indexing because:
/// - an extra bit is needed to disambiguate between empty and full buffers when
///   the start and end position of the buffer are equal,
/// - the worker head is also used as long-cycle counter to mitigate the risk of
///   ABA.
///
#[derive(Debug)]
struct Queue<T, B: Buffer<T>> {
    /// Positions of the head as seen by the worker (most significant bits) and
    /// as seen by a stealer (least significant bits).
    // The two heads are packed in one `AtomicU64` (see `pack_heads` /
    // `unpack_heads`) so they can be updated in a single CAS.
    heads: CachePadded<AtomicU64>,

    /// Position of the tail.
    tail: CachePadded<AtomicU32>,

    /// Queue items.
    buffer: Box<B::Data>,

    /// Make the type !Send and !Sync by default.
    _phantom: PhantomData<UnsafeCell<T>>,
}
|
||||
|
||||
impl<T, B: Buffer<T>> Queue<T, B> {
    /// Read an item at the given position.
    ///
    /// The position is automatically mapped to a valid buffer index using a
    /// modulo operation.
    ///
    /// # Safety
    ///
    /// The item at the given position must have been initialized before and
    /// cannot have been moved out.
    ///
    /// The caller must guarantee that the item at this position cannot be
    /// written to or moved out concurrently.
    #[inline]
    unsafe fn read_at(&self, position: u32) -> T {
        // `MASK` reduces the wrap-around position to a buffer index.
        let index = (position & B::MASK) as usize;
        (*self.buffer).as_ref()[index].with(|slot| slot.read().assume_init())
    }

    /// Write an item at the given position.
    ///
    /// The position is automatically mapped to a valid buffer index using a
    /// modulo operation.
    ///
    /// # Note
    ///
    /// If an item is already initialized but was not moved out yet, it will be
    /// leaked.
    ///
    /// # Safety
    ///
    /// The caller must guarantee that the item at this position cannot be read
    /// or written to concurrently.
    #[inline]
    unsafe fn write_at(&self, position: u32, item: T) {
        let index = (position & B::MASK) as usize;
        (*self.buffer).as_ref()[index].with_mut(|slot| slot.write(MaybeUninit::new(item)));
    }

    /// Attempt to book `N` items for stealing where `N` is specified by a
    /// closure which takes as argument the total count of available items.
    ///
    /// In case of success, the returned tuple contains the stealer head and an
    /// item count at least equal to 1, in this order.
    ///
    /// # Errors
    ///
    /// An error is returned in the following cases:
    /// 1) no item could be stolen, either because the queue is empty or because
    /// `N` is 0,
    /// 2) a concurrent stealing operation is ongoing.
    ///
    /// # Safety
    ///
    /// This function is not strictly unsafe, but because it initiates the
    /// stealing operation by modifying the post-stealing head in
    /// `push_count_and_head` without ever updating the `head` atomic variable,
    /// its misuse can result in permanently blocking subsequent stealing
    /// operations.
    fn book_items<C>(&self, mut count_fn: C, max_count: u32) -> Result<(u32, u32), StealError>
    where
        C: FnMut(usize) -> usize,
    {
        let mut heads = self.heads.load(Acquire);

        loop {
            let (worker_head, stealer_head) = unpack_heads(heads);

            // Bail out if both heads differ because it means another stealing
            // operation is concurrently ongoing.
            if stealer_head != worker_head {
                return Err(StealError::Busy);
            }

            let tail = self.tail.load(Acquire);
            // Wrap-around positions: the difference is the number of items.
            let item_count = tail.wrapping_sub(worker_head);

            // `item_count` is tested now because `count_fn` may expect
            // `item_count>0`.
            if item_count == 0 {
                return Err(StealError::Empty);
            }

            // Unwind safety: it is OK if `count_fn` panics because no state has
            // been modified yet.
            let count =
                (count_fn(item_count as usize).min(max_count as usize) as u32).min(item_count);

            // The special case `count_fn() == 0` must be tested specifically,
            // because if the compare-exchange succeeds with `count=0`, the new
            // worker head will be the same as the old one so other stealers
            // will not detect that stealing is currently ongoing and may try to
            // actually steal items and concurrently modify the position of the
            // heads.
            if count == 0 {
                return Err(StealError::Empty);
            }

            // Move the worker head only.
            let new_heads = pack_heads(worker_head.wrapping_add(count), stealer_head);

            // Attempt to book the slots. Only one stealer can succeed since
            // once this atomic is changed, the other thread will necessarily
            // observe a mismatch between the two heads.
            match self
                .heads
                .compare_exchange_weak(heads, new_heads, Acquire, Acquire)
            {
                Ok(_) => return Ok((stealer_head, count)),
                // We lost the race to a concurrent pop or steal operation, or
                // the CAS failed spuriously; try again.
                Err(h) => heads = h,
            }
        }
    }
}
|
||||
|
||||
impl<T, B: Buffer<T>> Drop for Queue<T, B> {
    /// Drops all items still stored between the worker head and the tail.
    fn drop(&mut self) {
        let worker_head = unpack_heads(self.heads.load(Relaxed)).0;
        let tail = self.tail.load(Relaxed);

        let count = tail.wrapping_sub(worker_head);

        for offset in 0..count {
            // SAFETY: positions in `worker_head..tail` hold initialized items
            // that were never moved out, and `&mut self` rules out concurrent
            // access.
            drop(unsafe { self.read_at(worker_head.wrapping_add(offset)) })
        }
    }
}
|
||||
|
||||
/// Handle for single-threaded FIFO push and pop operations.
#[derive(Debug)]
pub(super) struct Worker<T, B: Buffer<T>> {
    /// Shared queue state; also referenced by any `Stealer` handles.
    queue: Arc<Queue<T, B>>,
}
|
||||
|
||||
impl<T, B: Buffer<T>> Worker<T, B> {
    /// Creates a new queue and returns a `Worker` handle.
    pub(super) fn new() -> Self {
        let queue = Arc::new(Queue {
            heads: CachePadded::new(AtomicU64::new(0)),
            tail: CachePadded::new(AtomicU32::new(0)),
            buffer: B::allocate(),
            _phantom: PhantomData,
        });

        Worker { queue }
    }

    /// Creates a new `Stealer` handle associated to this `Worker`.
    ///
    /// An arbitrary number of `Stealer` handles can be created, either using
    /// this method or cloning an existing `Stealer` handle.
    pub(super) fn stealer(&self) -> Stealer<T, B> {
        Stealer {
            queue: self.queue.clone(),
        }
    }

    /// Returns the number of items that can be successfully pushed onto the
    /// queue.
    ///
    /// Note that that the spare capacity may be underestimated due to
    /// concurrent stealing operations.
    pub(super) fn spare_capacity(&self) -> usize {
        let capacity = <B as Buffer<T>>::CAPACITY;
        let stealer_head = unpack_heads(self.queue.heads.load(Relaxed)).1;
        let tail = self.queue.tail.load(Relaxed);

        // Aggregate count of available items (those which can be popped) and of
        // items currently being stolen.
        let len = tail.wrapping_sub(stealer_head);

        (capacity - len) as usize
    }

    /// Attempts to push one item at the tail of the queue.
    ///
    /// # Errors
    ///
    /// This will fail if the queue is full, in which case the item is returned
    /// as the error field.
    pub(super) fn push(&self, item: T) -> Result<(), T> {
        let stealer_head = unpack_heads(self.queue.heads.load(Acquire)).1;
        let tail = self.queue.tail.load(Relaxed);

        // Check that the buffer is not full.
        if tail.wrapping_sub(stealer_head) >= B::CAPACITY {
            return Err(item);
        }

        // Store the item.
        // SAFETY: the slot at `tail` is not published to stealers until the
        // tail is advanced below, and only this (single-threaded) worker
        // writes items.
        unsafe { self.queue.write_at(tail, item) };

        // Make the item visible by moving the tail.
        //
        // Ordering: the Release ordering ensures that the subsequent
        // acquisition of this atomic by a stealer will make the previous write
        // visible.
        self.queue.tail.store(tail.wrapping_add(1), Release);

        Ok(())
    }

    /// Attempts to push the content of an iterator at the tail of the queue.
    ///
    /// It is the responsibility of the caller to ensure that there is enough
    /// spare capacity to accommodate all iterator items, for instance by
    /// calling `[Worker::spare_capacity]` beforehand. Otherwise, the iterator
    /// is dropped while still holding the items in excess.
    pub(super) fn extend<I: IntoIterator<Item = T>>(&self, iter: I) {
        let stealer_head = unpack_heads(self.queue.heads.load(Acquire)).1;
        let mut tail = self.queue.tail.load(Relaxed);

        // Positions past `max_tail` would overwrite items not yet consumed.
        let max_tail = stealer_head.wrapping_add(B::CAPACITY);
        for item in iter {
            // Check whether the buffer is full.
            if tail == max_tail {
                break;
            }
            // Store the item.
            unsafe { self.queue.write_at(tail, item) };
            tail = tail.wrapping_add(1);
        }

        // Make the items visible by incrementing the push count.
        //
        // Ordering: the Release ordering ensures that the subsequent
        // acquisition of this atomic by a stealer will make the previous write
        // visible.
        self.queue.tail.store(tail, Release);
    }

    /// Attempts to pop one item from the head of the queue.
    ///
    /// This returns None if the queue is empty.
    pub(super) fn pop(&self) -> Option<T> {
        let mut heads = self.queue.heads.load(Acquire);

        let prev_worker_head = loop {
            let (worker_head, stealer_head) = unpack_heads(heads);
            let tail = self.queue.tail.load(Relaxed);

            // Check if the queue is empty.
            if tail == worker_head {
                return None;
            }

            // Move the worker head. The weird cast from `bool` to `u32` is to
            // steer the compiler towards branchless code.
            let next_heads = pack_heads(
                worker_head.wrapping_add(1),
                stealer_head.wrapping_add((stealer_head == worker_head) as u32),
            );

            // Attempt to book the items.
            let res = self
                .queue
                .heads
                .compare_exchange_weak(heads, next_heads, AcqRel, Acquire);

            match res {
                Ok(_) => break worker_head,
                // We lost the race to a stealer or the CAS failed spuriously; try again.
                Err(h) => heads = h,
            }
        };

        // SAFETY: the CAS above booked the slot at `prev_worker_head`
        // exclusively for this pop.
        unsafe { Some(self.queue.read_at(prev_worker_head)) }
    }

    /// Returns an iterator that steals items from the head of the queue.
    ///
    /// The returned iterator steals up to `N` items, where `N` is specified by
    /// a closure which takes as argument the total count of items available for
    /// stealing. Upon success, the number of items ultimately stolen can be
    /// from 1 to `N`, depending on the number of available items.
    ///
    /// # Beware
    ///
    /// All items stolen by the iterator should be moved out as soon as
    /// possible, because until then or until the iterator is dropped, all
    /// concurrent stealing operations will fail with [`StealError::Busy`].
    ///
    /// # Leaking
    ///
    /// If the iterator is leaked before all stolen items have been moved out,
    /// subsequent stealing operations will permanently fail with
    /// [`StealError::Busy`].
    ///
    /// # Errors
    ///
    /// An error is returned in the following cases:
    /// 1) no item was stolen, either because the queue is empty or `N` is 0,
    /// 2) a concurrent stealing operation is ongoing.
    pub(super) fn drain<C>(&self, count_fn: C) -> Result<Drain<'_, T, B>, StealError>
    where
        C: FnMut(usize) -> usize,
    {
        let (head, count) = self.queue.book_items(count_fn, u32::MAX)?;

        Ok(Drain {
            queue: &self.queue,
            head,
            from_head: head,
            to_head: head.wrapping_add(count),
        })
    }
}
|
||||
|
||||
impl<T, B: Buffer<T>> Default for Worker<T, B> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// A panic can at most leak items; it cannot leave the queue in a state that
// violates memory safety for observers.
impl<T, B: Buffer<T>> UnwindSafe for Worker<T, B> {}
impl<T, B: Buffer<T>> RefUnwindSafe for Worker<T, B> {}
// SAFETY: a `Worker` only gives out items of type `T`, so sending the handle
// to another thread is sound when `T: Send`. NOTE(review): soundness also
// relies on the atomic head/tail protocol in `Queue` — confirmed by loom tests.
unsafe impl<T: Send, B: Buffer<T>> Send for Worker<T, B> {}
|
||||
|
||||
/// A draining iterator for [`Worker<T, B>`].
///
/// This iterator is created by [`Worker::drain`]. See its documentation for
/// more.
#[derive(Debug)]
pub(super) struct Drain<'a, T, B: Buffer<T>> {
    queue: &'a Queue<T, B>,
    /// Next position to read.
    head: u32,
    /// Stealer head at the time the items were booked.
    from_head: u32,
    /// One past the last booked position.
    to_head: u32,
}
|
||||
|
||||
impl<'a, T, B: Buffer<T>> Iterator for Drain<'a, T, B> {
    type Item = T;

    /// Moves out the next booked item, releasing the stealer head once the
    /// last item has been yielded.
    fn next(&mut self) -> Option<T> {
        if self.head == self.to_head {
            return None;
        }

        // SAFETY: `book_items` reserved positions `from_head..to_head`
        // exclusively for this iterator.
        let item = Some(unsafe { self.queue.read_at(self.head) });

        self.head = self.head.wrapping_add(1);

        // We cannot rely on the caller to call `next` again after the last item
        // is yielded so the heads must be updated immediately when yielding the
        // last item.
        if self.head == self.to_head {
            // Signal that the stealing operation has completed.
            let mut heads = self.queue.heads.load(Relaxed);
            loop {
                let (worker_head, stealer_head) = unpack_heads(heads);

                debug_or_loom_assert_eq!(stealer_head, self.from_head);

                // Re-align the stealer head with the worker head so other
                // stealing operations can proceed.
                let res = self.queue.heads.compare_exchange_weak(
                    heads,
                    pack_heads(worker_head, worker_head),
                    AcqRel,
                    Acquire,
                );

                match res {
                    Ok(_) => break,
                    Err(h) => {
                        heads = h;
                    }
                }
            }
        }

        item
    }

    /// The remaining length is known exactly from the booked range.
    fn size_hint(&self) -> (usize, Option<usize>) {
        let sz = self.to_head.wrapping_sub(self.head) as usize;

        (sz, Some(sz))
    }
}
|
||||
|
||||
// `size_hint` is exact (lower == upper), so `ExactSizeIterator` holds.
impl<'a, T, B: Buffer<T>> ExactSizeIterator for Drain<'a, T, B> {}

// Once `head == to_head`, `next` keeps returning `None`, so the iterator is fused.
impl<'a, T, B: Buffer<T>> FusedIterator for Drain<'a, T, B> {}
|
||||
|
||||
impl<'a, T, B: Buffer<T>> Drop for Drain<'a, T, B> {
    fn drop(&mut self) {
        // Exhaust the iterator: this drops every remaining item and, on the
        // last one, updates the heads so subsequent stealing operations can
        // succeed.
        while self.next().is_some() {}
    }
}
|
||||
|
||||
impl<'a, T, B: Buffer<T>> UnwindSafe for Drain<'a, T, B> {}
impl<'a, T, B: Buffer<T>> RefUnwindSafe for Drain<'a, T, B> {}
// SAFETY: a `Drain` only yields items of type `T` from positions booked
// exclusively for it, so sharing/sending it is sound when `T: Send`.
unsafe impl<'a, T: Send, B: Buffer<T>> Send for Drain<'a, T, B> {}
unsafe impl<'a, T: Send, B: Buffer<T>> Sync for Drain<'a, T, B> {}
|
||||
|
||||
/// Handle for multi-threaded stealing operations.
#[derive(Debug)]
pub(super) struct Stealer<T, B: Buffer<T>> {
    /// Shared queue state, co-owned with the `Worker` and other `Stealer`s.
    queue: Arc<Queue<T, B>>,
}
|
||||
|
||||
impl<T, B: Buffer<T>> Stealer<T, B> {
    /// Attempts to steal items from the head of the queue, returning one of
    /// them directly and moving the others to the tail of another queue.
    ///
    /// Up to `N` items are stolen (including the one returned directly), where
    /// `N` is specified by a closure which takes as argument the total count of
    /// items available for stealing. Upon success, one item is returned and
    /// from 0 to `N-1` items are moved to the destination queue, depending on
    /// the number of available items and the capacity of the destination queue.
    ///
    /// The returned item is the most recent one among the stolen items.
    ///
    /// # Errors
    ///
    /// An error is returned in the following cases:
    /// 1) no item was stolen, either because the queue is empty or `N` is 0,
    /// 2) a concurrent stealing operation is ongoing.
    ///
    /// Failure to transfer any item to the destination queue is not considered
    /// an error as long as one element could be returned directly. This can
    /// occur if the destination queue is full, if the source queue has only one
    /// item or if `N` is 1.
    pub(super) fn steal_and_pop<C, BDest>(
        &self,
        dest: &Worker<T, BDest>,
        count_fn: C,
    ) -> Result<T, StealError>
    where
        C: FnMut(usize) -> usize,
        BDest: Buffer<T>,
    {
        // Compute the free capacity of the destination queue.
        //
        // Ordering: see `Worker::push()` method.
        let dest_tail = dest.queue.tail.load(Relaxed);
        let dest_stealer_head = unpack_heads(dest.queue.heads.load(Acquire)).1;
        let dest_free_capacity = BDest::CAPACITY - dest_tail.wrapping_sub(dest_stealer_head);

        debug_or_loom_assert!(dest_free_capacity <= BDest::CAPACITY);

        // `+ 1` accounts for the item that is returned directly rather than
        // transferred to the destination queue.
        let (stealer_head, count) = self.queue.book_items(count_fn, dest_free_capacity + 1)?;
        let transfer_count = count - 1;

        debug_or_loom_assert!(transfer_count <= dest_free_capacity);

        // Move all items but the last to the destination queue.
        for offset in 0..transfer_count {
            // SAFETY: `book_items` reserved these source positions exclusively,
            // and the destination slots are not yet published (tail is only
            // advanced below).
            unsafe {
                let item = self.queue.read_at(stealer_head.wrapping_add(offset));
                dest.queue.write_at(dest_tail.wrapping_add(offset), item);
            }
        }

        // Read the last item.
        let last_item = unsafe {
            self.queue
                .read_at(stealer_head.wrapping_add(transfer_count))
        };

        // Make the moved items visible by updating the destination tail position.
        //
        // Ordering: see comments in the `push()` method.
        dest.queue
            .tail
            .store(dest_tail.wrapping_add(transfer_count), Release);

        // Signal that the stealing operation has completed.
        let mut heads = self.queue.heads.load(Relaxed);
        loop {
            let (worker_head, sh) = unpack_heads(heads);

            debug_or_loom_assert_eq!(stealer_head, sh);

            // Re-align the stealer head with the worker head so other stealing
            // operations can proceed.
            let res = self.queue.heads.compare_exchange_weak(
                heads,
                pack_heads(worker_head, worker_head),
                AcqRel,
                Acquire,
            );

            match res {
                Ok(_) => return Ok(last_item),
                Err(h) => {
                    heads = h;
                }
            }
        }
    }
}
|
||||
|
||||
impl<T, B: Buffer<T>> Clone for Stealer<T, B> {
|
||||
fn clone(&self) -> Self {
|
||||
Stealer {
|
||||
queue: self.queue.clone(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, B: Buffer<T>> UnwindSafe for Stealer<T, B> {}
impl<T, B: Buffer<T>> RefUnwindSafe for Stealer<T, B> {}
// SAFETY: stealing only moves items of type `T` out of positions booked via
// the atomic head protocol, so sharing/sending the handle is sound when
// `T: Send`.
unsafe impl<T: Send, B: Buffer<T>> Send for Stealer<T, B> {}
unsafe impl<T: Send, B: Buffer<T>> Sync for Stealer<T, B> {}
|
||||
|
||||
/// Error returned when stealing is unsuccessful.
///
/// Returned by [`Worker::drain`] and [`Stealer::steal_and_pop`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum StealError {
    /// No item was stolen.
    Empty,
    /// Another concurrent stealing operation is ongoing.
    Busy,
}
|
||||
|
||||
impl fmt::Display for StealError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
match self {
|
||||
StealError::Empty => write!(f, "cannot steal from empty queue"),
|
||||
StealError::Busy => write!(f, "a concurrent steal operation is ongoing"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[inline(always)]
/// Extract the worker head and stealer head (in this order) from packed heads.
///
/// The worker head occupies the most significant 32 bits, the stealer head the
/// least significant 32 bits.
fn unpack_heads(heads: u64) -> (u32, u32) {
    let worker_head = (heads >> u32::BITS) as u32;
    let stealer_head = (heads & u64::from(u32::MAX)) as u32;

    (worker_head, stealer_head)
}
|
||||
|
||||
#[inline(always)]
/// Pack the worker head (most significant 32 bits) and the stealer head (least
/// significant 32 bits) into a single 64-bit word.
///
/// This is the inverse of `unpack_heads`. (The previous doc comment claimed
/// this only inserted a stealer head, which misdescribed the function.)
fn pack_heads(worker_head: u32, stealer_head: u32) -> u64 {
    ((worker_head as u64) << u32::BITS) | stealer_head as u64
}
|
@ -1,100 +0,0 @@
|
||||
//! Internal queue buffers of various sizes.
|
||||
|
||||
use std::fmt::Debug;
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
|
||||
/// Marker trait for fixed-size buffers.
pub(crate) trait Buffer<T>: private::Sealed {
    /// Buffer size.
    // NOTE: implementations set `MASK = CAPACITY - 1`, which is only a valid
    // index mask when `CAPACITY` is a power of two.
    const CAPACITY: u32;

    #[doc(hidden)]
    /// Buffer index bit mask.
    const MASK: u32;

    #[doc(hidden)]
    /// Buffer data type.
    type Data: AsRef<[UnsafeCell<MaybeUninit<T>>]> + Debug;

    #[doc(hidden)]
    /// Returns an uninitialized buffer.
    fn allocate() -> Box<Self::Data>;
}
|
||||
|
||||
// Defines a sealed marker type `$b` implementing `Buffer<T>` for a ring buffer
// of capacity `$cap`. `$cap` must be a power of two since the index mask is
// `$cap - 1`.
macro_rules! make_buffer {
    ($b:ident, $cap:expr) => {
        #[doc = concat!("Marker type for buffers of capacity ", $cap, ".")]
        #[derive(Copy, Clone, Debug)]
        pub(crate) struct $b {}

        impl private::Sealed for $b {}

        impl<T> Buffer<T> for $b {
            const CAPACITY: u32 = $cap;

            #[doc(hidden)]
            const MASK: u32 = $cap - 1;

            #[doc(hidden)]
            type Data = [UnsafeCell<MaybeUninit<T>>; $cap];

            #[doc(hidden)]
            #[cfg(not(asynchronix_loom))]
            fn allocate() -> Box<Self::Data> {
                // Safety: initializing an array of `MaybeUninit` items with
                // `assume_init()` is valid, as per the `MaybeUninit` documentation.
                // Admittedly the situation is slightly different here: the buffer is
                // made of `MaybeUninit` elements wrapped in `UnsafeCell`s; however, the
                // latter is a `repr(transparent)` type with a trivial constructor, so
                // this should not make any difference.
                Box::new(unsafe { MaybeUninit::uninit().assume_init() })
            }
            #[doc(hidden)]
            #[cfg(asynchronix_loom)]
            fn allocate() -> Box<Self::Data> {
                // Loom's `UnsafeCell` is not `repr(transparent)` and does not
                // have a trivial constructor so initialization must be done
                // element-wise.
                fn make_fixed_size<T>(buffer: Box<[T]>) -> Box<[T; $cap]> {
                    assert_eq!(buffer.len(), $cap);

                    // Safety: The length was checked.
                    unsafe { Box::from_raw(Box::into_raw(buffer).cast()) }
                }

                let mut buffer = Vec::with_capacity($cap);
                for _ in 0..$cap {
                    buffer.push(UnsafeCell::new(MaybeUninit::uninit()));
                }

                make_fixed_size(buffer.into_boxed_slice())
            }
        }
    };
}
|
||||
|
||||
// Define buffer capacities up to 2^15, which is the maximum that can be
|
||||
// supported with 16-bit wide buffer positions (1 bit is required for
|
||||
// disambiguation between full and empty buffer).
|
||||
make_buffer!(B2, 2);
|
||||
make_buffer!(B4, 4);
|
||||
make_buffer!(B8, 8);
|
||||
make_buffer!(B16, 16);
|
||||
make_buffer!(B32, 32);
|
||||
make_buffer!(B64, 64);
|
||||
make_buffer!(B128, 128);
|
||||
make_buffer!(B256, 256);
|
||||
make_buffer!(B512, 512);
|
||||
make_buffer!(B1024, 1024);
|
||||
make_buffer!(B2048, 2048);
|
||||
make_buffer!(B4096, 4096);
|
||||
make_buffer!(B8192, 8192);
|
||||
make_buffer!(B16384, 12384);
|
||||
make_buffer!(B32768, 32768);
|
||||
|
||||
/// Prevent public implementation of Buffer.
// The `Buffer` supertrait bound on `Sealed` keeps the set of buffer types
// closed to this crate.
mod private {
    pub(crate) trait Sealed {}
}
|
@ -1,240 +0,0 @@
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::thread::spawn;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Rotate the internal ring buffer indices by `n`.
//
// Pushes and immediately steals `n` default items, advancing the wrap-around
// head/tail positions without changing the queue's observable content. Used by
// the tests below to exercise position wrap-around.
fn rotate<T: Default + std::fmt::Debug, B: Buffer<T>>(worker: &Worker<T, B>, n: usize) {
    let stealer = worker.stealer();
    let dummy_worker = Worker::<T, B2>::new();

    for _ in 0..n {
        worker.push(T::default()).unwrap();
        stealer.steal_and_pop(&dummy_worker, |_| 1).unwrap();
    }
}
|
||||
|
||||
#[test]
fn queue_single_threaded_steal() {
    // Fewer rotations under Miri — presumably to keep interpreted runtime
    // manageable (NOTE(review): confirm intent).
    let rotations: &[_] = if cfg!(miri) {
        &[42]
    } else {
        &[0, 255, 256, 257, 65535, 65536, 65537]
    };
    for &rotation in rotations {
        let worker1 = Worker::<_, B128>::new();
        let worker2 = Worker::<_, B128>::new();
        let stealer1 = worker1.stealer();
        rotate(&worker1, rotation);
        rotate(&worker2, rotation);

        worker1.push(1).unwrap();
        worker1.push(2).unwrap();
        worker1.push(3).unwrap();
        worker1.push(4).unwrap();

        // Pop one, then steal two: one is transferred to `worker2` and the
        // most recent of the stolen pair (3) is returned directly.
        assert_eq!(worker1.pop(), Some(1));
        assert_eq!(stealer1.steal_and_pop(&worker2, |_| 2), Ok(3));
        assert_eq!(worker1.pop(), Some(4));
        assert_eq!(worker1.pop(), None);
        assert_eq!(worker2.pop(), Some(2));
        assert_eq!(worker2.pop(), None);
    }
}
|
||||
|
||||
#[test]
fn queue_self_steal() {
    let rotations: &[_] = if cfg!(miri) {
        &[42]
    } else {
        &[0, 255, 256, 257, 65535, 65536, 65537]
    };
    for &rotation in rotations {
        let worker = Worker::<_, B128>::new();
        rotate(&worker, rotation);
        let stealer = worker.stealer();

        worker.push(1).unwrap();
        worker.push(2).unwrap();
        worker.push(3).unwrap();
        worker.push(4).unwrap();

        // Stealing into the same queue: item 2 is re-appended at the tail,
        // item 3 is returned directly.
        assert_eq!(worker.pop(), Some(1));
        assert_eq!(stealer.steal_and_pop(&worker, |_| 2), Ok(3));
        assert_eq!(worker.pop(), Some(4));
        assert_eq!(worker.pop(), Some(2));
        assert_eq!(worker.pop(), None);
    }
}
|
||||
|
||||
#[test]
fn queue_drain_steal() {
    let rotations: &[_] = if cfg!(miri) {
        &[42]
    } else {
        &[0, 255, 256, 257, 65535, 65536, 65537]
    };
    for &rotation in rotations {
        let worker = Worker::<_, B128>::new();
        let dummy_worker = Worker::<_, B128>::new();
        let stealer = worker.stealer();
        rotate(&worker, rotation);

        worker.push(1).unwrap();
        worker.push(2).unwrap();
        worker.push(3).unwrap();
        worker.push(4).unwrap();

        assert_eq!(worker.pop(), Some(1));
        // While the drain iterator holds booked items, concurrent steals must
        // fail with `Busy` until the last drained item is yielded.
        let mut iter = worker.drain(|n| n - 1).unwrap();
        assert_eq!(
            stealer.steal_and_pop(&dummy_worker, |_| 1),
            Err(StealError::Busy)
        );
        assert_eq!(iter.next(), Some(2));
        assert_eq!(
            stealer.steal_and_pop(&dummy_worker, |_| 1),
            Err(StealError::Busy)
        );
        assert_eq!(iter.next(), Some(3));
        // Yielding the last drained item (3) released the heads, so stealing
        // succeeds again.
        assert_eq!(stealer.steal_and_pop(&dummy_worker, |_| 1), Ok(4));
        assert_eq!(iter.next(), None);
    }
}
|
||||
|
||||
#[test]
fn queue_extend_basic() {
    // `extend` must append items after manually pushed ones and account for
    // them in the spare capacity.
    let rotations: &[_] = if cfg!(miri) {
        &[42]
    } else {
        &[0, 255, 256, 257, 65535, 65536, 65537]
    };

    for &rot in rotations {
        let queue = Worker::<_, B128>::new();
        rotate(&queue, rot);

        let capacity_before = queue.spare_capacity();
        queue.push(1).unwrap();
        queue.push(2).unwrap();
        queue.extend([3, 4]);

        // Four items were enqueued in total, in FIFO order.
        assert_eq!(queue.spare_capacity(), capacity_before - 4);
        for expected in 1..=4 {
            assert_eq!(queue.pop(), Some(expected));
        }
        assert_eq!(queue.pop(), None);
    }
}
|
||||
|
||||
#[test]
fn queue_extend_overflow() {
    // `extend` fed an unbounded iterator must stop exactly when the queue is
    // full rather than loop forever or overwrite existing items.
    let rotations: &[_] = if cfg!(miri) {
        &[42]
    } else {
        &[0, 255, 256, 257, 65535, 65536, 65537]
    };

    for &rot in rotations {
        let queue = Worker::<_, B128>::new();
        rotate(&queue, rot);

        let capacity_before = queue.spare_capacity();
        queue.push(1).unwrap();
        queue.push(2).unwrap();
        queue.extend(3..); // attempt to append infinitely many integers

        // The queue must now be exactly full...
        assert_eq!(queue.spare_capacity(), 0);
        // ...holding the consecutive integers 1..=capacity_before.
        for expected in 1..=capacity_before {
            assert_eq!(queue.pop(), Some(expected));
        }
        assert_eq!(queue.pop(), None);
    }
}
|
||||
|
||||
#[test]
fn queue_multi_threaded_steal() {
    use crate::runtime::executor::rng::Rng;

    // Total number of items pushed through the queue. Kept small under Miri,
    // which interprets the program orders of magnitude slower.
    const N: usize = if cfg!(miri) { 50 } else { 1_000_000 };

    let counter = Arc::new(AtomicUsize::new(0));
    let worker = Worker::<_, B128>::new();
    let stealer = worker.stealer();

    let counter0 = counter.clone();
    let stealer1 = stealer.clone();
    let counter1 = counter.clone();

    // Worker thread.
    //
    // Push all numbers from 0 to N, popping one from time to time.
    // Returns a histogram of the items this thread itself popped.
    let t0 = spawn(move || {
        let mut i = 0;
        let rng = Rng::new(0);
        let mut stats = vec![0; N];
        'outer: loop {
            for _ in 0..(rng.gen_bounded(10) + 1) {
                // Spin until the stealers have made room for the next item.
                while worker.push(i).is_err() {}
                i += 1;
                if i == N {
                    break 'outer;
                }
            }
            if let Some(j) = worker.pop() {
                stats[j] += 1;
                counter0.fetch_add(1, Ordering::Relaxed);
            }
        }

        stats
    });

    // Stealer threads.
    //
    // Repeatedly steal a random number of items until all N items have been
    // accounted for. Returns a histogram of the items this thread obtained.
    fn steal_periodically(
        stealer: Stealer<usize, B128>,
        counter: Arc<AtomicUsize>,
        rng_seed: u64,
    ) -> Vec<usize> {
        let mut stats = vec![0; N];
        let rng = Rng::new(rng_seed);
        let dest_worker = Worker::<_, B128>::new();

        loop {
            if let Ok(i) =
                stealer.steal_and_pop(&dest_worker, |m| rng.gen_bounded(m as u64 + 1) as usize)
            {
                stats[i] += 1; // the popped item
                counter.fetch_add(1, Ordering::Relaxed);
                while let Some(j) = dest_worker.pop() {
                    stats[j] += 1;
                    counter.fetch_add(1, Ordering::Relaxed);
                }
            }
            let count = counter.load(Ordering::Relaxed);
            if count == N {
                break;
            }
            // The counter can never overshoot since each item is counted
            // exactly once.
            assert!(count < N);
        }

        stats
    }
    let t1 = spawn(move || steal_periodically(stealer1, counter1, 1));
    let t2 = spawn(move || steal_periodically(stealer, counter, 2));

    let stats = vec![t0.join().unwrap(), t1.join().unwrap(), t2.join().unwrap()];

    // Each item must have been observed exactly once across all threads.
    for i in 0..N {
        let count: usize = stats.iter().map(|s| s[i]).sum();
        assert_eq!(count, 1);
    }
}
|
@ -1,323 +0,0 @@
|
||||
use super::*;
|
||||
|
||||
use ::loom::model::Builder;
|
||||
use ::loom::thread;
|
||||
|
||||
// Test adapted from the Tokio test suite.
|
||||
#[test]
fn loom_queue_basic_steal() {
    // Exhaustively model-check concurrent push/pop on the worker side against
    // repeated steals from another thread, verifying that all items are
    // accounted for exactly once.
    const DEFAULT_PREEMPTION_BOUND: usize = 3;
    const LOOP_COUNT: usize = 2;
    const ITEM_COUNT_PER_LOOP: usize = 3;

    let mut builder = Builder::new();
    // Bound the number of preemptions unless overridden (e.g. via the
    // LOOM_MAX_PREEMPTIONS environment variable) to keep the state space
    // tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let worker = Worker::<usize, B4>::new();
        let stealer = worker.stealer();

        // Stealer thread: counts every item it manages to transfer.
        // Note: the closure parameter `n` (item count seen by the stealer)
        // shadows the outer tally `n`.
        let th = thread::spawn(move || {
            let dest_worker = Worker::<usize, B4>::new();
            let mut n = 0;

            for _ in 0..3 {
                if stealer.steal_and_pop(&dest_worker, |n| n - n / 2).is_ok() {
                    n += 1;
                    while dest_worker.pop().is_some() {
                        n += 1;
                    }
                }
            }

            n
        });

        let mut n = 0;

        for _ in 0..LOOP_COUNT {
            for _ in 0..(ITEM_COUNT_PER_LOOP - 1) {
                // A failed push still counts: the item never entered the
                // queue, so no other thread can account for it.
                if worker.push(42).is_err() {
                    n += 1;
                }
            }

            if worker.pop().is_some() {
                n += 1;
            }

            // Push another task
            if worker.push(42).is_err() {
                n += 1;
            }

            while worker.pop().is_some() {
                n += 1;
            }
        }

        n += th.join().unwrap();

        // Every item is counted exactly once, whether popped, stolen or
        // rejected at push time.
        assert_eq!(ITEM_COUNT_PER_LOOP * LOOP_COUNT, n);
    });
}
|
||||
|
||||
// Test adapted from the Tokio test suite.
|
||||
#[test]
fn loom_queue_drain_overflow() {
    // Model-check `drain` used as an overflow strategy: when a push fails,
    // the worker drains about half of the queue and retries, while another
    // thread concurrently steals.
    const DEFAULT_PREEMPTION_BOUND: usize = 4;
    const ITEM_COUNT: usize = 7;

    let mut builder = Builder::new();
    // Bound the preemption count unless already set, to keep the loom state
    // space tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let worker = Worker::<usize, B4>::new();
        let stealer = worker.stealer();

        // Stealer thread: returns how many items it transferred.
        let th = thread::spawn(move || {
            let dest_worker = Worker::<usize, B4>::new();
            let mut n = 0;

            if stealer.steal_and_pop(&dest_worker, |n| n - n / 2).is_ok() {
                n += 1;
                while dest_worker.pop().is_some() {
                    n += 1;
                }
            }

            n
        });

        let mut n = 0;

        // Push an item, pop an item.
        worker.push(42).unwrap();

        if worker.pop().is_some() {
            n += 1;
        }

        for _ in 0..(ITEM_COUNT - 1) {
            if worker.push(42).is_err() {
                // Spin until some of the old items can be drained to make room
                // for the new item.
                loop {
                    if let Ok(drain) = worker.drain(|n| n - n / 2) {
                        for _ in drain {
                            n += 1;
                        }
                        // After draining, the retried push must succeed.
                        assert_eq!(worker.push(42), Ok(()));
                        break;
                    }
                    // `drain` can fail while a steal is in flight; yield so the
                    // model explores the stealer's progress.
                    thread::yield_now();
                }
            }
        }

        n += th.join().unwrap();

        while worker.pop().is_some() {
            n += 1;
        }

        // All items are accounted for exactly once.
        assert_eq!(ITEM_COUNT, n);
    });
}
|
||||
|
||||
// Test adapted from the Tokio test suite.
|
||||
#[test]
fn loom_queue_multi_stealer() {
    // Model-check two stealers racing against a single pushing/popping worker.
    const DEFAULT_PREEMPTION_BOUND: usize = 3;
    const ITEM_COUNT: usize = 5;

    // Steals roughly half of the observed items into a private worker and
    // returns how many items were actually obtained (0 if the steal failed).
    fn steal_half(stealer: Stealer<usize, B4>) -> usize {
        let dest_worker = Worker::<usize, B4>::new();

        if stealer.steal_and_pop(&dest_worker, |n| n - n / 2).is_ok() {
            // The steal itself pops one item, hence the tally starts at 1.
            let mut n = 1;
            while dest_worker.pop().is_some() {
                n += 1;
            }

            n
        } else {
            0
        }
    }

    let mut builder = Builder::new();
    // Bound the preemption count unless already set, to keep the loom state
    // space tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let worker = Worker::<usize, B4>::new();
        let stealer1 = worker.stealer();
        let stealer2 = worker.stealer();

        let th1 = thread::spawn(move || steal_half(stealer1));
        let th2 = thread::spawn(move || steal_half(stealer2));

        let mut n = 0;
        for _ in 0..ITEM_COUNT {
            // A failed push counts as the item never having entered the queue.
            if worker.push(42).is_err() {
                n += 1;
            }
        }

        while worker.pop().is_some() {
            n += 1;
        }

        n += th1.join().unwrap();
        n += th2.join().unwrap();

        // Every item is counted exactly once across all three threads.
        assert_eq!(ITEM_COUNT, n);
    });
}
|
||||
|
||||
// Test adapted from the Tokio test suite.
|
||||
#[test]
fn loom_queue_chained_steal() {
    // Model-check a steal chain: one thread steals from `w1` while the main
    // thread concurrently steals from `w2` *into* `w1`, so `w1` is both a
    // steal source and a steal destination at the same time.
    const DEFAULT_PREEMPTION_BOUND: usize = 4;

    let mut builder = Builder::new();
    // Bound the preemption count unless already set, to keep the loom state
    // space tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let w1 = Worker::<usize, B4>::new();
        let w2 = Worker::<usize, B4>::new();
        let s1 = w1.stealer();
        let s2 = w2.stealer();

        for _ in 0..4 {
            w1.push(42).unwrap();
            w2.push(42).unwrap();
        }

        // Concurrent stealer draining half of `w1` into a private worker.
        let th = thread::spawn(move || {
            let dest_worker = Worker::<usize, B4>::new();
            let _ = s1.steal_and_pop(&dest_worker, |n| n - n / 2);

            while dest_worker.pop().is_some() {}
        });

        while w1.pop().is_some() {}

        // Steal from `w2` directly into `w1` while `th` may be stealing
        // from `w1`.
        let _ = s2.steal_and_pop(&w1, |n| n - n / 2);

        th.join().unwrap();

        // No assertion on counts here: the test relies on loom catching any
        // memory-model violation during the chained steal.
        while w1.pop().is_some() {}
        while w2.pop().is_some() {}
    });
}
|
||||
|
||||
// A variant of multi-stealer with concurrent push.
|
||||
#[test]
fn loom_queue_push_and_steal() {
    // Model-check two stealers racing while the worker pushes concurrently
    // (unlike `loom_queue_multi_stealer`, the pushes here can interleave with
    // the steals from the very start).
    const DEFAULT_PREEMPTION_BOUND: usize = 4;

    // Steals roughly half of the observed items into a private worker and
    // returns how many items were actually obtained (0 if the steal failed).
    fn steal_half(stealer: Stealer<usize, B4>) -> usize {
        let dest_worker = Worker::<usize, B4>::new();

        if stealer.steal_and_pop(&dest_worker, |n| n - n / 2).is_ok() {
            // The steal itself pops one item, hence the tally starts at 1.
            let mut n = 1;
            while dest_worker.pop().is_some() {
                n += 1;
            }

            n
        } else {
            0
        }
    }

    let mut builder = Builder::new();
    // Bound the preemption count unless already set, to keep the loom state
    // space tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let worker = Worker::<usize, B4>::new();
        let stealer1 = worker.stealer();
        let stealer2 = worker.stealer();

        let th1 = thread::spawn(move || steal_half(stealer1));
        let th2 = thread::spawn(move || steal_half(stealer2));

        worker.push(42).unwrap();
        worker.push(42).unwrap();

        let mut n = 0;
        while worker.pop().is_some() {
            n += 1;
        }

        n += th1.join().unwrap();
        n += th2.join().unwrap();

        // Both pushed items must be accounted for exactly once.
        assert_eq!(n, 2);
    });
}
|
||||
|
||||
// Attempts extending the queue based on `Worker::spare_capacity`.
#[test]
fn loom_queue_extend() {
    const DEFAULT_PREEMPTION_BOUND: usize = 4;

    // Steals roughly half of the observed items into a private worker and
    // returns how many items were actually obtained (0 if the steal failed).
    fn steal_half(stealer: Stealer<usize, B4>) -> usize {
        let dest_worker = Worker::<usize, B4>::new();

        if stealer.steal_and_pop(&dest_worker, |n| n - n / 2).is_ok() {
            // The steal itself pops one item, hence the tally starts at 1.
            let mut n = 1;
            while dest_worker.pop().is_some() {
                n += 1;
            }

            n
        } else {
            0
        }
    }

    let mut builder = Builder::new();
    // Bound the preemption count unless already set, to keep the loom state
    // space tractable.
    if builder.preemption_bound.is_none() {
        builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
    }

    builder.check(|| {
        let worker = Worker::<usize, B4>::new();
        let stealer1 = worker.stealer();
        let stealer2 = worker.stealer();

        let th1 = thread::spawn(move || steal_half(stealer1));
        let th2 = thread::spawn(move || steal_half(stealer2));

        worker.push(1).unwrap();
        worker.push(7).unwrap();

        // Try to fill up the queue.
        // Concurrent steals can only *increase* the actual spare capacity, so
        // extending by the snapshot value must always succeed in full.
        let spare_capacity = worker.spare_capacity();
        assert!(spare_capacity >= 2);
        worker.extend(0..spare_capacity);

        let mut n = 0;

        n += th1.join().unwrap();
        n += th2.join().unwrap();

        while worker.pop().is_some() {
            n += 1;
        }

        // The 2 initial items plus everything added by `extend` must be
        // accounted for exactly once.
        assert_eq!(2 + spare_capacity, n);
    });
}
|
@ -1,7 +0,0 @@
|
||||
use super::*;

// Regular unit tests, compiled with the standard library's atomics.
#[cfg(not(asynchronix_loom))]
mod general;

// Concurrency tests executed under the `loom` model checker; enabled by
// building with `RUSTFLAGS="--cfg asynchronix_loom"`.
#[cfg(asynchronix_loom)]
mod loom;
|
499
asynchronix/src/simulation.rs
Normal file
499
asynchronix/src/simulation.rs
Normal file
@ -0,0 +1,499 @@
|
||||
//! Discrete-event simulation management.
|
||||
//!
|
||||
//! This module contains most notably the [`Simulation`] environment, the
|
||||
//! [`SimInit`] simulation builder, the [`Mailbox`] and [`Address`] types as
|
||||
//! well as miscellaneous other types related to simulation management.
|
||||
//!
|
||||
//! # Simulation lifecycle
|
||||
//!
|
||||
//! The lifecycle of a simulation bench typically comprises the following
|
||||
//! stages:
|
||||
//!
|
||||
//! 1) instantiation of models and their [`Mailbox`]es,
|
||||
//! 2) connection of the models' output/requestor ports to input/replier ports
|
||||
//! using the [`Address`]es of the target models,
|
||||
//! 3) instantiation of a [`SimInit`] simulation builder and migration of all
|
||||
//! models and mailboxes to the builder with [`SimInit::add_model()`],
|
||||
//! 4) initialization of a [`Simulation`] instance with [`SimInit::init()`],
|
||||
//! 5) discrete-time simulation, which typically involves scheduling events and
|
||||
//! incrementing simulation time while observing the models outputs.
|
||||
//!
|
||||
//! Most information necessary to run a simulation is available in the root
|
||||
//! crate [documentation](crate) and in the [`SimInit`] and [`Simulation`]
|
||||
//! documentation. The next section complements this information with a set of
|
||||
//! practical recommendations that can help run and troubleshoot simulations.
|
||||
//!
|
||||
//! # Practical considerations
|
||||
//!
|
||||
//! ## Mailbox capacity
|
||||
//!
|
||||
//! A [`Mailbox`] is a buffer that stores incoming events and queries for a
|
||||
//! single model instance. Mailboxes have a bounded capacity, which defaults to
|
||||
//! [`Mailbox::DEFAULT_CAPACITY`].
|
||||
//!
|
||||
//! The capacity is a trade-off: too large a capacity may lead to excessive
|
||||
//! memory usage, whereas too small a capacity can hamper performance and
|
||||
//! increase the likelihood of deadlocks (see next section). Note that, because
|
||||
//! a mailbox may receive events or queries of various sizes, it is actually the
|
||||
//! largest message sent that ultimately determines the amount of allocated
|
||||
//! memory.
|
||||
//!
|
||||
//! The default capacity should prove a reasonable trade-off in most cases, but
|
||||
//! for situations where it is not appropriate, it is possible to instantiate
|
||||
//! mailboxes with a custom capacity by using [`Mailbox::with_capacity()`]
|
||||
//! instead of [`Mailbox::new()`].
|
||||
//!
|
||||
//! ## Avoiding deadlocks
|
||||
//!
|
||||
//! While the underlying architecture of Asynchronix—the actor model—should
|
||||
//! prevent most race conditions (including obviously data races which are not
|
||||
//! possible in safe Rust) it is still possible in theory to generate deadlocks.
|
||||
//! Though rare in practice, these may occur due to one of the below:
|
||||
//!
|
||||
//! 1. *query loopback*: if a model sends a query which is further forwarded by
|
||||
//! other models until it loops back to the initial model, that model would
|
||||
//! in effect wait for its own response and block,
|
||||
//! 2. *mailbox saturation*: if several models concurrently send to one another
|
||||
//! a very large number of messages in succession, these models may end up
|
||||
//! saturating all mailboxes, at which point they will wait for the other's
|
||||
//! mailboxes to free space so they can send the next message, eventually
|
||||
//! preventing all of them from making further progress.
|
||||
//!
|
||||
//! The first scenario is usually very easy to avoid and is typically the result
|
||||
//! of an improper assembly of models. Because requestor ports are only used
|
||||
//! sparingly in idiomatic simulations, this situation should be relatively
|
||||
//! exceptional.
|
||||
//!
|
||||
//! The second scenario is rare in well-behaving models and if it occurs, it is
|
||||
//! most typically at the very beginning of a simulation when all models
|
||||
//! simultaneously send events during the call to
|
||||
//! [`Model::init()`](crate::model::Model::init). If such a large amount of
|
||||
//! concurrent messages is deemed normal behavior, the issue can be readily
|
||||
//! remedied by increasing the capacity of the saturated mailboxes.
|
||||
//!
|
||||
//! At the moment, Asynchronix is unfortunately not able to discriminate between
|
||||
//! such pathological deadlocks and the "expected" deadlock that occurs when all
|
||||
//! tasks in a given time slice have completed and all models are starved on an
|
||||
//! empty mailbox. Consequently, blocking methods such as [`SimInit::init()`],
|
||||
//! [`Simulation::step()`], [`Simulation::send_event()`], etc., will return
|
||||
//! without error after a pathological deadlock, leaving the user responsible
|
||||
//! for inferring the deadlock from the behavior of the simulation in the next
|
||||
//! steps. This is obviously not ideal, but is hopefully only a temporary state
|
||||
//! of things until a more precise deadlock detection algorithm is implemented.
|
||||
//!
|
||||
//! ## Modifying connections during simulation
|
||||
//!
|
||||
//! Although uncommon, there is sometimes a need for connecting and/or
|
||||
//! disconnecting models after they have been migrated to the simulation.
|
||||
//! Likewise, one may want to connect or disconnect an [`EventSlot`] or
|
||||
//! [`EventStream`] after the simulation has been instantiated.
|
||||
//!
|
||||
//! There is actually a very simple solution to this problem: since the
|
||||
//! [`InputFn`](crate::model::InputFn) trait also matches closures of type
|
||||
//! `FnOnce(&mut impl Model)`, it is enough to invoke
|
||||
//! [`Simulation::send_event()`] with a closure that connects or disconnects
|
||||
//! a port, such as:
|
||||
//!
|
||||
//! ```
|
||||
//! # use asynchronix::model::{Model, Output};
|
||||
//! # use asynchronix::time::{MonotonicTime, Scheduler};
|
||||
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
||||
//! # pub struct ModelA {
|
||||
//! # pub output: Output<i32>,
|
||||
//! # }
|
||||
//! # impl Model for ModelA {};
|
||||
//! # pub struct ModelB {}
|
||||
//! # impl ModelB {
|
||||
//! # pub fn input(&mut self, value: i32) {}
|
||||
//! # }
|
||||
//! # impl Model for ModelB {};
|
||||
//! # let modelA_addr = Mailbox::<ModelA>::new().address();
|
||||
//! # let modelB_addr = Mailbox::<ModelB>::new().address();
|
||||
//! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH);
|
||||
//! simu.send_event(
|
||||
//! |m: &mut ModelA| {
|
||||
//! m.output.connect(ModelB::input, modelB_addr);
|
||||
//! },
|
||||
//! (),
|
||||
//! &modelA_addr
|
||||
//! );
|
||||
//! ```
|
||||
mod endpoints;
|
||||
mod mailbox;
|
||||
mod sim_init;
|
||||
|
||||
pub use endpoints::{EventSlot, EventStream};
|
||||
pub use mailbox::{Address, Mailbox};
|
||||
pub use sim_init::SimInit;
|
||||
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use recycle_box::{coerce_box, RecycleBox};
|
||||
|
||||
use crate::executor::Executor;
|
||||
use crate::model::{InputFn, Model, ReplierFn};
|
||||
use crate::time::{self, CancellationError, MonotonicTime, TearableAtomicTime};
|
||||
use crate::time::{ScheduledTimeError, SchedulerKey, SchedulerQueue};
|
||||
use crate::util::futures::SeqFuture;
|
||||
use crate::util::slot;
|
||||
use crate::util::sync_cell::SyncCell;
|
||||
|
||||
/// Simulation environment.
|
||||
///
|
||||
/// A `Simulation` is created by calling the
|
||||
/// [`SimInit::init()`](crate::simulation::SimInit::init) method on a simulation
|
||||
/// initializer. It contains an asynchronous executor that runs all simulation
|
||||
/// models added beforehand to [`SimInit`](crate::simulation::SimInit).
|
||||
///
|
||||
/// A [`Simulation`] object also manages an event scheduling queue and
|
||||
/// simulation time. The scheduling queue can be accessed from the simulation
|
||||
/// itself, but also from models via the optional
|
||||
/// [`&Scheduler`][crate::time::Scheduler] argument of input and replier port
|
||||
/// methods. Likewise, simulation time can be accessed with the
|
||||
/// [`Simulation::time()`] method, or from models with the
|
||||
/// [`Scheduler::time()`](crate::time::Scheduler::time) method.
|
||||
///
|
||||
/// Events and queries can be scheduled immediately, *i.e.* for the current
|
||||
/// simulation time, using [`send_event()`](Simulation::send_event) and
|
||||
/// [`send_query()`](Simulation::send_query). Calling these methods will block
|
||||
/// until all computations triggered by such event or query have completed. In
|
||||
/// the case of queries, the response is returned.
|
||||
///
|
||||
/// Events can also be scheduled at a future simulation time using
|
||||
/// [`schedule_in()`](Simulation::schedule_in) or
|
||||
/// [`schedule_at()`](Simulation::schedule_at). These methods queue an event
|
||||
/// without blocking.
|
||||
///
|
||||
/// Finally, the [`Simulation`] instance manages simulation time. Calling
|
||||
/// [`step()`](Simulation::step) will increment simulation time until that of
|
||||
/// the next scheduled event in chronological order, whereas
|
||||
/// [`step_by()`](Simulation::step_by) and
|
||||
/// [`step_until()`](Simulation::step_until) can increment time by an arbitrary
|
||||
/// duration, running the computations for all intermediate time slices
|
||||
/// sequentially. These methods will block until all computations for the
|
||||
/// relevant time slice(s) have completed.
|
||||
pub struct Simulation {
    // Asynchronous executor running the tasks of all registered models.
    executor: Executor,
    // Priority queue of scheduled events; behind `Arc<Mutex>` since, per the
    // type-level docs above, it is also accessible from the models'
    // schedulers.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    // Current simulation time; a `SyncCell` so models can read it
    // concurrently while the simulation advances it.
    time: SyncCell<TearableAtomicTime>,
}
|
||||
|
||||
impl Simulation {
    /// Creates a new `Simulation`.
    ///
    /// Crate-private: instances are built by the simulation initializer (see
    /// the type-level documentation referring to `SimInit::init()`).
    pub(crate) fn new(
        executor: Executor,
        scheduler_queue: Arc<Mutex<SchedulerQueue>>,
        time: SyncCell<TearableAtomicTime>,
    ) -> Self {
        Self {
            executor,
            scheduler_queue,
            time,
        }
    }

    /// Returns the current simulation time.
    pub fn time(&self) -> MonotonicTime {
        self.time.read()
    }

    /// Advances simulation time to that of the next scheduled task, processing
    /// that task as well as all other tasks scheduled for the same time.
    ///
    /// This method may block. Once it returns, it is guaranteed that all newly
    /// processed tasks (if any) have completed.
    pub fn step(&mut self) {
        self.step_to_next_bounded(MonotonicTime::MAX);
    }

    /// Iteratively advances the simulation time by the specified duration and
    /// processes all tasks scheduled up to the target time.
    ///
    /// This method may block. Once it returns, it is guaranteed that (i) all
    /// tasks scheduled up to the specified target time have completed and (ii)
    /// the final simulation time has been incremented by the specified
    /// duration.
    pub fn step_by(&mut self, duration: Duration) {
        let target_time = self.time.read() + duration;

        self.step_until_unchecked(target_time);
    }

    /// Iteratively advances the simulation time and processes all tasks
    /// scheduled up to the specified target time.
    ///
    /// This method may block. Once it returns, it is guaranteed that (i) all
    /// tasks scheduled up to the specified target time have completed and (ii)
    /// the final simulation time matches the target time.
    pub fn step_until(&mut self, target_time: MonotonicTime) -> Result<(), ScheduledTimeError<()>> {
        // The target must lie strictly in the future of the current time.
        if self.time.read() >= target_time {
            return Err(ScheduledTimeError(()));
        }
        self.step_until_unchecked(target_time);

        Ok(())
    }

    /// Schedules an event at the lapse of the specified duration.
    ///
    /// An error is returned if the specified duration is null.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_in<M, F, T, S>(
        &mut self,
        duration: Duration,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<SchedulerKey, ScheduledTimeError<T>>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
    {
        // A zero duration would schedule the event at the current time, which
        // is not allowed; the argument is handed back in the error.
        if duration.is_zero() {
            return Err(ScheduledTimeError(arg));
        }
        let time = self.time.read() + duration;

        let schedule_key = time::schedule_event_at_unchecked(
            time,
            func,
            arg,
            address.into().0,
            &self.scheduler_queue,
        );

        Ok(schedule_key)
    }

    /// Schedules an event at a future time.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_at<M, F, T, S>(
        &mut self,
        time: MonotonicTime,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<SchedulerKey, ScheduledTimeError<T>>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
    {
        // Scheduling in the past or at the current time is rejected; the
        // argument is handed back in the error.
        if self.time.read() >= time {
            return Err(ScheduledTimeError(arg));
        }
        let schedule_key = time::schedule_event_at_unchecked(
            time,
            func,
            arg,
            address.into().0,
            &self.scheduler_queue,
        );

        Ok(schedule_key)
    }

    /// Cancels an event with a scheduled time in the future of the current
    /// simulation time.
    ///
    /// If the corresponding event was already executed, or if it is scheduled
    /// for the current simulation time, an error is returned.
    pub fn cancel(&self, scheduler_key: SchedulerKey) -> Result<(), CancellationError> {
        time::cancel_scheduled(scheduler_key, &self.scheduler_queue)
    }

    /// Sends and processes an event, blocking until completion.
    ///
    /// Simulation time remains unchanged.
    pub fn send_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
    {
        let sender = address.into().0;
        let fut = async move {
            // Ignore send errors.
            let _ = sender
                .send(
                    move |model: &mut M,
                          scheduler,
                          recycle_box: RecycleBox<()>|
                          -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                        let fut = func.call(model, arg, scheduler);

                        // Reuse the provided allocation for the input future.
                        coerce_box!(RecycleBox::recycle(recycle_box, fut))
                    },
                )
                .await;
        };

        // Run the executor to completion so all triggered computations are
        // done when this method returns.
        self.executor.spawn_and_forget(fut);
        self.executor.run();
    }

    /// Sends and processes a query, blocking until completion.
    ///
    /// Simulation time remains unchanged.
    pub fn send_query<M, F, T, R, S>(
        &mut self,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<R, QueryError>
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S>,
        T: Send + Clone + 'static,
        R: Send + 'static,
    {
        // One-shot slot used to retrieve the reply from the model's task.
        let (reply_writer, mut reply_reader) = slot::slot();
        let sender = address.into().0;

        let fut = async move {
            // Ignore send errors.
            let _ = sender
                .send(
                    move |model: &mut M,
                          scheduler,
                          recycle_box: RecycleBox<()>|
                          -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                        let fut = async move {
                            let reply = func.call(model, arg, scheduler).await;
                            let _ = reply_writer.write(reply);
                        };

                        // Reuse the provided allocation for the reply future.
                        coerce_box!(RecycleBox::recycle(recycle_box, fut))
                    },
                )
                .await;
        };

        self.executor.spawn_and_forget(fut);
        self.executor.run();

        // An empty slot means the reply was never written: either the model
        // was not added to the simulation or a deadlock occurred (see module
        // docs).
        reply_reader.try_read().map_err(|_| QueryError {})
    }

    /// Advances simulation time to that of the next scheduled task if its
    /// scheduling time does not exceed the specified bound, processing that
    /// task as well as all other tasks scheduled for the same time.
    ///
    /// If at least one task was found that satisfied the time bound, the
    /// corresponding new simulation time is returned.
    fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option<MonotonicTime> {
        // NOTE: `unwrap` panics on a poisoned mutex, i.e. if another holder
        // of the scheduler queue panicked.
        let mut scheduler_queue = self.scheduler_queue.lock().unwrap();

        let mut current_key = match scheduler_queue.peek_key() {
            Some(&k) if k.0 <= upper_time_bound => k,
            _ => return None,
        };

        // Set the simulation time to that of the next scheduled task
        self.time.write(current_key.0);

        loop {
            // The queue is non-empty here: `peek_key` returned `Some` above
            // (or at the bottom of the previous iteration).
            let task = scheduler_queue.pull().unwrap().1;

            let mut next_key = scheduler_queue.peek_key();
            if next_key != Some(&current_key) {
                // Since there are no other tasks targeting the same mailbox
                // and the same time, the task is spawned immediately.
                self.executor.spawn_and_forget(Box::into_pin(task));
            } else {
                // To ensure that their relative order of execution is
                // preserved, all tasks targeting the same mailbox are
                // concatenated into a single future.
                let mut task_sequence = SeqFuture::new();

                task_sequence.push(Box::into_pin(task));
                loop {
                    let task = scheduler_queue.pull().unwrap().1;
                    task_sequence.push(Box::into_pin(task));
                    next_key = scheduler_queue.peek_key();
                    if next_key != Some(&current_key) {
                        break;
                    }
                }

                // Spawn a parent task that sequentially polls all sub-tasks.
                self.executor.spawn_and_forget(task_sequence);
            }

            match next_key {
                // If the next task is scheduled at the same time, update the key and continue.
                Some(k) if k.0 == current_key.0 => {
                    current_key = *k;
                }
                // Otherwise wait until all tasks have completed and return.
                _ => {
                    drop(scheduler_queue); // make sure the queue's mutex is unlocked.
                    self.executor.run();

                    return Some(current_key.0);
                }
            }
        }
    }

    /// Iteratively advances simulation time and processes all tasks scheduled
    /// up to the specified target time.
    ///
    /// Once the method returns it is guaranteed that (i) all tasks scheduled up
    /// to the specified target time have completed and (ii) the final
    /// simulation time matches the target time.
    ///
    /// This method does not check whether the specified time lies in the future
    /// of the current simulation time.
    fn step_until_unchecked(&mut self, target_time: MonotonicTime) {
        loop {
            match self.step_to_next_bounded(target_time) {
                // The target time was reached exactly.
                Some(t) if t == target_time => return,
                // No tasks are scheduled before or at the target time.
                None => {
                    // Update the simulation time.
                    self.time.write(target_time);
                    return;
                }
                // The target time was not reached yet.
                _ => {}
            }
        }
    }
}
|
||||
|
||||
impl fmt::Debug for Simulation {
    // Only the current simulation time is shown; the executor and the
    // scheduler queue are deliberately omitted, hence `finish_non_exhaustive`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Simulation")
            .field("time", &self.time.read())
            .finish_non_exhaustive()
    }
}
|
||||
|
||||
/// Error returned when a query did not obtain a response.
///
/// This can happen either because the model targeted by the address was not
/// added to the simulation or due to a simulation deadlock.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct QueryError {}

impl fmt::Display for QueryError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The message is a plain literal, so `write_str` suffices.
        fmt.write_str("the query did not receive a response")
    }
}

impl Error for QueryError {}
|
69
asynchronix/src/simulation/endpoints.rs
Normal file
69
asynchronix/src/simulation/endpoints.rs
Normal file
@ -0,0 +1,69 @@
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
|
||||
|
||||
use crate::util::spsc_queue;
|
||||
|
||||
/// An iterator that returns all events that were broadcast by an output port.
|
||||
///
|
||||
/// Events are returned in first-in-first-out order. Note that even if the
|
||||
/// iterator returns `None`, it may still produce more items after simulation
|
||||
/// time is incremented.
|
||||
pub struct EventStream<T> {
|
||||
consumer: spsc_queue::Consumer<T>,
|
||||
}
|
||||
|
||||
impl<T> EventStream<T> {
|
||||
/// Creates a new `EventStream`.
|
||||
pub(crate) fn new(consumer: spsc_queue::Consumer<T>) -> Self {
|
||||
Self { consumer }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Iterator for EventStream<T> {
|
||||
type Item = T;
|
||||
|
||||
fn next(&mut self) -> Option<Self::Item> {
|
||||
self.consumer.pop()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> fmt::Debug for EventStream<T> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||
f.debug_struct("EventStream").finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
/// A single-value slot that holds the last event that was broadcast by an
/// output port.
pub struct EventSlot<T> {
    // NOTE(review): presumably the writing side is held by the output port,
    // which overwrites the slot on each broadcast — confirm against the caller.
    slot: Arc<Mutex<Option<T>>>,
}

impl<T> EventSlot<T> {
    /// Creates a new `EventSlot`.
    pub(crate) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
        Self { slot }
    }

    /// Take the last event, if any, leaving the slot empty.
    ///
    /// Note that even after the event is taken, it may become populated anew
    /// after simulation time is incremented.
    pub fn take(&mut self) -> Option<T> {
        // We don't actually need to take self by mutable reference, but this
        // signature is probably less surprising for the user and more
        // consistent with `EventStream`. It also prevents multi-threaded
        // access, which would be likely to be misused.
        match self.slot.try_lock() {
            // Lock acquired: move the pending event (if any) out of the slot.
            TryLockResult::Ok(mut v) => v.take(),
            // Lock currently held elsewhere: report "no event" rather than
            // block the calling thread.
            TryLockResult::Err(TryLockError::WouldBlock) => None,
            // A poisoned lock means another thread panicked while holding it;
            // this indicates a bug, so panicking here is deliberate.
            TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}

impl<T> fmt::Debug for EventSlot<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventSlot").finish_non_exhaustive()
    }
}
|
97
asynchronix/src/simulation/mailbox.rs
Normal file
97
asynchronix/src/simulation/mailbox.rs
Normal file
@ -0,0 +1,97 @@
|
||||
use std::fmt;
|
||||
|
||||
use crate::channel::{Receiver, Sender};
|
||||
use crate::model::Model;
|
||||
|
||||
/// A model mailbox.
|
||||
///
|
||||
/// A mailbox is an entity associated to a model instance that collects all
|
||||
/// messages sent to that model. The size of its internal buffer can be
|
||||
/// optionally specified at construction time using
|
||||
/// [`with_capacity`](Mailbox::with_capacity).
|
||||
pub struct Mailbox<M: Model>(pub(crate) Receiver<M>);
|
||||
|
||||
impl<M: Model> Mailbox<M> {
|
||||
/// Default capacity when created with `new` or `Default::default`.
|
||||
pub const DEFAULT_CAPACITY: usize = 16;
|
||||
|
||||
/// Creates a new mailbox with capacity `Self::DEFAULT_CAPACITY`.
|
||||
pub fn new() -> Self {
|
||||
Self(Receiver::new(Self::DEFAULT_CAPACITY))
|
||||
}
|
||||
|
||||
/// Creates a new mailbox with the specified capacity.
|
||||
///
|
||||
/// # Panic
|
||||
///
|
||||
/// The constructor will panic if the requested capacity is 0 or is greater
|
||||
/// than `usize::MAX/2 + 1`.
|
||||
pub fn with_capacity(capacity: usize) -> Self {
|
||||
Self(Receiver::new(capacity))
|
||||
}
|
||||
|
||||
/// Returns a handle to this mailbox.
|
||||
pub fn address(&self) -> Address<M> {
|
||||
Address(self.0.sender())
|
||||
}
|
||||
}
|
||||
|
||||
impl<M: Model> Default for Mailbox<M> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<M: Model> fmt::Debug for Mailbox<M> {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("Mailbox")
|
||||
.field("mailbox_id", &self.0.channel_id().to_string())
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle to a model mailbox.
///
/// An address always points to the same mailbox. Unlike a [`Mailbox`], however,
/// an address can be cloned and shared between threads.
///
/// For the sake of convenience, methods that require an address by value will
/// typically also accept an `&Address` or an `&Mailbox` since these references
/// implement the `Into<Address>` trait, automatically invoking
/// `Address::clone()` or `Mailbox::address()` as appropriate.
pub struct Address<M: Model>(pub(crate) Sender<M>);

impl<M: Model> Clone for Address<M> {
    // Implemented manually rather than derived: `#[derive(Clone)]` would add
    // an `M: Clone` bound, whereas only the inner `Sender` needs cloning.
    fn clone(&self) -> Self {
        Self(self.0.clone())
    }
}

impl<M: Model> From<&Address<M>> for Address<M> {
    /// Converts an [`Address`] reference into an [`Address`].
    ///
    /// This clones the reference and returns the clone.
    #[inline]
    fn from(s: &Address<M>) -> Address<M> {
        s.clone()
    }
}

impl<M: Model> From<&Mailbox<M>> for Address<M> {
    /// Converts a [Mailbox] reference into an [`Address`].
    ///
    /// This calls [`Mailbox::address()`] on the mailbox and returns the
    /// address.
    #[inline]
    fn from(s: &Mailbox<M>) -> Address<M> {
        s.address()
    }
}

impl<M: Model> fmt::Debug for Address<M> {
    // Only the channel id is shown; the sender itself is opaque.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Address")
            .field("mailbox_id", &self.0.channel_id().to_string())
            .finish_non_exhaustive()
    }
}
|
79
asynchronix/src/simulation/sim_init.rs
Normal file
79
asynchronix/src/simulation/sim_init.rs
Normal file
@ -0,0 +1,79 @@
|
||||
use std::fmt;
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use crate::executor::Executor;
|
||||
use crate::model::Model;
|
||||
use crate::time::Scheduler;
|
||||
use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime};
|
||||
use crate::util::priority_queue::PriorityQueue;
|
||||
use crate::util::sync_cell::SyncCell;
|
||||
|
||||
use super::{Mailbox, Simulation};
|
||||
|
||||
/// Builder for a multi-threaded, discrete-event simulation.
pub struct SimInit {
    // Executor on which model initializers and scheduled tasks are spawned.
    executor: Executor,
    // Global scheduler queue, shared with each model's `Scheduler` handle.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    // Current simulation time, shared with models via reader handles.
    time: SyncCell<TearableAtomicTime>,
}

impl SimInit {
    /// Creates a builder for a multithreaded simulation running on all
    /// available logical threads.
    pub fn new() -> Self {
        Self::with_num_threads(num_cpus::get())
    }

    /// Creates a builder for a multithreaded simulation running on the
    /// specified number of threads.
    pub fn with_num_threads(num_threads: usize) -> Self {
        // The current executor's implementation caps the number of thread to 64
        // on 64-bit systems and 32 on 32-bit systems.
        let num_threads = num_threads.min(usize::BITS as usize);

        Self {
            executor: Executor::new(num_threads),
            scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
            time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)),
        }
    }

    /// Adds a model and its mailbox to the simulation bench.
    pub fn add_model<M: Model>(self, model: M, mailbox: Mailbox<M>) -> Self {
        let scheduler_queue = self.scheduler_queue.clone();
        let time = self.time.reader();
        let mut receiver = mailbox.0;

        // The spawned task runs the model initializer once the executor is
        // started (see `init`), then keeps processing incoming messages until
        // `recv` returns an error (presumably when the channel is closed —
        // confirm against the channel implementation).
        self.executor.spawn_and_forget(async move {
            let sender = receiver.sender();
            let scheduler = Scheduler::new(sender, scheduler_queue, time);
            let mut model = model.init(&scheduler).await.0;

            while receiver.recv(&mut model, &scheduler).await.is_ok() {}
        });

        self
    }

    /// Builds a simulation initialized at the specified simulation time,
    /// executing the [`Model::init()`](crate::model::Model::init) method on all
    /// model initializers.
    pub fn init(mut self, start_time: MonotonicTime) -> Simulation {
        self.time.write(start_time);
        // Run the executor so every task spawned by `add_model` makes progress
        // through its model initializer.
        self.executor.run();

        Simulation::new(self.executor, self.scheduler_queue, self.time)
    }
}

impl Default for SimInit {
    /// Equivalent to [`SimInit::new`].
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for SimInit {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SimInit").finish_non_exhaustive()
    }
}
|
54
asynchronix/src/time.rs
Normal file
54
asynchronix/src/time.rs
Normal file
@ -0,0 +1,54 @@
|
||||
//! Simulation time and scheduling.
|
||||
//!
|
||||
//! This module provides most notably:
|
||||
//!
|
||||
//! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard,
|
||||
//! * [`Scheduler`]: a model-local handle to the global scheduler that can be
|
||||
//! used by models to schedule future actions onto themselves.
|
||||
//!
|
||||
//! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
|
||||
//!
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! An alarm clock model that prints a message when the simulation time reaches
|
||||
//! the specified timestamp.
|
||||
//!
|
||||
//! ```
|
||||
//! use asynchronix::model::Model;
|
||||
//! use asynchronix::time::{MonotonicTime, Scheduler};
|
||||
//!
|
||||
//! // An alarm clock model.
|
||||
//! pub struct AlarmClock {
|
||||
//! msg: String
|
||||
//! }
|
||||
//!
|
||||
//! impl AlarmClock {
|
||||
//! // Creates a new alarm clock.
|
||||
//! pub fn new(msg: String) -> Self {
|
||||
//! Self { msg }
|
||||
//! }
|
||||
//!
|
||||
//! // Sets an alarm [input port].
|
||||
//! pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
|
||||
//! if scheduler.schedule_at(setting, Self::ring, ()).is_err() {
|
||||
//! println!("The alarm clock can only be set for a future time");
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! // Rings the alarm [private input port].
|
||||
//! fn ring(&mut self) {
|
||||
//! println!("{}", self.msg);
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl Model for AlarmClock {}
|
||||
//! ```
|
||||
|
||||
mod monotonic_time;
|
||||
mod scheduler;
|
||||
|
||||
pub(crate) use monotonic_time::TearableAtomicTime;
|
||||
pub use monotonic_time::{MonotonicTime, SystemTimeError};
|
||||
pub(crate) use scheduler::{cancel_scheduled, schedule_event_at_unchecked, SchedulerQueue};
|
||||
pub use scheduler::{CancellationError, ScheduledTimeError, Scheduler, SchedulerKey};
|
665
asynchronix/src/time/monotonic_time.rs
Normal file
665
asynchronix/src/time/monotonic_time.rs
Normal file
@ -0,0 +1,665 @@
|
||||
//! Monotonic simulation time.
|
||||
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::ops::{Add, AddAssign, Sub, SubAssign};
|
||||
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use crate::util::sync_cell::TearableAtomic;
|
||||
|
||||
const NANOS_PER_SEC: u32 = 1_000_000_000;
|
||||
|
||||
/// A nanosecond-precision monotonic clock timestamp.
|
||||
///
|
||||
/// A timestamp specifies a [TAI] point in time. It is represented as a 64-bit
|
||||
/// signed number of seconds and a positive number of nanoseconds, counted with
|
||||
/// reference to 1970-01-01 00:00:00 TAI. This timestamp format has a number of
|
||||
/// desirable properties:
|
||||
///
|
||||
/// - it enables cheap inter-operation with the standard [`Duration`] type which
|
||||
/// uses a very similar internal representation,
|
||||
/// - it constitutes a strict 96-bit superset of 80-bit PTP IEEE-1588
|
||||
/// timestamps, with the same epoch,
|
||||
/// - if required, exact conversion to a Unix timestamp is trivial and only
|
||||
/// requires subtracting from this timestamp the number of leap seconds
|
||||
/// between TAI and UTC time (see also the
|
||||
/// [`as_unix_secs`](MonotonicTime::as_unix_secs) method).
|
||||
///
|
||||
/// Although no date-time conversion methods are provided, conversion from
|
||||
/// timestamp to TAI date-time representations and back can be easily performed
|
||||
/// using `NaiveDateTime` from the [chrono] crate or `OffsetDateTime` from the
|
||||
/// [time] crate, treating the timestamp as a regular (UTC) Unix timestamp.
|
||||
///
|
||||
/// [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
|
||||
/// [chrono]: https://crates.io/crates/chrono
|
||||
/// [time]: https://crates.io/crates/time
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// // Set the timestamp to 2009-02-13 23:31:30.987654321 TAI.
|
||||
/// let mut timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
///
|
||||
/// // Increment the timestamp by 123.456s.
|
||||
/// timestamp += Duration::new(123, 456_000_000);
|
||||
///
|
||||
/// assert_eq!(timestamp, MonotonicTime::new(1_234_568_014, 443_654_321));
|
||||
/// assert_eq!(timestamp.as_secs(), 1_234_568_014);
|
||||
/// assert_eq!(timestamp.subsec_nanos(), 443_654_321);
|
||||
/// ```
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
||||
pub struct MonotonicTime {
|
||||
/// The number of whole seconds in the future (if positive) or in the past
|
||||
/// (if negative) of 1970-01-01 00:00:00 TAI.
|
||||
///
|
||||
/// Note that the automatic derivation of `PartialOrd` relies on
|
||||
/// lexicographical comparison so the `secs` field must appear before
|
||||
/// `nanos` in declaration order to be given higher priority.
|
||||
secs: i64,
|
||||
/// The sub-second number of nanoseconds in the future of the point in time
|
||||
/// defined by `secs`.
|
||||
nanos: u32,
|
||||
}
|
||||
|
||||
impl MonotonicTime {
|
||||
/// The epoch used by `MonotonicTime`, equal to 1970-01-01 00:00:00 TAI.
|
||||
///
|
||||
/// This epoch coincides with the PTP epoch defined in the IEEE-1588
|
||||
/// standard.
|
||||
pub const EPOCH: Self = Self { secs: 0, nanos: 0 };
|
||||
|
||||
/// The minimum possible `MonotonicTime` timestamp.
|
||||
pub const MIN: Self = Self {
|
||||
secs: i64::MIN,
|
||||
nanos: 0,
|
||||
};
|
||||
|
||||
/// The maximum possible `MonotonicTime` timestamp.
|
||||
pub const MAX: Self = Self {
|
||||
secs: i64::MAX,
|
||||
nanos: NANOS_PER_SEC - 1,
|
||||
};
|
||||
|
||||
/// Creates a timestamp directly from timestamp parts.
|
||||
///
|
||||
/// The number of seconds is relative to the [`EPOCH`](MonotonicTime::EPOCH)
|
||||
/// (1970-01-01 00:00:00 TAI). It is negative for dates in the past of the
|
||||
/// epoch.
|
||||
///
|
||||
/// The number of nanoseconds is always positive and always points towards
|
||||
/// the future.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This constructor will panic if the number of nanoseconds is greater than
|
||||
/// or equal to 1 second.
|
||||
///
|
||||
/// # Example
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// // A timestamp set to 2009-02-13 23:31:30.987654321 TAI.
|
||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
///
|
||||
/// // A timestamp set 3.5s before the epoch.
|
||||
/// let timestamp = MonotonicTime::new(-4, 500_000_000);
|
||||
/// assert_eq!(timestamp, MonotonicTime::EPOCH - Duration::new(3, 500_000_000));
|
||||
/// ```
|
||||
pub const fn new(secs: i64, subsec_nanos: u32) -> Self {
|
||||
assert!(
|
||||
subsec_nanos < NANOS_PER_SEC,
|
||||
"invalid number of nanoseconds"
|
||||
);
|
||||
|
||||
Self {
|
||||
secs,
|
||||
nanos: subsec_nanos,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates a timestamp from the current system time.
|
||||
///
|
||||
/// The argument is the current difference between TAI and UTC time in
|
||||
/// seconds (a.k.a. leap seconds). For reference, this offset has been +37s
|
||||
/// since 2017-01-01, a value which is to remain valid until at least
|
||||
/// 2023-06-30. See the IETF's [leap second
|
||||
/// data](https://www.ietf.org/timezones/data/leap-seconds.list) for current
|
||||
/// and historical values.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// This method will return an error if the reported system time is in the
|
||||
/// past of the Unix epoch or if the offset-adjusted timestamp is outside
|
||||
/// the representable range.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// // Compute the current TAI time assuming that the current difference
|
||||
/// // between TAI and UTC time is 37s.
|
||||
/// let timestamp = MonotonicTime::from_system(37).unwrap();
|
||||
/// ```
|
||||
pub fn from_system(leap_secs: i64) -> Result<Self, SystemTimeError> {
|
||||
let utc_timestamp = SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.map_err(|_| SystemTimeError::InvalidSystemTime)?;
|
||||
|
||||
Self::new(leap_secs, 0)
|
||||
.checked_add(utc_timestamp)
|
||||
.ok_or(SystemTimeError::OutOfRange)
|
||||
}
|
||||
|
||||
/// Returns the number of whole seconds relative to
|
||||
/// [`EPOCH`](MonotonicTime::EPOCH) (1970-01-01 00:00:00 TAI).
|
||||
///
|
||||
/// Consistently with the interpretation of seconds and nanoseconds in the
|
||||
/// [`new`][Self::new] constructor, seconds are always rounded towards `-∞`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
/// assert_eq!(timestamp.as_secs(), 1_234_567_890);
|
||||
///
|
||||
/// let timestamp = MonotonicTime::EPOCH - Duration::new(3, 500_000_000);
|
||||
/// assert_eq!(timestamp.as_secs(), -4);
|
||||
/// ```
|
||||
pub const fn as_secs(&self) -> i64 {
|
||||
self.secs
|
||||
}
|
||||
|
||||
/// Returns the number of seconds of the corresponding Unix time.
|
||||
///
|
||||
/// The argument is the difference between TAI and UTC time in seconds
|
||||
/// (a.k.a. leap seconds) applicable at the date represented by the
|
||||
/// timestamp. See the IETF's [leap second
|
||||
/// data](https://www.ietf.org/timezones/data/leap-seconds.list) for current
|
||||
/// and historical values.
|
||||
///
|
||||
/// This method merely subtracts the offset from the value returned by
|
||||
/// [`as_secs`](Self::as_secs) and checks for potential overflow; its main
|
||||
/// purpose is to prevent mistakes regarding the direction in which the
|
||||
/// offset should be applied.
|
||||
///
|
||||
/// Note that the nanosecond part of a Unix timestamp can be simply
|
||||
/// retrieved with [`subsec_nanos`][Self::subsec_nanos] since UTC and TAI
|
||||
/// differ by a whole number of seconds.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// This will panic if the offset-adjusted timestamp cannot be represented
|
||||
/// as an `i64`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// // Set the date to 2000-01-01 00:00:00 TAI.
|
||||
/// let timestamp = MonotonicTime::new(946_684_800, 0);
|
||||
///
|
||||
/// // Convert to a Unix timestamp, accounting for the +32s difference between
|
||||
/// // TAI and UTC on 2000-01-01.
|
||||
/// let unix_secs = timestamp.as_unix_secs(32);
|
||||
/// ```
|
||||
pub const fn as_unix_secs(&self, leap_secs: i64) -> i64 {
|
||||
if let Some(secs) = self.secs.checked_sub(leap_secs) {
|
||||
secs
|
||||
} else {
|
||||
panic!("timestamp outside representable range");
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the sub-second fractional part in nanoseconds.
|
||||
///
|
||||
/// Note that nanoseconds always point towards the future even if the date
|
||||
/// is in the past of the [`EPOCH`](MonotonicTime::EPOCH).
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
/// assert_eq!(timestamp.subsec_nanos(), 987_654_321);
|
||||
/// ```
|
||||
pub const fn subsec_nanos(&self) -> u32 {
|
||||
self.nanos
|
||||
}
|
||||
|
||||
/// Adds a duration to a timestamp, checking for overflow.
|
||||
///
|
||||
/// Returns `None` if overflow occurred.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
/// assert!(timestamp.checked_add(Duration::new(10, 123_456_789)).is_some());
|
||||
/// assert!(timestamp.checked_add(Duration::MAX).is_none());
|
||||
/// ```
|
||||
pub const fn checked_add(self, rhs: Duration) -> Option<Self> {
|
||||
// A durations in seconds greater than `i64::MAX` is actually fine as
|
||||
// long as the number of seconds does not effectively overflow which is
|
||||
// why the below does not use `checked_add`. So technically the below
|
||||
// addition may wrap around on the negative side due to the
|
||||
// unsigned-to-signed cast of the duration, but this does not
|
||||
// necessarily indicate an actual overflow. Actual overflow can be ruled
|
||||
// out by verifying that the new timestamp is in the future of the old
|
||||
// timestamp.
|
||||
let mut secs = self.secs.wrapping_add(rhs.as_secs() as i64);
|
||||
|
||||
// Check for overflow.
|
||||
if secs < self.secs {
|
||||
return None;
|
||||
}
|
||||
|
||||
let mut nanos = self.nanos + rhs.subsec_nanos();
|
||||
if nanos >= NANOS_PER_SEC {
|
||||
secs = if let Some(s) = secs.checked_add(1) {
|
||||
s
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
nanos -= NANOS_PER_SEC;
|
||||
}
|
||||
|
||||
Some(Self { secs, nanos })
|
||||
}
|
||||
|
||||
/// Subtracts a duration from a timestamp, checking for overflow.
|
||||
///
|
||||
/// Returns `None` if overflow occurred.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
||||
/// assert!(timestamp.checked_sub(Duration::new(10, 123_456_789)).is_some());
|
||||
/// assert!(timestamp.checked_sub(Duration::MAX).is_none());
|
||||
/// ```
|
||||
pub const fn checked_sub(self, rhs: Duration) -> Option<Self> {
|
||||
// A durations in seconds greater than `i64::MAX` is actually fine as
|
||||
// long as the number of seconds does not effectively overflow, which is
|
||||
// why the below does not use `checked_sub`. So technically the below
|
||||
// subtraction may wrap around on the positive side due to the
|
||||
// unsigned-to-signed cast of the duration, but this does not
|
||||
// necessarily indicate an actual overflow. Actual overflow can be ruled
|
||||
// out by verifying that the new timestamp is in the past of the old
|
||||
// timestamp.
|
||||
let mut secs = self.secs.wrapping_sub(rhs.as_secs() as i64);
|
||||
|
||||
// Check for overflow.
|
||||
if secs > self.secs {
|
||||
return None;
|
||||
}
|
||||
|
||||
let nanos = if self.nanos < rhs.subsec_nanos() {
|
||||
secs = if let Some(s) = secs.checked_sub(1) {
|
||||
s
|
||||
} else {
|
||||
return None;
|
||||
};
|
||||
|
||||
(self.nanos + NANOS_PER_SEC) - rhs.subsec_nanos()
|
||||
} else {
|
||||
self.nanos - rhs.subsec_nanos()
|
||||
};
|
||||
|
||||
Some(Self { secs, nanos })
|
||||
}
|
||||
|
||||
/// Subtracts a timestamp from another timestamp.
|
||||
///
|
||||
/// # Panics
|
||||
///
|
||||
/// Panics if the argument lies in the future of `self`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
|
||||
/// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
|
||||
/// assert_eq!(
|
||||
/// timestamp_later.duration_since(timestamp_earlier),
|
||||
/// Duration::new(20, 135_802_468)
|
||||
/// );
|
||||
/// ```
|
||||
pub fn duration_since(self, earlier: Self) -> Duration {
|
||||
self.checked_duration_since(earlier)
|
||||
.expect("attempt to substract a timestamp from an earlier timestamp")
|
||||
}
|
||||
|
||||
/// Computes the duration elapsed between a timestamp and an earlier
|
||||
/// timestamp, checking that the timestamps are appropriately ordered.
|
||||
///
|
||||
/// Returns `None` if the argument lies in the future of `self`.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::time::MonotonicTime;
|
||||
///
|
||||
/// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
|
||||
/// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
|
||||
/// assert!(timestamp_later.checked_duration_since(timestamp_earlier).is_some());
|
||||
/// assert!(timestamp_earlier.checked_duration_since(timestamp_later).is_none());
|
||||
/// ```
|
||||
pub const fn checked_duration_since(self, earlier: Self) -> Option<Duration> {
|
||||
// If the subtraction of the nanosecond fractions would overflow, carry
|
||||
// over one second to the nanoseconds.
|
||||
let (secs, nanos) = if earlier.nanos > self.nanos {
|
||||
if let Some(s) = self.secs.checked_sub(1) {
|
||||
(s, self.nanos + NANOS_PER_SEC)
|
||||
} else {
|
||||
return None;
|
||||
}
|
||||
} else {
|
||||
(self.secs, self.nanos)
|
||||
};
|
||||
|
||||
// Make sure the computation of the duration will not overflow the
|
||||
// seconds.
|
||||
if secs < earlier.secs {
|
||||
return None;
|
||||
}
|
||||
|
||||
// This subtraction may wrap around if the difference between the two
|
||||
// timestamps is more than `i64::MAX`, but even if it does the result
|
||||
// will be correct once cast to an unsigned integer.
|
||||
let delta_secs = secs.wrapping_sub(earlier.secs) as u64;
|
||||
|
||||
// The below subtraction is guaranteed to never overflow.
|
||||
let delta_nanos = nanos - earlier.nanos;
|
||||
|
||||
Some(Duration::new(delta_secs, delta_nanos))
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<Duration> for MonotonicTime {
    type Output = Self;

    /// Adds a duration to a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_add`] for a panic-free
    /// version.
    fn add(self, other: Duration) -> Self {
        self.checked_add(other)
            .expect("overflow when adding duration to timestamp")
    }
}

impl Sub<Duration> for MonotonicTime {
    type Output = Self;

    /// Subtracts a duration from a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_sub`] for a panic-free
    /// version.
    fn sub(self, other: Duration) -> Self {
        self.checked_sub(other)
            .expect("overflow when subtracting duration from timestamp")
    }
}

impl AddAssign<Duration> for MonotonicTime {
    /// Increments the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    // Delegates to `Add`, so it shares its overflow panic behavior.
    fn add_assign(&mut self, other: Duration) {
        *self = *self + other;
    }
}

impl SubAssign<Duration> for MonotonicTime {
    /// Decrements the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    // Delegates to `Sub`, so it shares its overflow panic behavior.
    fn sub_assign(&mut self, other: Duration) {
        *self = *self - other;
    }
}
|
||||
|
||||
/// An error that may be returned when initializing a [`MonotonicTime`] from
/// system time.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SystemTimeError {
    /// The system time is in the past of the Unix epoch.
    InvalidSystemTime,
    /// The system time cannot be represented as a `MonotonicTime`.
    OutOfRange,
}

impl fmt::Display for SystemTimeError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Resolve the message first, then emit it with a single call.
        let msg = match self {
            Self::InvalidSystemTime => "invalid system time",
            Self::OutOfRange => "timestamp outside representable range",
        };

        fmt.write_str(msg)
    }
}

impl Error for SystemTimeError {}
|
||||
|
||||
/// A tearable atomic adapter over a `MonotonicTime`.
///
/// This makes it possible to store the simulation time in a `SyncCell`, an
/// efficient, seqlock-based alternative to `RwLock`.
pub(crate) struct TearableAtomicTime {
    // Mirror of `MonotonicTime::secs`, stored atomically.
    secs: AtomicI64,
    // Mirror of `MonotonicTime::nanos`, stored atomically.
    nanos: AtomicU32,
}

impl TearableAtomicTime {
    /// Creates an atomic adapter initialized to the provided timestamp.
    pub(crate) fn new(time: MonotonicTime) -> Self {
        Self {
            secs: AtomicI64::new(time.secs),
            nanos: AtomicU32::new(time.nanos),
        }
    }
}

impl TearableAtomic for TearableAtomicTime {
    type Value = MonotonicTime;

    fn tearable_load(&self) -> MonotonicTime {
        // Load each field separately. This can never create invalid values of a
        // `MonotonicTime`, even if the load is torn.
        MonotonicTime {
            secs: self.secs.load(Ordering::Relaxed),
            nanos: self.nanos.load(Ordering::Relaxed),
        }
    }

    fn tearable_store(&self, value: MonotonicTime) {
        // Write each field separately. This can never create invalid values of
        // a `MonotonicTime`, even if the store is torn.
        self.secs.store(value.secs, Ordering::Relaxed);
        self.nanos.store(value.nanos, Ordering::Relaxed);
    }
}
|
||||
|
||||
// Unit tests for `MonotonicTime`. Compiled out when the `asynchronix_loom` cfg
// is set (loom model-checking builds).
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;

    #[test]
    fn time_equality() {
        let t0 = MonotonicTime::new(123, 123_456_789);
        let t1 = MonotonicTime::new(123, 123_456_789);
        let t2 = MonotonicTime::new(123, 123_456_790);
        let t3 = MonotonicTime::new(124, 123_456_789);

        assert_eq!(t0, t1);
        assert_ne!(t0, t2);
        assert_ne!(t0, t3);
    }

    #[test]
    fn time_ordering() {
        let t0 = MonotonicTime::new(0, 1);
        let t1 = MonotonicTime::new(1, 0);

        assert!(t1 > t0);
    }

    // Excluded under Miri: `from_system` presumably queries the host clock,
    // which Miri's isolation forbids.
    #[cfg(not(miri))]
    #[test]
    fn time_from_system_smoke() {
        const START_OF_2022: i64 = 1640995200;
        const START_OF_2050: i64 = 2524608000;

        let now_secs = MonotonicTime::from_system(0).unwrap().as_secs();

        assert!(now_secs > START_OF_2022);
        assert!(now_secs < START_OF_2050);
    }

    // A nanoseconds field equal to `NANOS_PER_SEC` is out of range.
    #[test]
    #[should_panic]
    fn time_invalid() {
        MonotonicTime::new(123, 1_000_000_000);
    }

    #[test]
    fn time_duration_since_smoke() {
        let t0 = MonotonicTime::new(100, 100_000_000);
        let t1 = MonotonicTime::new(123, 223_456_789);

        assert_eq!(
            t1.checked_duration_since(t0),
            Some(Duration::new(23, 123_456_789))
        );
    }

    #[test]
    fn time_duration_with_carry() {
        let t0 = MonotonicTime::new(100, 200_000_000);
        let t1 = MonotonicTime::new(101, 100_000_000);

        assert_eq!(
            t1.checked_duration_since(t0),
            Some(Duration::new(0, 900_000_000))
        );
    }

    #[test]
    fn time_duration_since_extreme() {
        const MIN_TIME: MonotonicTime = MonotonicTime::new(i64::MIN, 0);
        const MAX_TIME: MonotonicTime = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);

        assert_eq!(
            MAX_TIME.checked_duration_since(MIN_TIME),
            Some(Duration::new(u64::MAX, NANOS_PER_SEC - 1))
        );
    }

    #[test]
    fn time_duration_since_invalid() {
        let t0 = MonotonicTime::new(100, 0);
        let t1 = MonotonicTime::new(99, 0);

        assert_eq!(t1.checked_duration_since(t0), None);
    }

    #[test]
    fn time_add_duration_smoke() {
        let t = MonotonicTime::new(-100, 100_000_000);
        let dt = Duration::new(400, 300_000_000);

        assert_eq!(t + dt, MonotonicTime::new(300, 400_000_000));
    }

    #[test]
    fn time_add_duration_with_carry() {
        let t = MonotonicTime::new(-100, 900_000_000);
        let dt1 = Duration::new(400, 100_000_000);
        let dt2 = Duration::new(400, 300_000_000);

        assert_eq!(t + dt1, MonotonicTime::new(301, 0));
        assert_eq!(t + dt2, MonotonicTime::new(301, 200_000_000));
    }

    #[test]
    fn time_add_duration_extreme() {
        let t = MonotonicTime::new(i64::MIN, 0);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

        assert_eq!(t + dt, MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1));
    }

    #[test]
    #[should_panic]
    fn time_add_duration_overflow() {
        let t = MonotonicTime::new(i64::MIN, 1);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

        let _ = t + dt;
    }

    #[test]
    fn time_sub_duration_smoke() {
        let t = MonotonicTime::new(100, 500_000_000);
        let dt = Duration::new(400, 300_000_000);

        assert_eq!(t - dt, MonotonicTime::new(-300, 200_000_000));
    }

    #[test]
    fn time_sub_duration_with_carry() {
        let t = MonotonicTime::new(100, 100_000_000);
        let dt1 = Duration::new(400, 100_000_000);
        let dt2 = Duration::new(400, 300_000_000);

        assert_eq!(t - dt1, MonotonicTime::new(-300, 0));
        assert_eq!(t - dt2, MonotonicTime::new(-301, 800_000_000));
    }

    #[test]
    fn time_sub_duration_extreme() {
        let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

        assert_eq!(t - dt, MonotonicTime::new(i64::MIN, 0));
    }

    #[test]
    #[should_panic]
    fn time_sub_duration_overflow() {
        let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 2);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

        let _ = t - dt;
    }
}
|
346
asynchronix/src/time/scheduler.rs
Normal file
346
asynchronix/src/time/scheduler.rs
Normal file
@ -0,0 +1,346 @@
|
||||
//! Scheduling functions and types.
|
||||
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::future::Future;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Duration;
|
||||
|
||||
use recycle_box::{coerce_box, RecycleBox};
|
||||
|
||||
use crate::channel::{ChannelId, Sender};
|
||||
use crate::model::{InputFn, Model};
|
||||
use crate::time::{MonotonicTime, TearableAtomicTime};
|
||||
use crate::util::priority_queue::{self, PriorityQueue};
|
||||
use crate::util::sync_cell::SyncCellReader;
|
||||
|
||||
/// Shorthand for the scheduler queue type.
///
/// Entries are keyed by the scheduled `MonotonicTime` and the target model's
/// `ChannelId`; the value is the boxed future that delivers the event when
/// polled.
pub(crate) type SchedulerQueue =
    PriorityQueue<(MonotonicTime, ChannelId), Box<dyn Future<Output = ()> + Send>>;
|
||||
|
||||
/// A local scheduler for models.
|
||||
///
|
||||
/// A `Scheduler` is a handle to the global scheduler associated to a model
|
||||
/// instance. It can be used by the model to retrieve the simulation time, to
|
||||
/// schedule delayed actions on itself or to cancel such actions.
|
||||
///
|
||||
/// ### Caveat: self-scheduling `async` methods
|
||||
///
|
||||
/// Due to a current rustc issue, `async` methods that schedule themselves will
|
||||
/// not compile unless an explicit `Send` bound is added to the returned future.
|
||||
/// This can be done by replacing the `async` signature with a partially
|
||||
/// desugared signature such as:
|
||||
///
|
||||
/// ```ignore
|
||||
/// fn self_scheduling_method<'a>(
|
||||
/// &'a mut self,
|
||||
/// arg: MyEventType,
|
||||
/// scheduler: &'a Scheduler<Self>
|
||||
/// ) -> impl Future<Output=()> + Send + 'a {
|
||||
/// async move {
|
||||
/// /* implementation */
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
///
|
||||
/// Self-scheduling methods which are not `async` are not affected by this
|
||||
/// issue.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// A model that sends a greeting after some delay.
|
||||
///
|
||||
/// ```
|
||||
/// use std::time::Duration;
|
||||
/// use asynchronix::model::{Model, Output}; use asynchronix::time::Scheduler;
|
||||
///
|
||||
/// #[derive(Default)]
|
||||
/// pub struct DelayedGreeter {
|
||||
/// msg_out: Output<String>
|
||||
/// }
|
||||
/// impl DelayedGreeter {
|
||||
/// // Triggers a greeting on the output port after some delay [input port].
|
||||
/// pub async fn greet_with_delay(&mut self, delay: Duration, scheduler: &Scheduler<Self>) {
|
||||
/// let time = scheduler.time();
|
||||
/// let greeting = format!("Hello, this message was scheduled at:
|
||||
/// {:?}.", time);
|
||||
///
|
||||
/// if let Err(err) = scheduler.schedule_in(delay, Self::send_msg, greeting) {
|
||||
/// // ^^^^^^^^ scheduled method
|
||||
/// // The duration was zero, so greet right away.
|
||||
/// let greeting = err.0;
|
||||
/// self.msg_out.send(greeting).await;
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// // Sends a message to the output [private input port].
|
||||
/// async fn send_msg(&mut self, msg: String) {
|
||||
/// self.msg_out.send(msg).await;
|
||||
/// }
|
||||
/// }
|
||||
/// impl Model for DelayedGreeter {}
|
||||
/// ```
|
||||
|
||||
// The self-scheduling caveat seems related to this issue:
|
||||
// https://github.com/rust-lang/rust/issues/78649
|
||||
pub struct Scheduler<M: Model> {
    /// Sender handle to the model's channel, used to deliver scheduled events.
    sender: Sender<M>,
    /// Global queue of pending events, shared behind a mutex.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    /// Read-only, seqlock-style handle to the current simulation time.
    time: SyncCellReader<TearableAtomicTime>,
}
|
||||
|
||||
impl<M: Model> Scheduler<M> {
    /// Creates a new local scheduler.
    pub(crate) fn new(
        sender: Sender<M>,
        scheduler_queue: Arc<Mutex<SchedulerQueue>>,
        time: SyncCellReader<TearableAtomicTime>,
    ) -> Self {
        Self {
            sender,
            scheduler_queue,
            time,
        }
    }

    /// Returns the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{MonotonicTime, Scheduler};
    ///
    /// fn is_third_millenium<M: Model>(scheduler: &Scheduler<M>) -> bool {
    ///     let time = scheduler.time();
    ///
    ///     time >= MonotonicTime::new(978307200, 0) && time < MonotonicTime::new(32535216000, 0)
    /// }
    /// ```
    pub fn time(&self) -> MonotonicTime {
        // NOTE(review): `try_read` is presumably only fallible while a write is
        // in progress; a persistent failure would be an internal bug, hence the
        // `expect`.
        self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time")
    }

    /// Schedules an event at the lapse of the specified duration.
    ///
    /// An error is returned if the specified duration is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use std::future::Future;
    /// use asynchronix::model::Model;
    /// use asynchronix::time::Scheduler;
    ///
    /// // A model that logs the value of a counter every second after being
    /// // triggered the first time.
    /// pub struct PeriodicLogger {}
    ///
    /// impl PeriodicLogger {
    ///     // Triggers the logging of a timestamp every second [input port].
    ///     pub fn trigger(&mut self, counter: u64, scheduler: &Scheduler<Self>) {
    ///         println!("counter: {}", counter);
    ///
    ///         // Schedule this method again in 1s with an incremented counter.
    ///         scheduler
    ///             .schedule_in(Duration::from_secs(1), Self::trigger, counter + 1)
    ///             .unwrap();
    ///     }
    /// }
    ///
    /// impl Model for PeriodicLogger {}
    /// ```
    pub fn schedule_in<F, T, S>(
        &self,
        duration: Duration,
        func: F,
        arg: T,
    ) -> Result<SchedulerKey, ScheduledTimeError<T>>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
    {
        // A zero delay would not place the event strictly in the future of the
        // current time; hand the argument back to the caller instead.
        if duration.is_zero() {
            return Err(ScheduledTimeError(arg));
        }
        let time = self.time() + duration;
        let sender = self.sender.clone();
        let schedule_key =
            schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);

        Ok(schedule_key)
    }

    /// Schedules an event at a future time.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{MonotonicTime, Scheduler};
    ///
    /// // An alarm clock model.
    /// pub struct AlarmClock {
    ///     msg: String
    /// }
    ///
    /// impl AlarmClock {
    ///     // Creates a new alarm clock.
    ///     pub fn new(msg: String) -> Self {
    ///         Self { msg }
    ///     }
    ///
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
    ///         if scheduler.schedule_at(setting, Self::ring, ()).is_err() {
    ///             println!("The alarm clock can only be set for a future time");
    ///         }
    ///     }
    ///
    ///     // Rings the alarm [private input port].
    ///     fn ring(&mut self) {
    ///         println!("{}", self.msg);
    ///     }
    /// }
    ///
    /// impl Model for AlarmClock {}
    /// ```
    pub fn schedule_at<F, T, S>(
        &self,
        time: MonotonicTime,
        func: F,
        arg: T,
    ) -> Result<SchedulerKey, ScheduledTimeError<T>>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
    {
        // Only strictly-future times are accepted.
        if self.time() >= time {
            return Err(ScheduledTimeError(arg));
        }
        let sender = self.sender.clone();
        let schedule_key =
            schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);

        Ok(schedule_key)
    }

    /// Cancels an event with a scheduled time in the future of the current
    /// simulation time.
    ///
    /// If the corresponding event was already executed, or if it is scheduled
    /// for the current simulation time but was not yet executed, an error is
    /// returned.
    pub fn cancel(&self, scheduler_key: SchedulerKey) -> Result<(), CancellationError> {
        cancel_scheduled(scheduler_key, &self.scheduler_queue)
    }
}
|
||||
|
||||
impl<M: Model> fmt::Debug for Scheduler<M> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `finish_non_exhaustive()` renders as `Scheduler { .. }`, deliberately
        // hiding the internal handles.
        f.debug_struct("Scheduler").finish_non_exhaustive()
    }
}
|
||||
|
||||
/// Unique identifier for a scheduled event.
|
||||
///
|
||||
/// A `SchedulerKey` can be used to cancel a future event.
|
||||
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub struct SchedulerKey(priority_queue::InsertKey);

impl SchedulerKey {
    /// Wraps a priority-queue insertion key into an opaque public handle.
    pub(crate) fn new(key: priority_queue::InsertKey) -> Self {
        Self(key)
    }
}
|
||||
|
||||
/// Error returned when the scheduled time does not lie in the future of the
|
||||
/// current simulation time.
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct ScheduledTimeError<T>(pub T);

impl<T> fmt::Display for ScheduledTimeError<T> {
    /// Writes the fixed diagnostic message; the payload is intentionally not
    /// displayed, it is only carried back to the caller.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("the scheduled time should be in the future of the current simulation time")
    }
}

impl<T: fmt::Debug> Error for ScheduledTimeError<T> {}
|
||||
|
||||
/// Error returned when the cancellation of a scheduler event is unsuccessful.
|
||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct CancellationError {}

impl fmt::Display for CancellationError {
    /// Writes the fixed diagnostic message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(
            "the scheduler key should belong to an event or command scheduled in the future of the current simulation time",
        )
    }
}

impl Error for CancellationError {}
|
||||
|
||||
/// Schedules an event at a future time.
|
||||
///
|
||||
/// This method does not check whether the specified time lies in the future
|
||||
/// of the current simulation time.
|
||||
pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
    time: MonotonicTime,
    func: F,
    arg: T,
    sender: Sender<M>,
    scheduler_queue: &Mutex<SchedulerQueue>,
) -> SchedulerKey
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    let channel_id = sender.channel_id();

    // The queued future, when polled by the executor, sends a closure to the
    // model's channel; that closure builds the actual input-processing future,
    // recycling the channel's `RecycleBox` allocation instead of making a
    // fresh one.
    let fut = async move {
        // NOTE(review): the send result is deliberately discarded — presumably
        // a failure only means the receiving model is gone; confirm.
        let _ = sender
            .send(
                move |model: &mut M,
                      scheduler,
                      recycle_box: RecycleBox<()>|
                      -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                    let fut = func.call(model, arg, scheduler);

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                },
            )
            .await;
    };

    // Register the future under the (time, channel) key; the returned insert
    // key makes later cancellation possible.
    let mut scheduler_queue = scheduler_queue.lock().unwrap();
    let insert_key = scheduler_queue.insert((time, channel_id), Box::new(fut));

    SchedulerKey::new(insert_key)
}
|
||||
|
||||
/// Cancels an event or command with a scheduled time in the future of the
|
||||
/// current simulation time.
|
||||
///
|
||||
/// If the corresponding event or command was already executed, or if it is
|
||||
/// scheduled for the current simulation time, an error is returned.
|
||||
pub(crate) fn cancel_scheduled(
|
||||
scheduler_key: SchedulerKey,
|
||||
scheduler_queue: &Mutex<SchedulerQueue>,
|
||||
) -> Result<(), CancellationError> {
|
||||
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
||||
if scheduler_queue.delete(scheduler_key.0) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(CancellationError {})
|
||||
}
|
7
asynchronix/src/util.rs
Normal file
7
asynchronix/src/util.rs
Normal file
@ -0,0 +1,7 @@
|
||||
//! Internal utility modules shared across the crate.

pub(crate) mod bit;
pub(crate) mod futures;
pub(crate) mod priority_queue;
pub(crate) mod rng;
pub(crate) mod slot;
pub(crate) mod spsc_queue;
pub(crate) mod sync_cell;
|
@ -1,3 +1,7 @@
|
||||
//! Bit manipulation and algorithms.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
/// Find the position of the `Nᵗʰ` set bit starting the search from the least
|
||||
/// significant bit.
|
||||
///
|
||||
@ -20,10 +24,10 @@
|
||||
/// Implementation notes: the implementation is based on a tree-of-adders
|
||||
/// algorithm followed by binary search, with overall theoretical complexity
|
||||
/// `O(log(usize::BITS))`. In release mode the function is optimized to fully
|
||||
/// branchless code with a pretty moderate cost of about 70 CPU cycles on x86-64
|
||||
/// and less than 60 instruction on aarch64, independently of the inputs. The
|
||||
/// use of the `popcnt` intrinsic was also investigated to compute sub-sums in
|
||||
/// the binary search but was found to be slower than the tree-of-adders.
|
||||
/// branchless code with a pretty moderate cost of about 70 instructions on
|
||||
/// x86-64 and less than 60 instruction on aarch64, independently of the inputs.
|
||||
/// The use of the `popcnt` intrinsic was also investigated to compute sub-sums
|
||||
/// in the binary search but was found to be slower than the tree-of-adders.
|
||||
#[allow(clippy::assertions_on_constants)]
|
||||
pub(crate) fn find_bit<F: FnOnce(usize) -> usize>(value: usize, rank_fn: F) -> usize {
|
||||
const P: usize = usize::BITS.trailing_zeros() as usize; // P = log2(usize::BITS)
|
||||
@ -121,13 +125,13 @@ const fn sum_masks() -> [usize; usize::BITS.trailing_zeros() as usize] {
|
||||
|
||||
#[cfg(all(test, not(asynchronix_loom), not(miri)))]
|
||||
mod tests {
|
||||
use super::super::rng;
|
||||
use super::*;
|
||||
use crate::util::rng;
|
||||
|
||||
// Fuzzing test.
|
||||
#[test]
|
||||
fn find_bit_fuzz() {
|
||||
const SAMPLES: usize = 100_000;
|
||||
const SAMPLES: usize = 10_000;
|
||||
|
||||
#[inline(always)]
|
||||
fn check(value: usize) {
|
53
asynchronix/src/util/futures.rs
Normal file
53
asynchronix/src/util/futures.rs
Normal file
@ -0,0 +1,53 @@
|
||||
//! Futures and future-related functions.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::future::Future;
|
||||
use std::pin::Pin;
|
||||
use std::task::{Context, Poll};
|
||||
|
||||
/// An owned future which sequentially polls a collection of futures.
|
||||
///
|
||||
/// The outputs of the futures, if any, are ignored. For simplicity, the
|
||||
/// implementation assumes that the futures are `Unpin`. This constrained could
|
||||
/// be relaxed if necessary by using something else than a `Vec` to ensure that
|
||||
/// each future is pinned (a `Vec` is not suitable for pinning because it may
|
||||
/// move its items when dropped).
|
||||
pub(crate) struct SeqFuture<F> {
    /// The futures to poll, in insertion (and therefore execution) order.
    inner: Vec<F>,
    /// Index of the future currently being polled.
    idx: usize,
}
|
||||
|
||||
impl<F> SeqFuture<F> {
|
||||
/// Creates a new, empty `SeqFuture`.
|
||||
pub(crate) fn new() -> Self {
|
||||
Self {
|
||||
inner: Vec::new(),
|
||||
idx: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Appends a future.
|
||||
pub(crate) fn push(&mut self, future: F) {
|
||||
self.inner.push(future);
|
||||
}
|
||||
}
|
||||
|
||||
impl<F: Future + Unpin> Future for SeqFuture<F> {
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // All fields are `Unpin` (given `F: Unpin`), so a plain mutable borrow
        // through the pin is sound.
        let this = &mut *self;

        // The below will panic due to out of bound access when polling after
        // completion: this is intentional.
        //
        // NOTE(review): an empty `SeqFuture` panics on its very first poll for
        // the same reason — confirm callers never poll an empty one.
        while Pin::new(&mut this.inner[this.idx]).poll(cx).is_ready() {
            this.idx += 1;
            if this.idx == this.inner.len() {
                return Poll::Ready(());
            }
        }

        Poll::Pending
    }
}
|
661
asynchronix/src/util/priority_queue.rs
Normal file
661
asynchronix/src/util/priority_queue.rs
Normal file
@ -0,0 +1,661 @@
|
||||
//! Associative priority queue.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::mem;
|
||||
|
||||
/// An associative container optimized for extraction of the value with the
|
||||
/// lowest key and deletion of arbitrary key-value pairs.
|
||||
///
|
||||
/// This implementation has the same theoretical complexity for insert and pull
|
||||
/// operations as a conventional array-based binary heap but does differ from
|
||||
/// the latter in some important aspects:
|
||||
///
|
||||
/// - elements can be deleted in *O*(log(*N*)) time rather than *O*(*N*) time
|
||||
/// using a unique index returned at insertion time.
|
||||
/// - same-key elements are guaranteed to be pulled in FIFO order,
|
||||
///
|
||||
/// Under the hood, the priority queue relies on a binary heap cross-indexed
|
||||
/// with values stored in a slab allocator. Each item of the binary heap
|
||||
/// contains an index pointing to the associated slab-allocated node, as well as
|
||||
/// the user-provided key. Each slab node contains the value associated to the
|
||||
/// key and a back-pointing index to the binary heap. The heap items also
|
||||
/// contain a unique epoch which allows same-key nodes to be sorted by insertion
|
||||
/// order. The epoch is used as well to build unique indices that enable
|
||||
/// efficient deletion of arbitrary key-value pairs.
|
||||
///
|
||||
/// The slab-based design is what makes *O*(log(*N*)) deletion possible, but it
|
||||
/// does come with some trade-offs:
|
||||
///
|
||||
/// - its memory footprint is higher because it needs 2 extra pointer-sized
|
||||
/// indices for each element to cross-index the heap and the slab,
|
||||
/// - its computational footprint is higher because of the extra cost associated
|
||||
/// with random slab access; that being said, array-based binary heaps are not
|
||||
/// extremely cache-friendly to start with so unless the slab becomes very
|
||||
/// fragmented, this is not expected to introduce more than a reasonable
|
||||
/// constant-factor penalty compared to a conventional binary heap.
|
||||
///
|
||||
/// The computational penalty is partially offset by the fact that the value
|
||||
/// never needs to be moved from the moment it is inserted until it is pulled.
|
||||
///
|
||||
/// Note that the `Copy` bound on they keys could be lifted but this would make
|
||||
/// the implementation slightly less efficient unless `unsafe` is used.
|
||||
pub(crate) struct PriorityQueue<K, V>
where
    K: Copy + Clone + Ord,
{
    /// Binary min-heap of (unique key, slab index) items.
    heap: Vec<Item<K>>,
    /// Slab storage for the values, cross-indexed with the heap.
    slab: Vec<Node<V>>,
    /// Head of the free list threaded through the slab's free nodes.
    first_free_node: Option<usize>,
    /// Monotonic counter stamping each insertion with a unique epoch.
    next_epoch: u64,
}
|
||||
|
||||
impl<K: Copy + Ord, V> PriorityQueue<K, V> {
|
||||
/// Creates an empty `PriorityQueue`.
|
||||
pub(crate) fn new() -> Self {
|
||||
Self {
|
||||
heap: Vec::new(),
|
||||
slab: Vec::new(),
|
||||
first_free_node: None,
|
||||
next_epoch: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates an empty `PriorityQueue` with at least the specified capacity.
|
||||
pub(crate) fn with_capacity(capacity: usize) -> Self {
|
||||
Self {
|
||||
heap: Vec::with_capacity(capacity),
|
||||
slab: Vec::with_capacity(capacity),
|
||||
first_free_node: None,
|
||||
next_epoch: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the number of key-value pairs in the priority queue.
|
||||
pub(crate) fn len(&self) -> usize {
|
||||
self.heap.len()
|
||||
}
|
||||
|
||||
/// Inserts a new key-value pair and returns a unique insertion key.
|
||||
///
|
||||
/// This operation has *O*(log(*N*)) amortized worse-case theoretical
|
||||
/// complexity and *O*(1) amortized theoretical complexity for a
|
||||
/// sufficiently random heap.
|
||||
pub(crate) fn insert(&mut self, key: K, value: V) -> InsertKey {
|
||||
// Build a unique key from the user-provided key and a unique epoch.
|
||||
let epoch = self.next_epoch;
|
||||
assert_ne!(epoch, u64::MAX);
|
||||
self.next_epoch += 1;
|
||||
let unique_key = UniqueKey { key, epoch };
|
||||
|
||||
// Add a new node to the slab, either by re-using a free node or by
|
||||
// appending a new one.
|
||||
let slab_idx = match self.first_free_node {
|
||||
Some(idx) => {
|
||||
self.first_free_node = self.slab[idx].unwrap_next_free_node();
|
||||
|
||||
self.slab[idx] = Node::HeapNode(HeapNode {
|
||||
value,
|
||||
heap_idx: 0, // temporary value overridden in `sift_up`
|
||||
});
|
||||
|
||||
idx
|
||||
}
|
||||
None => {
|
||||
let idx = self.slab.len();
|
||||
self.slab.push(Node::HeapNode(HeapNode {
|
||||
value,
|
||||
heap_idx: 0, // temporary value overridden in `sift_up`
|
||||
}));
|
||||
|
||||
idx
|
||||
}
|
||||
};
|
||||
|
||||
// Add a new node at the bottom of the heap.
|
||||
let heap_idx = self.heap.len();
|
||||
self.heap.push(Item {
|
||||
key: unique_key, // temporary value overridden in `sift_up`
|
||||
slab_idx: 0, // temporary value overridden in `sift_up`
|
||||
});
|
||||
|
||||
// Sift up the new node.
|
||||
self.sift_up(
|
||||
Item {
|
||||
key: unique_key,
|
||||
slab_idx,
|
||||
},
|
||||
heap_idx,
|
||||
);
|
||||
|
||||
InsertKey { slab_idx, epoch }
|
||||
}
|
||||
|
||||
/// Pulls the value with the lowest key.
|
||||
///
|
||||
/// If there are several equal lowest keys, the value which was inserted
|
||||
/// first is returned.
|
||||
///
|
||||
/// This operation has *O*(log(N)) non-amortized theoretical complexity.
|
||||
pub(crate) fn pull(&mut self) -> Option<(K, V)> {
|
||||
let item = self.heap.first()?;
|
||||
let top_slab_idx = item.slab_idx;
|
||||
let key = item.key.key;
|
||||
|
||||
// Free the top node, extracting its value.
|
||||
let value = mem::replace(
|
||||
&mut self.slab[top_slab_idx],
|
||||
Node::FreeNode(FreeNode {
|
||||
next: self.first_free_node,
|
||||
}),
|
||||
)
|
||||
.unwrap_value();
|
||||
|
||||
self.first_free_node = Some(top_slab_idx);
|
||||
|
||||
// Sift the last node at the bottom of the heap from the top of the heap.
|
||||
let last_item = self.heap.pop().unwrap();
|
||||
if last_item.slab_idx != top_slab_idx {
|
||||
self.sift_down(last_item, 0);
|
||||
}
|
||||
|
||||
Some((key, value))
|
||||
}
|
||||
|
||||
/// Peeks a reference to the key-value pair with the lowest key, leaving it
|
||||
/// in the queue.
|
||||
///
|
||||
/// If there are several equal lowest keys, a reference to the key-value
|
||||
/// pair which was inserted first is returned.
|
||||
///
|
||||
/// This operation has *O*(1) non-amortized theoretical complexity.
|
||||
pub(crate) fn peek(&self) -> Option<(&K, &V)> {
|
||||
let item = self.heap.first()?;
|
||||
let top_slab_idx = item.slab_idx;
|
||||
let key = &item.key.key;
|
||||
let value = self.slab[top_slab_idx].unwrap_value_ref();
|
||||
|
||||
Some((key, value))
|
||||
}
|
||||
|
||||
/// Peeks a reference to the lowest key, leaving it in the queue.
|
||||
///
|
||||
/// If there are several equal lowest keys, a reference to the key which was
|
||||
/// inserted first is returned.
|
||||
///
|
||||
/// This operation has *O*(1) non-amortized theoretical complexity.
|
||||
pub(crate) fn peek_key(&self) -> Option<&K> {
|
||||
let item = self.heap.first()?;
|
||||
|
||||
Some(&item.key.key)
|
||||
}
|
||||
|
||||
/// Delete the key-value pair associated to the provided insertion key if it
|
||||
/// is still in the queue.
|
||||
///
|
||||
/// Using an insertion key returned from another `PriorityQueue` is a logic
|
||||
/// error and could result in the deletion of an arbitrary key-value pair.
|
||||
///
|
||||
/// This method returns `true` if the pair was indeed in the queue and
|
||||
/// `false` otherwise.
|
||||
///
|
||||
/// This operation has guaranteed *O*(log(*N*)) theoretical complexity.
|
||||
pub(crate) fn delete(&mut self, insert_key: InsertKey) -> bool {
|
||||
// Check that (i) there is a node at this index, (ii) this node is in
|
||||
// the heap and (iii) this node has the correct epoch.
|
||||
let slab_idx = insert_key.slab_idx;
|
||||
let heap_idx = if let Some(Node::HeapNode(node)) = self.slab.get(slab_idx) {
|
||||
let heap_idx = node.heap_idx;
|
||||
if self.heap[heap_idx].key.epoch != insert_key.epoch {
|
||||
return false;
|
||||
}
|
||||
heap_idx
|
||||
} else {
|
||||
return false;
|
||||
};
|
||||
|
||||
// If the last item of the heap is not the one to be deleted, sift it up
|
||||
// or down as appropriate starting from the vacant spot.
|
||||
let last_item = self.heap.pop().unwrap();
|
||||
if let Some(item) = self.heap.get(heap_idx) {
|
||||
if last_item.key < item.key {
|
||||
self.sift_up(last_item, heap_idx);
|
||||
} else {
|
||||
self.sift_down(last_item, heap_idx);
|
||||
}
|
||||
}
|
||||
|
||||
// Free the deleted node in the slab.
|
||||
self.slab[slab_idx] = Node::FreeNode(FreeNode {
|
||||
next: self.first_free_node,
|
||||
});
|
||||
self.first_free_node = Some(slab_idx);
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
/// Take a heap item and, starting at `heap_idx`, move it up the heap while
|
||||
/// a parent has a larger key.
|
||||
#[inline]
|
||||
fn sift_up(&mut self, item: Item<K>, heap_idx: usize) {
|
||||
let mut child_heap_idx = heap_idx;
|
||||
let key = &item.key;
|
||||
|
||||
while child_heap_idx != 0 {
|
||||
let parent_heap_idx = (child_heap_idx - 1) / 2;
|
||||
|
||||
// Stop when the key is larger or equal to the parent's.
|
||||
if key >= &self.heap[parent_heap_idx].key {
|
||||
break;
|
||||
}
|
||||
|
||||
// Move the parent down one level.
|
||||
self.heap[child_heap_idx] = self.heap[parent_heap_idx];
|
||||
let parent_slab_idx = self.heap[parent_heap_idx].slab_idx;
|
||||
*self.slab[parent_slab_idx].unwrap_heap_index_mut() = child_heap_idx;
|
||||
|
||||
// Stop when the key is larger or equal to the parent's.
|
||||
if key >= &self.heap[parent_heap_idx].key {
|
||||
break;
|
||||
}
|
||||
// Make the former parent the new child.
|
||||
child_heap_idx = parent_heap_idx;
|
||||
}
|
||||
|
||||
// Move the original item to the current child.
|
||||
self.heap[child_heap_idx] = item;
|
||||
*self.slab[item.slab_idx].unwrap_heap_index_mut() = child_heap_idx;
|
||||
}
|
||||
|
||||
/// Take a heap item and, starting at `heap_idx`, move it down the heap
|
||||
/// while a child has a smaller key.
|
||||
#[inline]
|
||||
fn sift_down(&mut self, item: Item<K>, heap_idx: usize) {
|
||||
let mut parent_heap_idx = heap_idx;
|
||||
let mut child_heap_idx = 2 * parent_heap_idx + 1;
|
||||
let key = &item.key;
|
||||
|
||||
while child_heap_idx < self.heap.len() {
|
||||
// If the sibling exists and has a smaller key, make it the
|
||||
// candidate for swapping.
|
||||
if let Some(other_child) = self.heap.get(child_heap_idx + 1) {
|
||||
child_heap_idx += (self.heap[child_heap_idx].key > other_child.key) as usize;
|
||||
}
|
||||
|
||||
// Stop when the key is smaller or equal to the child with the smallest key.
|
||||
if key <= &self.heap[child_heap_idx].key {
|
||||
break;
|
||||
}
|
||||
|
||||
// Move the child up one level.
|
||||
self.heap[parent_heap_idx] = self.heap[child_heap_idx];
|
||||
let child_slab_idx = self.heap[child_heap_idx].slab_idx;
|
||||
*self.slab[child_slab_idx].unwrap_heap_index_mut() = parent_heap_idx;
|
||||
|
||||
// Make the child the new parent.
|
||||
parent_heap_idx = child_heap_idx;
|
||||
child_heap_idx = 2 * parent_heap_idx + 1;
|
||||
}
|
||||
|
||||
// Move the original item to the current parent.
|
||||
self.heap[parent_heap_idx] = item;
|
||||
*self.slab[item.slab_idx].unwrap_heap_index_mut() = parent_heap_idx;
|
||||
}
|
||||
}
|
||||
|
||||
/// Data related to a single key-value pair stored in the heap.
// Kept `Copy` so the sift operations can shuffle items by plain assignment.
#[derive(Copy, Clone)]
struct Item<K: Copy> {
    // A unique key by which the heap is sorted.
    key: UniqueKey<K>,
    // An index pointing to the corresponding node in the slab.
    slab_idx: usize,
}
|
||||
|
||||
/// Data related to a single key-value pair stored in the slab.
///
/// A slab entry is either a vacant slot threaded onto the free list
/// (`FreeNode`) or an occupied slot backing a live heap entry (`HeapNode`).
enum Node<V> {
    FreeNode(FreeNode),
    HeapNode(HeapNode<V>),
}
|
||||
|
||||
impl<V> Node<V> {
|
||||
/// Unwraps the `FreeNode::next` field.
|
||||
fn unwrap_next_free_node(&self) -> Option<usize> {
|
||||
match self {
|
||||
Self::FreeNode(n) => n.next,
|
||||
_ => panic!("the node was expected to be a free node"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Unwraps the `HeapNode::value` field.
|
||||
fn unwrap_value(self) -> V {
|
||||
match self {
|
||||
Self::HeapNode(n) => n.value,
|
||||
_ => panic!("the node was expected to be a heap node"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Unwraps the `HeapNode::value` field.
|
||||
fn unwrap_value_ref(&self) -> &V {
|
||||
match self {
|
||||
Self::HeapNode(n) => &n.value,
|
||||
_ => panic!("the node was expected to be a heap node"),
|
||||
}
|
||||
}
|
||||
|
||||
/// Unwraps a mutable reference to the `HeapNode::heap_idx` field.
|
||||
fn unwrap_heap_index_mut(&mut self) -> &mut usize {
|
||||
match self {
|
||||
Self::HeapNode(n) => &mut n.heap_idx,
|
||||
_ => panic!("the node was expected to be a heap node"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A node that is no longer in the binary heap.
///
/// Free nodes form a singly linked free list within the slab.
struct FreeNode {
    // An index pointing to the next free node, if any.
    next: Option<usize>,
}
|
||||
|
||||
/// A node currently in the binary heap.
struct HeapNode<V> {
    // The value associated to this node.
    value: V,
    // Index of the node in the heap; kept in sync by the sift operations.
    heap_idx: usize,
}
|
||||
|
||||
/// A unique insertion key that can be used for key-value pair deletion.
///
/// The `epoch` disambiguates successive occupants of the same slab slot, so
/// that a stale key can be detected when deletion is attempted.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct InsertKey {
    // An index pointing to a node in the slab.
    slab_idx: usize,
    // The epoch when the node was inserted.
    epoch: u64,
}
|
||||
|
||||
/// A unique key made of the user-provided key complemented by a unique epoch.
///
/// Ties between equal user-provided keys are broken by the epoch, i.e. by
/// insertion order (earlier insertions sort first).
///
/// Implementation note: `UniqueKey` automatically derives `PartialOrd`, which
/// implies that lexicographic order between `key` and `epoch` must be preserved
/// to make sure that `key` has a higher sorting priority than `epoch`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct UniqueKey<K: Copy + Clone> {
    /// The user-provided key.
    key: K,
    /// A unique epoch that indicates the insertion date.
    epoch: u64,
}
|
||||
|
||||
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::fmt::Debug;

    use super::*;

    /// A single scripted operation on the queue under test.
    enum Op<K, V> {
        Insert(K, V),
        /// Insert and remember the returned `InsertKey` for a later
        /// `DeleteMarked`.
        InsertAndMark(K, V),
        /// Pull and assert that the outcome matches the expected pair.
        Pull(Option<(K, V)>),
        /// Delete the last marked item and assert the reported success flag.
        DeleteMarked(bool),
    }

    /// Replays a sequence of operations against a fresh `PriorityQueue`,
    /// panicking on the first mismatch with the expected outcomes.
    fn check<K: Copy + Clone + Ord + Debug, V: Eq + Debug>(
        operations: impl Iterator<Item = Op<K, V>>,
    ) {
        let mut queue = PriorityQueue::new();
        // Insertion key returned by the last `InsertAndMark`, if any.
        let mut marked = None;

        for op in operations {
            match op {
                Op::Insert(key, value) => {
                    queue.insert(key, value);
                }
                Op::InsertAndMark(key, value) => {
                    marked = Some(queue.insert(key, value));
                }
                Op::Pull(kv) => {
                    assert_eq!(queue.pull(), kv);
                }
                Op::DeleteMarked(success) => {
                    assert_eq!(
                        queue.delete(marked.take().expect("no item was marked for deletion")),
                        success
                    )
                }
            }
        }
    }

    /// Bulk insertion followed by pulls in sorted key order.
    #[test]
    fn priority_queue_smoke() {
        let operations = [
            Op::Insert(5, 'a'),
            Op::Insert(2, 'b'),
            Op::Insert(3, 'c'),
            Op::Insert(4, 'd'),
            Op::Insert(9, 'e'),
            Op::Insert(1, 'f'),
            Op::Insert(8, 'g'),
            Op::Insert(0, 'h'),
            Op::Insert(7, 'i'),
            Op::Insert(6, 'j'),
            Op::Pull(Some((0, 'h'))),
            Op::Pull(Some((1, 'f'))),
            Op::Pull(Some((2, 'b'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((5, 'a'))),
            Op::Pull(Some((6, 'j'))),
            Op::Pull(Some((7, 'i'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(Some((9, 'e'))),
        ];

        check(operations.into_iter());
    }

    /// Interleaved insertions and pulls.
    #[test]
    fn priority_queue_interleaved() {
        let operations = [
            Op::Insert(2, 'a'),
            Op::Insert(7, 'b'),
            Op::Insert(5, 'c'),
            Op::Pull(Some((2, 'a'))),
            Op::Insert(4, 'd'),
            Op::Pull(Some((4, 'd'))),
            Op::Insert(8, 'e'),
            Op::Insert(2, 'f'),
            Op::Pull(Some((2, 'f'))),
            Op::Pull(Some((5, 'c'))),
            Op::Pull(Some((7, 'b'))),
            Op::Insert(5, 'g'),
            Op::Insert(3, 'h'),
            Op::Pull(Some((3, 'h'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    /// Equal keys must be pulled in insertion (FIFO) order.
    #[test]
    fn priority_queue_equal_keys() {
        let operations = [
            Op::Insert(4, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'd'),
            Op::Insert(8, 'e'),
            Op::Insert(3, 'f'),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((3, 'f'))),
            Op::Pull(Some((4, 'a'))),
            Op::Insert(8, 'g'),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    /// Deleting an item that is still in the queue succeeds and removes it.
    #[test]
    fn priority_queue_delete_valid() {
        let operations = [
            Op::Insert(8, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::InsertAndMark(3, 'd'),
            Op::Insert(2, 'e'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'f'),
            Op::DeleteMarked(true),
            Op::Insert(5, 'g'),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'a'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    /// Deleting an item that was already pulled reports failure and leaves the
    /// queue untouched.
    #[test]
    fn priority_queue_delete_invalid() {
        let operations = [
            Op::Insert(0, 'a'),
            Op::Insert(7, 'b'),
            Op::InsertAndMark(2, 'c'),
            Op::Insert(4, 'd'),
            Op::Pull(Some((0, 'a'))),
            Op::Insert(2, 'e'),
            Op::Pull(Some((2, 'c'))),
            Op::Insert(4, 'f'),
            Op::DeleteMarked(false),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((7, 'b'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    /// Randomized differential test against a `BTreeMap`-based shadow queue.
    #[test]
    fn priority_queue_fuzz() {
        use std::cell::Cell;
        use std::collections::BTreeMap;

        use crate::util::rng::Rng;

        // Number of fuzzing operations.
        const ITER: usize = if cfg!(miri) { 1000 } else { 10_000_000 };

        // Inclusive upper bound for randomly generated keys.
        const MAX_KEY: u64 = 99;

        // Probabilistic weight of each of the 4 operations.
        //
        // The weight for pull values should probably stay close to the sum of
        // the two insertion weights to prevent queue size runaway.
        const INSERT_WEIGHT: u64 = 5;
        const INSERT_AND_MARK_WEIGHT: u64 = 1;
        const PULL_WEIGHT: u64 = INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT;
        const DELETE_MARKED_WEIGHT: u64 = 1;

        // Defines 4 basic operations on the priority queue, each of them being
        // performed on both the tested implementation and on a shadow queue
        // implemented with a `BTreeMap`. Any mismatch between the outcomes of
        // pull and delete operations between the two queues triggers a panic.
        //
        // The shadow queue keys are `(key, epoch)` pairs, mirroring the
        // `UniqueKey` tie-breaking of the implementation under test.
        let epoch: Cell<usize> = Cell::new(0);
        let marked: Cell<Option<InsertKey>> = Cell::new(None);
        let shadow_marked: Cell<Option<(u64, usize)>> = Cell::new(None);

        let insert_fn = |queue: &mut PriorityQueue<u64, u64>,
                         shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                         key,
                         value| {
            queue.insert(key, value);
            shadow_queue.insert((key, epoch.get()), value);
            epoch.set(epoch.get() + 1);
        };

        let insert_and_mark_fn = |queue: &mut PriorityQueue<u64, u64>,
                                  shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                                  key,
                                  value| {
            marked.set(Some(queue.insert(key, value)));
            shadow_queue.insert((key, epoch.get()), value);
            shadow_marked.set(Some((key, epoch.get())));
            epoch.set(epoch.get() + 1);
        };

        let pull_fn = |queue: &mut PriorityQueue<u64, u64>,
                       shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
            let value = queue.pull();
            // The shadow's smallest `(key, epoch)` entry is the expected pull.
            let shadow_value = match shadow_queue.iter().next() {
                Some((&unique_key, &value)) => {
                    shadow_queue.remove(&unique_key);
                    Some((unique_key.0, value))
                }
                None => None,
            };
            assert_eq!(value, shadow_value);
        };

        let delete_marked_fn =
            |queue: &mut PriorityQueue<u64, u64>,
             shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
                let success = match marked.take() {
                    Some(delete_key) => Some(queue.delete(delete_key)),
                    None => None,
                };
                let shadow_success = match shadow_marked.take() {
                    Some(delete_key) => Some(shadow_queue.remove(&delete_key).is_some()),
                    None => None,
                };
                assert_eq!(success, shadow_success);
            };

        // Fuzz away.
        let mut queue = PriorityQueue::new();
        let mut shadow_queue = BTreeMap::new();

        let rng = Rng::new(12345);
        const TOTAL_WEIGHT: u64 =
            INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT + PULL_WEIGHT + DELETE_MARKED_WEIGHT;

        for _ in 0..ITER {
            // Randomly choose one of the 4 possible operations, respecting the
            // probability weights.
            let mut op = rng.gen_bounded(TOTAL_WEIGHT);
            if op < INSERT_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_fn(&mut queue, &mut shadow_queue, key, val);
                continue;
            }
            op -= INSERT_WEIGHT;
            if op < INSERT_AND_MARK_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_and_mark_fn(&mut queue, &mut shadow_queue, key, val);
                continue;
            }
            op -= INSERT_AND_MARK_WEIGHT;
            if op < PULL_WEIGHT {
                pull_fn(&mut queue, &mut shadow_queue);
                continue;
            }
            delete_marked_fn(&mut queue, &mut shadow_queue);
        }
    }
}
|
@ -1,6 +1,10 @@
|
||||
//! Pseudo-random number generation.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::cell::Cell;
|
||||
|
||||
/// A pseudo-random number generator based on Wang Yi's Wyrand.
|
||||
/// A pseudo-random generator for 64-bit integers based on Wang Yi's Wyrand.
|
||||
///
|
||||
/// See: <https://github.com/wangyi-fudan/wyhash>
|
||||
#[derive(Clone, Debug)]
|
445
asynchronix/src/util/slot.rs
Normal file
445
asynchronix/src/util/slot.rs
Normal file
@ -0,0 +1,445 @@
|
||||
//! A primitive similar to a one-shot channel but without any signaling
|
||||
//! capability.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem::{ManuallyDrop, MaybeUninit};
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use std::ptr::{self, NonNull};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
use crate::loom_exports::sync::atomic::{self, AtomicUsize};
|
||||
|
||||
// [C] Indicates whether the writer or the reader has been dropped.
|
||||
const CLOSED: usize = 0b01;
|
||||
// [P] Indicates whether a value is available (implies CLOSED).
|
||||
const POPULATED: usize = 0b10;
|
||||
|
||||
// Possible states:
|
||||
//
|
||||
// | P | C |
|
||||
// |---|---|
|
||||
// | 0 | 0 |
|
||||
// | 0 | 1 |
|
||||
// | 1 | 1 |
|
||||
|
||||
/// The shared data of `SlotWriter` and `SlotReader`.
///
/// The `Inner` is heap-allocated by `slot()` and deallocated by whichever
/// handle observes the `CLOSED` flag already set.
struct Inner<T> {
    /// A bit field for `CLOSED` and `POPULATED`.
    state: AtomicUsize,
    /// The value, if any.
    value: UnsafeCell<MaybeUninit<T>>,
}
|
||||
|
||||
// Raw accessors to the value slot; callers are responsible for enforcing the
// exclusivity and initialization invariants documented on each method.
impl<T> Inner<T> {
    // Sets the value without dropping the previous content.
    //
    // # Safety
    //
    // The caller must have exclusive access to the value.
    unsafe fn write_value(&self, t: T) {
        self.value.with_mut(|value| (*value).write(t));
    }

    // Reads the value without moving it.
    //
    // # Safety
    //
    // The value must be initialized and the caller must have exclusive access
    // to the value. After the call, the value slot within `Inner` should be
    // considered uninitialized in order to avoid a double-drop.
    unsafe fn read_value(&self) -> T {
        // Bitwise copy: the source slot is logically moved-from afterwards.
        self.value.with(|value| (*value).as_ptr().read())
    }

    // Drops the value in place without deallocation.
    //
    // # Safety
    //
    // The value must be initialized and the caller must have exclusive access
    // to the value.
    unsafe fn drop_value_in_place(&self) {
        self.value
            .with_mut(|value| ptr::drop_in_place((*value).as_mut_ptr()));
    }
}
|
||||
|
||||
/// A handle to a slot that can write the value.
///
/// The writer is consumed by `write`, so at most one value can ever be
/// written.
#[derive(Debug)]
pub(crate) struct SlotWriter<T> {
    /// The shared data.
    inner: NonNull<Inner<T>>,
    /// Drop checker hint: we may drop an `Inner<T>` and thus a `T`.
    _phantom: PhantomData<Inner<T>>,
}
|
||||
|
||||
impl<T> SlotWriter<T> {
    /// Writes a value to the slot.
    ///
    /// The writer is consumed in all cases. If the reader was already dropped,
    /// the value is dropped as well and a `WriteError` is returned.
    pub(crate) fn write(self, value: T) -> Result<(), WriteError> {
        // Prevent the drop handler from running.
        let this = ManuallyDrop::new(self);

        // Safety: it is safe to access `inner` as we did not set the `CLOSED`
        // flag.
        unsafe {
            this.inner.as_ref().write_value(value);

            // Ordering: this Release operation synchronizes with the Acquire
            // operations in `SlotReader::try_read` and in `SlotReader`'s drop
            // handler, ensuring that the value written is fully visible when it
            // is read.
            let state = this
                .inner
                .as_ref()
                .state
                .fetch_or(POPULATED | CLOSED, Ordering::Release);

            if state & CLOSED == CLOSED {
                // The reader was dropped first: cleanup falls on us.
                //
                // Ensure that all atomic accesses to the state have completed
                // before deallocation.
                //
                // Ordering: this Acquire fence synchronizes with the Release
                // operation in the drop handler of the `SlotReader` that set
                // the `CLOSED` flag.
                atomic::fence(Ordering::Acquire);

                // Drop the value written above.
                //
                // Safety: the value was just written and we have exclusive
                // access to it since the reader was dropped.
                this.inner.as_ref().drop_value_in_place();

                // Deallocate inner.
                drop(Box::from_raw(this.inner.as_ptr()));

                Err(WriteError {})
            } else {
                Ok(())
            }
        }
    }
}
|
||||
|
||||
impl<T> Drop for SlotWriter<T> {
    fn drop(&mut self) {
        // Closes the slot; deallocates it only if the reader already closed.
        //
        // Safety: it is safe to access `inner` as we did not set the `CLOSED`
        // flag.
        unsafe {
            // Ordering: Acquire ordering is necessary in case the `CLOSED` flag
            // is set: it synchronizes with the Release operation in the drop
            // handler of the `SlotReader` that set the `CLOSED` flag and
            // ensures that all accesses to the slot have completed before
            // deallocation.
            let mut state = self.inner.as_ref().state.load(Ordering::Acquire);

            // Close the slot if it isn't already.
            //
            // Ordering: Acquire ordering in case the `CLOSED` flag was set just
            // after the state was loaded above, for the reasons stated as
            // above. Release ordering is in turn necessary in the expected case
            // where the `CLOSED` flag is now set: it synchronizes with the
            // Acquire operation in the drop handler of the `SlotReader` and
            // ensures that this access to the slot has completed before the
            // `SlotReader` performs deallocation.
            if state & CLOSED == 0 {
                state = self.inner.as_ref().state.fetch_or(CLOSED, Ordering::AcqRel);

                // The reader is alive, so let it handle the cleanup.
                if state & CLOSED == 0 {
                    return;
                }
            }

            // Deallocate the slot since it was closed by the reader.
            //
            // Note: there can't be any value because `write` consumes the writer
            // and does not run the drop handler.
            //
            // Safety: `inner` will no longer be used once deallocated.
            drop(Box::from_raw(self.inner.as_ptr()));
        }
    }
}
|
||||
|
||||
// Safety: the writer may hand a `T` to (and may drop a `T` on) another thread,
// hence the `T: Send` bound; the shared state is only touched via atomics.
unsafe impl<T: Send> Send for SlotWriter<T> {}
unsafe impl<T: Send> Sync for SlotWriter<T> {}

impl<T> UnwindSafe for SlotWriter<T> {}
impl<T> RefUnwindSafe for SlotWriter<T> {}
|
||||
|
||||
/// A handle to a slot that can read the value.
///
/// `try_read` only borrows the reader, so it can be retried until a value or
/// a `Closed` error is observed.
#[derive(Debug)]
pub(crate) struct SlotReader<T> {
    /// The shared data.
    inner: NonNull<Inner<T>>,
    /// Drop checker hint: we may drop an `Inner<T>` and thus a `T`.
    _phantom: PhantomData<Inner<T>>,
}
|
||||
|
||||
impl<T> SlotReader<T> {
    /// Attempts to read the value.
    ///
    /// `ReadError::NoValue` means the value is not available *yet*;
    /// `ReadError::Closed` means it never will be (writer dropped or value
    /// already taken).
    pub(crate) fn try_read(&mut self) -> Result<T, ReadError> {
        // Safety: it is safe to access `inner` as we did not set the `CLOSED`
        // flag.
        unsafe {
            // Ordering: this Acquire load synchronizes with the Release
            // operation in `SlotWriter::write`, ensuring that the value written
            // is fully visible when the `POPULATED` flag is read.
            let state = self.inner.as_ref().state.load(Ordering::Acquire);

            // If there is no value but the writer is still alive, return `NoValue`.
            if state == 0 {
                return Err(ReadError::NoValue);
            }

            // If there is no value and the writer was dropped, return `Closed`.
            if state & POPULATED == 0 {
                return Err(ReadError::Closed);
            }

            // At this point, we know that `POPULATED`, and therefore `CLOSED`, are
            // set.

            // Clear the `POPULATED` flag since we are going to take the value.
            //
            // Ordering: there is no need for further synchronization since the
            // above Acquire load already ensures that the value is visible and
            // the value will no longer be used. The value of the `POPULATED`
            // flag is only observed by this thread.
            self.inner.as_ref().state.store(CLOSED, Ordering::Relaxed);

            // Safety: we know there is a value and that it is fully visible.
            Ok(self.inner.as_ref().read_value())
        }
    }
}
|
||||
|
||||
impl<T> Drop for SlotReader<T> {
    fn drop(&mut self) {
        // Closes the slot; drops any unread value and deallocates only if the
        // writer already closed.
        //
        // Safety: it is safe to access `inner` as we did not set the `CLOSED`
        // flag.
        unsafe {
            // Ordering: Acquire ordering is necessary in case the `CLOSED` flag
            // is set: it synchronizes with the Release operation in the drop
            // handler of the `SlotWriter` that set the `CLOSED` flag and
            // ensures that all accesses to the slot have completed before
            // the value is dropped and the slot is deallocated.
            let mut state = self.inner.as_ref().state.load(Ordering::Acquire);

            // Close the slot if it isn't already.
            if state & CLOSED == 0 {
                // Ordering: this Acquire operation synchronizes with the
                // Release operation in `SlotWriter::write`, ensuring that the
                // value written is fully visible in case it needs to be
                // dropped. Release ordering is in turn necessary in the
                // expected case where the `CLOSED` flag is now set: it
                // synchronizes with the Acquire operation in the `write` method
                // or the drop handler of the `SlotWriter` and ensures that this
                // access to the slot has completed before the `SlotWriter`
                // performs deallocation.
                state = self.inner.as_ref().state.fetch_or(CLOSED, Ordering::AcqRel);

                // The writer is alive, so let it handle the cleanup.
                if state & CLOSED == 0 {
                    return;
                }
            }

            // Drop the value if necessary and deallocate the slot since it was
            // closed by the writer.
            //
            // Safety: `inner` will no longer be used once deallocated. If there
            // is an unread value, drop it first.
            if state & POPULATED == POPULATED {
                // Safety: the presence of an initialized value was just checked
                // and there is no live writer so no risk of race.
                self.inner.as_ref().drop_value_in_place();
            }
            drop(Box::from_raw(self.inner.as_ptr()));
        }
    }
}
|
||||
|
||||
// Safety: the reader may receive a `T` written from another thread, hence the
// `T: Send` bound; the shared state is only touched via atomics.
unsafe impl<T: Send> Send for SlotReader<T> {}
unsafe impl<T: Send> Sync for SlotReader<T> {}

impl<T> UnwindSafe for SlotReader<T> {}
impl<T> RefUnwindSafe for SlotReader<T> {}
|
||||
|
||||
/// Error returned when reading a value fails.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(crate) enum ReadError {
    /// The slot does not contain any value yet.
    ///
    /// This is a transient condition: a later `try_read` may succeed.
    NoValue,
    /// The writer was dropped or the value was already taken.
    ///
    /// This is terminal: no value will ever become available.
    Closed,
}
|
||||
|
||||
// Human-readable description of each read failure, for error reporting.
impl fmt::Display for ReadError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::NoValue => write!(fmt, "no value in the slot"),
            Self::Closed => write!(fmt, "slot closed by writer"),
        }
    }
}

impl Error for ReadError {}
|
||||
|
||||
/// Error returned when writing a value fails due to the reader being dropped.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(crate) struct WriteError {}
|
||||
|
||||
// Human-readable description of the write failure, for error reporting.
impl fmt::Display for WriteError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(fmt, "slot closed by reader")
    }
}

impl Error for WriteError {}
|
||||
|
||||
/// Creates the writer and reader handles of a slot.
|
||||
pub(crate) fn slot<T>() -> (SlotWriter<T>, SlotReader<T>) {
|
||||
let inner = NonNull::new(Box::into_raw(Box::new(Inner {
|
||||
state: AtomicUsize::new(0),
|
||||
value: UnsafeCell::new(MaybeUninit::uninit()),
|
||||
})))
|
||||
.unwrap();
|
||||
|
||||
let writer = SlotWriter {
|
||||
inner,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
let reader = SlotReader {
|
||||
inner,
|
||||
_phantom: PhantomData,
|
||||
};
|
||||
|
||||
(writer, reader)
|
||||
}
|
||||
|
||||
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;

    use std::thread;

    /// A written value can be read back exactly once.
    #[test]
    fn slot_single_threaded_write() {
        let (writer, mut reader) = slot();

        assert_eq!(reader.try_read(), Err(ReadError::NoValue));
        assert!(writer.write(42).is_ok());
        assert_eq!(reader.try_read(), Ok(42));
    }

    /// Dropping the writer closes the slot on the reader side.
    #[test]
    fn slot_single_threaded_drop_writer() {
        let (writer, mut reader) = slot::<i32>();

        assert_eq!(reader.try_read(), Err(ReadError::NoValue));
        drop(writer);
        assert_eq!(reader.try_read(), Err(ReadError::Closed));
    }

    /// Writing to a slot whose reader was dropped fails.
    #[test]
    fn slot_single_threaded_drop_reader() {
        let writer = slot().0;

        assert!(writer.write(42).is_err());
    }

    /// A value written from another thread is eventually readable.
    #[test]
    fn slot_multi_threaded_write() {
        let (writer, mut reader) = slot();

        let th = thread::spawn(move || {
            assert!(writer.write(42).is_ok());
        });

        // Spin until the value shows up, then fall through to the `join`.
        //
        // Note: `break` rather than `return`, so that the spawned thread is
        // actually joined and a panic inside it is propagated.
        loop {
            if let Ok(v) = reader.try_read() {
                assert_eq!(v, 42);
                break;
            }
        }

        th.join().unwrap();
    }

    /// A writer dropped on another thread eventually closes the slot.
    #[test]
    fn slot_multi_threaded_drop_writer() {
        let (writer, mut reader) = slot::<i32>();

        let th = thread::spawn(move || {
            drop(writer);
        });

        // Spin until the slot reports `Closed`, then join the thread.
        loop {
            let v = reader.try_read();
            assert!(v.is_err());
            if v == Err(ReadError::Closed) {
                break;
            }
        }

        th.join().unwrap();
    }
}
|
||||
|
||||
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::model::Builder;
    use loom::thread;

    /// Checks a write racing with a read, over all interleavings explored by
    /// loom within the preemption bound.
    #[test]
    fn loom_slot_write() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        let mut builder = Builder::new();
        // Honor a user-provided bound (e.g. from the environment) if set.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (writer, mut reader) = slot();

            let th = thread::spawn(move || assert!(writer.write(42).is_ok()));

            // The read may legitimately miss the value; only its content is
            // checked when it succeeds.
            if let Ok(v) = reader.try_read() {
                assert_eq!(v, 42);
            }

            th.join().unwrap();
        });
    }

    /// Checks a writer drop racing with a read, over all interleavings
    /// explored by loom within the preemption bound.
    #[test]
    fn loom_slot_drop_writer() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        let mut builder = Builder::new();
        // Honor a user-provided bound (e.g. from the environment) if set.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (writer, mut reader) = slot::<i32>();

            let th = thread::spawn(move || drop(writer));

            // No value is ever written, so every read outcome is an error.
            assert!(reader.try_read().is_err());

            th.join().unwrap();
        });
    }
}
|
393
asynchronix/src/util/spsc_queue.rs
Normal file
393
asynchronix/src/util/spsc_queue.rs
Normal file
@ -0,0 +1,393 @@
|
||||
//! Single-producer single-consumer unbounded FIFO queue that stores values in
|
||||
//! fixed-size memory segments.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::error::Error;
|
||||
use std::fmt;
|
||||
use std::marker::PhantomData;
|
||||
use std::mem::MaybeUninit;
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
use std::ptr::{self, NonNull};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
use crossbeam_utils::CachePadded;
|
||||
|
||||
use crate::loom_exports::cell::UnsafeCell;
|
||||
use crate::loom_exports::sync::atomic::{AtomicBool, AtomicPtr};
|
||||
use crate::loom_exports::sync::Arc;
|
||||
|
||||
/// The number of slots in a single segment.
|
||||
const SEGMENT_LEN: usize = 32;
|
||||
|
||||
/// A slot containing a single value.
struct Slot<T> {
    /// Set to `true` by the producer once `value` has been written; this is
    /// the per-slot synchronization point between producer and consumer.
    has_value: AtomicBool,
    /// The value; written by the producer before `has_value` is set.
    value: UnsafeCell<MaybeUninit<T>>,
}
|
||||
|
||||
impl<T> Default for Slot<T> {
|
||||
fn default() -> Self {
|
||||
Slot {
|
||||
has_value: AtomicBool::new(false),
|
||||
value: UnsafeCell::new(MaybeUninit::uninit()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A memory segment containing `SEGMENT_LEN` slots.
struct Segment<T> {
    /// Address of the next segment.
    ///
    /// A null pointer means that the next segment is not allocated yet.
    next_segment: AtomicPtr<Segment<T>>,
    /// The fixed-size array of value slots.
    data: [Slot<T>; SEGMENT_LEN],
}
|
||||
|
||||
impl<T> Segment<T> {
    /// Allocates a new segment.
    ///
    /// The returned pointer owns the allocation and must eventually be
    /// released with `Box::from_raw` (done in `Queue::pop` / `Queue::drop`).
    fn allocate_new() -> NonNull<Self> {
        let segment = Self {
            next_segment: AtomicPtr::new(ptr::null_mut()),
            data: Default::default(),
        };

        // Safety: the pointer is non-null since it comes from a box.
        unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(segment))) }
    }
}
|
||||
|
||||
/// The head of the queue from which values are popped.
///
/// Accessed only by the consumer side (see `Queue::pop`).
struct Head<T> {
    /// Pointer to the segment at the head of the queue.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be read.
    ///
    /// If the index is equal to the segment length, it is necessary to move to
    /// the next segment before the next value can be read.
    next_read_idx: usize,
}
|
||||
|
||||
/// The tail of the queue to which values are pushed.
///
/// Accessed only by the producer side (see `Queue::push`).
struct Tail<T> {
    /// Pointer to the segment at the tail of the queue.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be written.
    ///
    /// If the index is equal to the segment length, a new segment must be
    /// allocated before a new value can be written.
    next_write_idx: usize,
}
|
||||
|
||||
/// A single-producer, single-consumer unbounded FIFO queue.
///
/// `head` and `tail` are cache-padded so that the producer and the consumer
/// do not share a cache line.
struct Queue<T> {
    head: CachePadded<UnsafeCell<Head<T>>>,
    tail: CachePadded<UnsafeCell<Tail<T>>>,
}
|
||||
|
||||
impl<T> Queue<T> {
    /// Creates a new queue.
    ///
    /// Head and tail initially share the same (empty) segment.
    fn new() -> Self {
        let segment = Segment::allocate_new();

        let head = Head {
            segment,
            next_read_idx: 0,
        };
        let tail = Tail {
            segment,
            next_write_idx: 0,
        };

        Self {
            head: CachePadded::new(UnsafeCell::new(head)),
            tail: CachePadded::new(UnsafeCell::new(tail)),
        }
    }

    /// Pushes a new value.
    ///
    /// # Safety
    ///
    /// The method cannot be called from multiple threads concurrently.
    unsafe fn push(&self, value: T) {
        // Safety: this is the only thread accessing the tail.
        let tail = self.tail.with_mut(|p| &mut *p);

        // If the whole segment has been written, allocate a new segment.
        if tail.next_write_idx == SEGMENT_LEN {
            let old_segment = tail.segment;
            tail.segment = Segment::allocate_new();

            // Safety: the old segment is still allocated since the consumer
            // cannot deallocate it before `next_segment` is set to a non-null
            // value.
            old_segment
                .as_ref()
                .next_segment
                .store(tail.segment.as_ptr(), Ordering::Release);

            tail.next_write_idx = 0;
        }

        // Safety: the tail segment is allocated since the consumer cannot
        // deallocate it before `next_segment` is set to a non-null value.
        let data = &tail.segment.as_ref().data[tail.next_write_idx];

        // Safety: we have exclusive access to the slot value since the consumer
        // cannot access it before `has_value` is set to true.
        data.value.with_mut(|p| (*p).write(value));

        // Ordering: this Release store synchronizes with the Acquire load in
        // `pop` and ensures that the value is visible to the consumer once
        // `has_value` reads `true`.
        data.has_value.store(true, Ordering::Release);

        tail.next_write_idx += 1;
    }

    /// Pops a new value.
    ///
    /// Returns `None` if no value is currently available.
    ///
    /// # Safety
    ///
    /// The method cannot be called from multiple threads concurrently.
    unsafe fn pop(&self) -> Option<T> {
        // Safety: this is the only thread accessing the head.
        let head = self.head.with_mut(|p| &mut *p);

        // If the whole segment has been read, try to move to the next segment.
        if head.next_read_idx == SEGMENT_LEN {
            // Read the next segment or return `None` if it is not ready yet.
            //
            // Safety: the head segment is still allocated since we are the only
            // thread that can deallocate it.
            let next_segment = head.segment.as_ref().next_segment.load(Ordering::Acquire);
            let next_segment = NonNull::new(next_segment)?;

            // Deallocate the old segment.
            //
            // Safety: the pointer was initialized from a box and the segment is
            // still allocated since we are the only thread that can deallocate
            // it.
            let _ = Box::from_raw(head.segment.as_ptr());

            // Update the segment and the next index.
            head.segment = next_segment;
            head.next_read_idx = 0;
        }

        let data = &head.segment.as_ref().data[head.next_read_idx];

        // Ordering: this Acquire load synchronizes with the Release store in
        // `push` and ensures that the value is visible once `has_value` reads
        // `true`.
        if !data.has_value.load(Ordering::Acquire) {
            return None;
        }

        // Safety: since `has_value` is `true` then we have exclusive ownership
        // of the value and we know that it was initialized.
        let value = data.value.with(|p| (*p).assume_init_read());

        head.next_read_idx += 1;

        Some(value)
    }
}
|
||||
|
||||
impl<T> Drop for Queue<T> {
    /// Drops all remaining values and frees the last live segment.
    fn drop(&mut self) {
        unsafe {
            // Drop all values. Popping also walks and frees any intermediate
            // segments, leaving only the final head segment allocated.
            while self.pop().is_some() {}

            // All values have been dropped: the last segment can be freed.

            // Safety: this is the only thread accessing the head since both the
            // consumer and producer have been dropped.
            let head = self.head.with_mut(|p| &mut *p);

            // Safety: the pointer was initialized from a box and the segment is
            // still allocated since we are the only thread that can deallocate
            // it.
            let _ = Box::from_raw(head.segment.as_ptr());
        }
    }
}
|
||||
|
||||
// Safety: the queue owns values of type `T` and heap-allocated segments, so it
// can move to another thread whenever `T: Send`.
unsafe impl<T: Send> Send for Queue<T> {}
// Safety: the only mutating entry points, `push` and `pop`, are `unsafe fn`s
// whose documented contract forbids concurrent calls from multiple threads, so
// sharing `&Queue` is sound; callers (the `!Sync` `Producer`/`Consumer`
// handles) uphold that contract.
unsafe impl<T: Send> Sync for Queue<T> {}

// NOTE(review): the queue is declared unwind-safe; this looks sound since no
// intermediate state is observable after a panic, but confirm against the
// `push`/`pop` implementations when changing them.
impl<T> UnwindSafe for Queue<T> {}
impl<T> RefUnwindSafe for Queue<T> {}
|
||||
|
||||
/// A handle to a single-producer, single-consumer queue that can push values.
pub(crate) struct Producer<T> {
    // Shared ownership of the queue; the other strong reference is held by the
    // matching `Consumer` (see `spsc_queue`).
    queue: Arc<Queue<T>>,
    // Makes the handle `!Sync` so only one thread at a time can push, which is
    // required by the safety contract of `Queue::push`.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
|
||||
impl<T> Producer<T> {
|
||||
/// Pushes a value to the queue.
|
||||
pub(crate) fn push(&self, value: T) -> Result<(), PushError> {
|
||||
if Arc::strong_count(&self.queue) == 1 {
|
||||
return Err(PushError {});
|
||||
}
|
||||
|
||||
unsafe { self.queue.push(value) };
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Error returned when a push failed due to the consumer being dropped.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(crate) struct PushError {}

impl fmt::Display for PushError {
    /// Formats the error with the same wording used by channel mailboxes.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("sending message into a closed mailbox")
    }
}

impl Error for PushError {}
|
||||
|
||||
/// A handle to a single-producer, single-consumer queue that can pop values.
pub(crate) struct Consumer<T> {
    // Shared ownership of the queue; the other strong reference is held by the
    // matching `Producer` (see `spsc_queue`).
    queue: Arc<Queue<T>>,
    // Makes the handle `!Sync` so only one thread at a time can pop, which is
    // required by the safety contract of `Queue::pop`.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
|
||||
impl<T> Consumer<T> {
|
||||
/// Pops a value from the queue.
|
||||
pub(crate) fn pop(&self) -> Option<T> {
|
||||
unsafe { self.queue.pop() }
|
||||
}
|
||||
}
|
||||
|
||||
/// Creates the producer and consumer handles of a single-producer,
|
||||
/// single-consumer queue.
|
||||
pub(crate) fn spsc_queue<T>() -> (Producer<T>, Consumer<T>) {
|
||||
let queue = Arc::new(Queue::new());
|
||||
|
||||
let producer = Producer {
|
||||
queue: queue.clone(),
|
||||
_non_sync_phantom: PhantomData,
|
||||
};
|
||||
let consumer = Consumer {
|
||||
queue,
|
||||
_non_sync_phantom: PhantomData,
|
||||
};
|
||||
|
||||
(producer, consumer)
|
||||
}
|
||||
|
||||
/// Regular (non-loom) tests.
//
// NOTE(review): the previous module doc said "Loom tests." but the `cfg` below
// selects the *non*-loom build; the doc was corrected to match.
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;

    use std::thread;

    /// Smoke test: pushes a monotonically increasing sequence from one thread
    /// and checks the consumer observes it in order and without loss.
    #[test]
    fn spsc_queue_basic() {
        // Use a much smaller workload under Miri, which executes far slower.
        const VALUE_COUNT: usize = if cfg!(miri) { 1000 } else { 100_000 };

        let (producer, consumer) = spsc_queue();

        let th = thread::spawn(move || {
            for i in 0..VALUE_COUNT {
                // Spin until the next value becomes visible.
                let value = loop {
                    if let Some(v) = consumer.pop() {
                        break v;
                    }
                };

                assert_eq!(value, i);
            }
        });

        for i in 0..VALUE_COUNT {
            producer.push(i).unwrap();
        }

        th.join().unwrap();
    }
}
|
||||
|
||||
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::model::Builder;
    use loom::thread;

    /// Explores interleavings of concurrent pushes and pops within a single
    /// segment.
    #[test]
    fn loom_spsc_queue_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT: usize = 10;

        let mut builder = Builder::new();
        // Bound preemptions unless overridden (e.g. via LOOM_MAX_PREEMPTIONS)
        // to keep the state space tractable.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = spsc_queue();

            let th = thread::spawn(move || {
                let mut value = 0;
                // `pop` may legitimately return `None` when racing with the
                // producer, so only successful pops advance the expectation.
                for _ in 0..VALUE_COUNT {
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });

            for i in 0..VALUE_COUNT {
                let _ = producer.push(i);
            }

            th.join().unwrap();
        });
    }

    /// Explores interleavings around the segment boundary, i.e. when the
    /// producer allocates a new segment while the consumer catches up.
    #[test]
    fn loom_spsc_queue_new_segment() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT_BEFORE: usize = 5;
        const VALUE_COUNT_AFTER: usize = 5;

        let mut builder = Builder::new();
        // Bound preemptions unless overridden to keep the state space
        // tractable.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = spsc_queue();

            // Fill up the first segment except for the last `VALUE_COUNT_BEFORE` slots.
            for i in 0..(SEGMENT_LEN - VALUE_COUNT_BEFORE) {
                producer.push(i).unwrap();
                consumer.pop();
            }

            let th = thread::spawn(move || {
                let mut value = SEGMENT_LEN - VALUE_COUNT_BEFORE;
                for _ in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });

            for i in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                let _ = producer.push(i);
            }

            th.join().unwrap();
        });
    }
}
|
318
asynchronix/src/util/sync_cell.rs
Normal file
318
asynchronix/src/util/sync_cell.rs
Normal file
@ -0,0 +1,318 @@
|
||||
//! Very efficient, single-writer alternative to `RwLock` based on a fully safe
|
||||
//! seqlock implementation.
|
||||
|
||||
#![allow(unused)]
|
||||
|
||||
use std::cell::Cell;
|
||||
use std::marker::PhantomData;
|
||||
|
||||
use crate::loom_exports::sync::atomic::{self, AtomicUsize, Ordering};
|
||||
use crate::loom_exports::sync::Arc;
|
||||
|
||||
/// An adapter to a `Value` type which may safely read and write a value from
/// multiple threads without synchronization, but which only guarantees that the
/// value from a read is valid when all write and read accesses were
/// synchronized.
///
/// Taking for instance a value composed of several `u64` fields, then its
/// `TearableAtomic` adaptor could use simple relaxed loads and stores on
/// several `AtomicU64`. While `tearable_store`/`tearable_load` could possibly
/// store/read inconsistent values when racing with a writer, they should always
/// store/read consistent values in the absence of a race.
///
/// This trait is meant to enable optimistic reads of the value and discard such
/// reads whenever a race could have taken place.
pub(crate) trait TearableAtomic: Sync {
    /// The value to be read and written.
    type Value;

    /// Reads a value which is guaranteed to be the same as the last written
    /// value, provided there were no races when writing and reading.
    ///
    /// If an inconsistent value is produced as the result of a torn load,
    /// however, its construction and destruction should neither lead to UB nor
    /// produce a panic or other unwanted side-effects.
    fn tearable_load(&self) -> Self::Value;

    /// Writes a value which is guaranteed to remain unchanged until it is read
    /// back, provided there were no races when writing and reading.
    ///
    /// If an inconsistent value is produced as the result of a torn store,
    /// however, its construction and destruction should neither lead to UB nor
    /// produce a panic or other unwanted side-effects.
    fn tearable_store(&self, value: Self::Value);
}
|
||||
|
||||
/// The inner type of `SyncCell` and `SyncCellReader`.
struct Inner<T: TearableAtomic> {
    // The wrapped value, accessed through tearable loads and stores.
    tearable: T,
    // Seqlock sequence count: even when no write is in flight, odd while a
    // write is in progress (see `SyncCell::write`).
    sequence: AtomicUsize,
}
|
||||
|
||||
/// A single-writer, multiple-readers synchronized cell based on a fully safe
/// seqlock implementation.
///
/// Yes, there are already crates that implement seqlocks for arbitrary types,
/// but as of today these either need to rely on UB or have various
/// shortcomings. See in particular this RFC, which intends to eventually bring
/// a proper solution to this problem:
///
/// <https://github.com/rust-lang/rfcs/pull/3301>.
///
/// In the meantime, this implementation sidesteps these issues by only dealing
/// with values that implement the `TearableAtomic` trait, which basically means
/// values which may become inconsistent due to torn stores or loads but which
/// can still be constructed and destructed even in such case.
///
/// Note that it is still possible to use a `SyncCell` for types that cannot be
/// safely constructed from a torn state: it is enough to make
/// `TearableAtomic::Value` an always-safe-to-construct builder type for the
/// actual value, and to build the actual value only when a builder is returned
/// from the `read` method since such builder is then guaranteed to be in a
/// valid state.
///
/// `SyncCell` is restricted to a single writer, which is the `SyncCell` object
/// itself. This makes it possible to increment the sequence count with simple
/// loads and stores instead of more expensive read-modify-write atomic
/// operations. It also gives the `SyncCell` object the possibility to read the
/// value at any time without any synchronization overhead. Multiple thread-safe
/// reader handles can be constructed using the `reader` method.
pub(crate) struct SyncCell<T: TearableAtomic> {
    // State shared with all `SyncCellReader` handles.
    inner: Arc<Inner<T>>,
    // Makes `SyncCell` `!Sync`, which enforces the single-writer property.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
|
||||
|
||||
impl<T: TearableAtomic> SyncCell<T> {
    /// Creates a synchronized cell holding the provided tearable value.
    ///
    /// The sequence count starts at 0 (even), i.e. with no write in flight.
    pub(crate) fn new(tearable: T) -> Self {
        Self {
            inner: Arc::new(Inner {
                tearable,
                sequence: AtomicUsize::new(0),
            }),
            _non_sync_phantom: PhantomData,
        }
    }

    /// Performs a synchronized read.
    ///
    /// Unlike `SyncCellReader::try_read`, this never fails.
    pub(crate) fn read(&self) -> T::Value {
        // The below read is always synchronized since `SyncCell` is `!Sync` and
        // therefore there cannot be concurrent write operations.
        self.inner.tearable.tearable_load()
    }

    /// Performs a synchronized write.
    pub(crate) fn write(&self, value: T::Value) {
        // Increment the sequence count to an odd number, flagging a write in
        // progress.
        //
        // Note: this thread is the only one that can change the sequence count
        // so even a plain load will always return the last sequence count.
        let seq = self.inner.sequence.load(Ordering::Relaxed);
        self.inner
            .sequence
            .store(seq.wrapping_add(1), Ordering::Relaxed);

        // Store the value.
        //
        // Ordering: this Release fence synchronizes with the `Acquire` fence in
        // `SyncCellReader::try_read` and ensures that either the above
        // increment to an odd sequence count is visible after the value is
        // tentatively read, or a later increment of the sequence count.
        atomic::fence(Ordering::Release);
        self.inner.tearable.tearable_store(value);

        // Increment the sequence count back to an even number.
        //
        // Ordering: this Release store synchronizes with the Acquire load of
        // the sequence count at the beginning of `SyncCellReader::try_read` and
        // ensures that if the sequence count loaded is indeed even, then the
        // value has been fully written (though it may have been later
        // overwritten).
        self.inner
            .sequence
            .store(seq.wrapping_add(2), Ordering::Release);
    }

    /// Returns a reader handle sharing this cell's state.
    pub(crate) fn reader(&self) -> SyncCellReader<T> {
        SyncCellReader {
            inner: self.inner.clone(),
        }
    }
}
|
||||
|
||||
/// A handle to a `SyncCell` that enables synchronized reads from multiple
/// threads.
#[derive(Clone)]
pub(crate) struct SyncCellReader<T: TearableAtomic> {
    // State shared with the writing `SyncCell` and all other reader handles.
    inner: Arc<Inner<T>>,
}
|
||||
|
||||
impl<T: TearableAtomic> SyncCellReader<T> {
    /// Attempts a synchronized read.
    ///
    /// An error is returned if this read operation raced with a write
    /// operation.
    pub(crate) fn try_read(&self) -> Result<T::Value, SyncCellReadError> {
        // Read the initial sequence count and make sure it is even, i.e. that
        // no write was in flight when the count was sampled.
        //
        // Ordering: this Acquire load synchronizes with the Release store of an
        // even sequence count at the end of `SyncCell::write` and ensures that
        // if the sequence count is indeed even, then the value stored before
        // the sequence count was set was fully written (though it may have been
        // later overwritten).
        let seq = self.inner.sequence.load(Ordering::Acquire);
        if seq & 1 != 0 {
            return Err(SyncCellReadError {});
        }

        // Attempt to load the value, which may be torn if there is a concurrent
        // write operation.
        let value = self.inner.tearable.tearable_load();

        // Ordering: this Acquire fence synchronizes with the Release fence in
        // `SyncCell::write` and ensures that the below read of the sequence
        // count sees the increment to an odd sequence count that precedes the
        // tearable store, or a later increment of the sequence count.
        atomic::fence(Ordering::Acquire);

        // Check that the sequence count has not changed; if it has, the value
        // read above may be torn and must be discarded.
        let new_seq = self.inner.sequence.load(Ordering::Relaxed);
        if new_seq == seq {
            Ok(value)
        } else {
            Err(SyncCellReadError {})
        }
    }
}
|
||||
|
||||
/// An error returned when a read operation raced with a concurrent write
/// operation and the (possibly torn) value had to be discarded.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub(crate) struct SyncCellReadError {}
|
||||
|
||||
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::lazy_static;
    use loom::model::Builder;
    use loom::sync::atomic::AtomicBool;
    use loom::thread;

    /// A test tearable made of `N` independent atomics: loads and stores are
    /// per-element Relaxed, so a racing reader can observe a torn mix of two
    /// written values — exactly what `SyncCell` must detect.
    struct TestTearable<const N: usize> {
        inner: [AtomicUsize; N],
    }

    impl<const N: usize> TestTearable<N> {
        /// Builds the tearable from an initial array of values.
        fn new(value: [usize; N]) -> Self {
            let inner: Vec<_> = value.into_iter().map(|v| AtomicUsize::new(v)).collect();

            Self {
                inner: inner.try_into().unwrap(),
            }
        }
    }

    impl<const N: usize> TearableAtomic for TestTearable<N> {
        type Value = [usize; N];

        fn tearable_load(&self) -> Self::Value {
            let mut value = [0usize; N];
            for i in 0..N {
                value[i] = self.inner[i].load(Ordering::Relaxed);
            }

            value
        }

        fn tearable_store(&self, value: Self::Value) {
            for i in 0..N {
                self.inner[i].store(value[i], Ordering::Relaxed);
            }
        }
    }

    /// Checks that a reader racing with two writes either fails or observes
    /// one of the three consistent values — never a torn mix.
    #[test]
    fn loom_sync_cell_race() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        const VALUE1: [usize; 3] = [1, 2, 3];
        const VALUE2: [usize; 3] = [4, 5, 6];
        const VALUE3: [usize; 3] = [7, 8, 9];

        let mut builder = Builder::new();
        // Bound preemptions unless overridden to keep the state space
        // tractable.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let tearable = TestTearable::new(VALUE1);
            let cell = SyncCell::new(tearable);
            let reader = cell.reader();

            let th = thread::spawn(move || {
                if let Ok(v) = reader.try_read() {
                    assert!(v == VALUE1 || v == VALUE2 || v == VALUE3, "v = {:?}", v);
                }
            });

            cell.write(VALUE2);
            cell.write(VALUE3);
            th.join().unwrap();
        });
    }

    /// Checks that reads that are externally synchronized with writes (via the
    /// `NEW_VALUE_FLAG` handshake) always succeed and see the latest value.
    #[test]
    fn loom_sync_cell_synchronized() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;

        const VALUE1: [usize; 3] = [1, 2, 3];
        const VALUE2: [usize; 3] = [4, 5, 6];
        const VALUE3: [usize; 3] = [7, 8, 9];

        let mut builder = Builder::new();
        // Bound preemptions unless overridden to keep the state space
        // tractable.
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            // Handshake flag: set by the writer after a write, cleared by the
            // reader after a successful read.
            lazy_static! {
                static ref NEW_VALUE_FLAG: AtomicBool = AtomicBool::new(false);
            };

            let tearable = TestTearable::new(VALUE1);
            let cell = SyncCell::new(tearable);
            let reader = cell.reader();

            let th = thread::spawn(move || {
                if NEW_VALUE_FLAG.load(Ordering::Acquire) {
                    let v = reader
                        .try_read()
                        .expect("read should always succeed when synchronized");
                    assert!(v == VALUE2 || v == VALUE3, "v = {:?}", v);

                    NEW_VALUE_FLAG.store(false, Ordering::Release);

                    if NEW_VALUE_FLAG.load(Ordering::Acquire) {
                        let v = reader
                            .try_read()
                            .expect("read should always succeed when synchronized");
                        assert_eq!(v, VALUE3);
                    }
                }
            });

            cell.write(VALUE2);
            NEW_VALUE_FLAG.store(true, Ordering::Release);

            // Only perform the second write if the reader acknowledged the
            // first one, so that every read stays synchronized.
            if !NEW_VALUE_FLAG.load(Ordering::Acquire) {
                cell.write(VALUE3);
                NEW_VALUE_FLAG.store(true, Ordering::Release);
            }

            th.join().unwrap();
        });
    }
}
|
Reference in New Issue
Block a user