1
0
forked from ROMEO/nexosim

Initial (g)RPC implementation

This commit is contained in:
Serge Barral 2024-04-25 11:12:54 +02:00
parent c984202005
commit e84e802f09
55 changed files with 5814 additions and 1996 deletions

View File

@ -28,7 +28,7 @@ jobs:
toolchain: ${{ matrix.rust }}
- name: Run cargo check
run: cargo check --all-features
run: cargo check
test:
name: Test suite

View File

@ -13,7 +13,6 @@ on:
- 'asynchronix/src/model/ports/broadcaster.rs'
- 'asynchronix/src/model/ports/broadcaster/**'
- 'asynchronix/src/util/slot.rs'
- 'asynchronix/src/util/spsc_queue.rs'
- 'asynchronix/src/util/sync_cell.rs'
jobs:

2
.gitignore vendored
View File

@ -1,2 +1,2 @@
target
Cargo.lock
Cargo.lock

View File

@ -20,17 +20,26 @@ categories = ["simulation", "aerospace", "science"]
keywords = ["simulation", "discrete-event", "systems", "cyberphysical", "real-time"]
autotests = false
[features]
serde = ["dep:serde"]
# Remote procedure call API.
rpc = ["dep:rmp-serde", "dep:serde", "dep:tonic", "dep:prost", "dep:prost-types", "dep:bytes"]
# This feature forces protobuf/gRPC code (re-)generation.
rpc-codegen = ["dep:tonic-build"]
# gRPC server.
grpc-server = ["rpc", "dep:tokio"]
# API-unstable public exports meant for external test/benchmarking; development only.
dev-hooks = []
# Logging of performance-related statistics; development only.
dev-logs = []
[dependencies]
# Mandatory dependencies.
async-event = "0.1"
crossbeam-utils = "0.8"
diatomic-waker = "0.1"
futures-channel = "0.3"
futures-task = "0.3"
multishot = "0.3.2"
num_cpus = "1.13"
@ -39,21 +48,34 @@ recycle-box = "0.2"
slab = "0.4"
spin_sleep = "1"
st3 = "0.4"
tai-time = "0.3"
# Common RPC dependencies.
bytes = { version = "1", default-features = false, optional = true }
prost = { version = "0.12", optional = true }
prost-types = { version = "0.12", optional = true }
rmp-serde = { version = "1.1", optional = true }
serde = { version = "1", optional = true }
# gRPC dependencies.
tokio = { version = "1.0", optional = true }
tonic = { version = "0.11", optional = true }
[dependencies.serde]
version = "1"
optional = true
features = ["derive"]
[target.'cfg(asynchronix_loom)'.dependencies]
loom = "0.5"
waker-fn = "1.1"
[dev-dependencies]
futures-util = "0.3"
futures-channel = "0.3"
futures-executor = "0.3"
[build-dependencies]
tonic-build = { version = "0.11", optional = true }
[[test]]
name = "integration"
path = "tests/tests.rs"

17
asynchronix/build.rs Normal file
View File

@ -0,0 +1,17 @@
// Build script: optionally (re-)generates the protobuf/gRPC code.
//
// Per the `rpc-codegen` feature description in Cargo.toml, generation only
// runs when that feature is enabled; the output goes to `src/rpc/codegen/`
// inside the source tree, so regular builds use the pre-generated files.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Configure tonic codegen; the client side is never generated.
    #[cfg(feature = "rpc-codegen")]
    let builder = tonic_build::configure()
        .build_client(false)
        .out_dir("src/rpc/codegen/");
    // Without the `grpc-server` feature, skip gRPC server stub generation as
    // well, leaving only the protobuf message types.
    #[cfg(all(feature = "rpc-codegen", not(feature = "grpc-server")))]
    let builder = builder.build_server(false);
    // Compile the .proto definitions located under `src/rpc/api/`.
    #[cfg(feature = "rpc-codegen")]
    builder.compile(
        &["simulation.proto", "custom_transport.proto"],
        &["src/rpc/api/"],
    )?;
    Ok(())
}

View File

@ -35,9 +35,10 @@ use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use asynchronix::model::{InitializedModel, Model, Output};
use asynchronix::model::{InitializedModel, Model};
use asynchronix::ports::{EventSlot, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
use asynchronix::time::{ActionKey, MonotonicTime, Scheduler};
/// Water pump.
pub struct Pump {
@ -81,7 +82,7 @@ pub struct Controller {
water_sense: WaterSenseState,
/// Event key, which if present indicates that the machine is currently
/// brewing -- internal state.
stop_brew_key: Option<EventKey>,
stop_brew_key: Option<ActionKey>,
}
impl Controller {
@ -323,7 +324,7 @@ impl Model for Tank {
/// is non-zero.
struct TankDynamicState {
last_volume_update: MonotonicTime,
set_empty_key: EventKey,
set_empty_key: ActionKey,
flow_rate: f64,
}
@ -364,7 +365,8 @@ fn main() {
pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox);
// Model handles for simulation.
let mut flow_rate = pump.flow_rate.connect_slot().0;
let mut flow_rate = EventSlot::new();
pump.flow_rate.connect_sink(&flow_rate);
let controller_addr = controller_mbox.address();
let tank_addr = tank_mbox.address();
@ -387,48 +389,48 @@ fn main() {
assert_eq!(simu.time(), t);
// Brew one espresso shot with the default brew time.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
t += Controller::DEFAULT_BREW_TIME;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Drink too much coffee.
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64();
let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who cares about floating-point rounding errors?
for _ in 0..(shots_per_tank - 1) {
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
t += Controller::DEFAULT_BREW_TIME;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
}
// Check that the tank becomes empty before the completion of the next shot.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
simu.process_event(Controller::brew_cmd, (), &controller_addr);
simu.step();
assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME);
t = simu.time();
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Try to brew another shot while the tank is still empty.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert!(flow_rate.take().is_none());
simu.process_event(Controller::brew_cmd, (), &controller_addr);
assert!(flow_rate.next().is_none());
// Change the brew time and fill up the tank.
let brew_time = Duration::new(30, 0);
simu.send_event(Controller::brew_time, brew_time, &controller_addr);
simu.send_event(Tank::fill, 1.0e-3, tank_addr);
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_time, brew_time, &controller_addr);
simu.process_event(Tank::fill, 1.0e-3, tank_addr);
simu.process_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
t += brew_time;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Interrupt the brew after 15s by pressing again the brew button.
simu.schedule_event(
@ -438,11 +440,11 @@ fn main() {
&controller_addr,
)
.unwrap();
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
t += Duration::from_secs(15);
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
}

View File

@ -26,7 +26,8 @@
//! │ ├───────────────────────────────▶ Total power
//! └──────────┘
//! ```
use asynchronix::model::{Model, Output, Requestor};
use asynchronix::model::Model;
use asynchronix::ports::{EventSlot, Output, Requestor};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::MonotonicTime;
@ -124,10 +125,14 @@ fn main() {
psu.pwr_out.connect(Load::pwr_in, &load3_mbox);
// Model handles for simulation.
let mut psu_power = psu.power.connect_slot().0;
let mut load1_power = load1.power.connect_slot().0;
let mut load2_power = load2.power.connect_slot().0;
let mut load3_power = load3.power.connect_slot().0;
let mut psu_power = EventSlot::new();
let mut load1_power = EventSlot::new();
let mut load2_power = EventSlot::new();
let mut load3_power = EventSlot::new();
psu.power.connect_sink(&psu_power);
load1.power.connect_sink(&load1_power);
load2.power.connect_sink(&load2_power);
load3.power.connect_sink(&load3_power);
let psu_addr = psu_mbox.address();
// Start time (arbitrary since models do not depend on absolute time).
@ -153,14 +158,14 @@ fn main() {
// Vary the supply voltage, check the load and power supply consumptions.
for voltage in [10.0, 15.0, 20.0] {
simu.send_event(PowerSupply::voltage_setting, voltage, &psu_addr);
simu.process_event(PowerSupply::voltage_setting, voltage, &psu_addr);
let v_square = voltage * voltage;
assert!(same_power(load1_power.take().unwrap(), v_square / r1));
assert!(same_power(load2_power.take().unwrap(), v_square / r2));
assert!(same_power(load3_power.take().unwrap(), v_square / r3));
assert!(same_power(load1_power.next().unwrap(), v_square / r1));
assert!(same_power(load2_power.next().unwrap(), v_square / r2));
assert!(same_power(load3_power.next().unwrap(), v_square / r3));
assert!(same_power(
psu_power.take().unwrap(),
psu_power.next().unwrap(),
v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3)
));
}

View File

@ -18,7 +18,8 @@ use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use asynchronix::model::{InitializedModel, Model, Output};
use asynchronix::model::{InitializedModel, Model};
use asynchronix::ports::{EventBuffer, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, Scheduler};
@ -200,7 +201,8 @@ fn main() {
driver.current_out.connect(Motor::current_in, &motor_mbox);
// Model handles for simulation.
let mut position = motor.position.connect_stream().0;
let mut position = EventBuffer::new();
motor.position.connect_sink(&position);
let motor_addr = motor_mbox.address();
let driver_addr = driver_mbox.address();
@ -258,7 +260,7 @@ fn main() {
assert!(position.next().is_none());
// Increase the load beyond the torque limit for a 1A driver current.
simu.send_event(Motor::load, 2.0, &motor_addr);
simu.process_event(Motor::load, 2.0, &motor_addr);
// Advance simulation time and check that the motor is blocked.
simu.step();
@ -274,7 +276,7 @@ fn main() {
// Decrease the load below the torque limit for a 1A driver current and
// advance simulation time.
simu.send_event(Motor::load, 0.5, &motor_addr);
simu.process_event(Motor::load, 0.5, &motor_addr);
simu.step();
t += Duration::new(0, 100_000_000);
@ -298,7 +300,7 @@ fn main() {
// Now make the motor rotate in the opposite direction. Note that this
// driver only accounts for a new PPS at the next pulse.
simu.send_event(Driver::pulse_rate, -10.0, &driver_addr);
simu.process_event(Driver::pulse_rate, -10.0, &driver_addr);
simu.step();
t += Duration::new(0, 100_000_000);
assert_eq!(simu.time(), t);

View File

@ -8,7 +8,6 @@ use std::error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::sync::Arc;
@ -154,7 +153,7 @@ impl<M: Model> Receiver<M> {
/// time, but an identifier may be reused after all handles to a channel
/// have been dropped.
pub(crate) fn channel_id(&self) -> ChannelId {
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
ChannelId(&*self.inner as *const Inner<M> as usize)
}
}
@ -255,8 +254,8 @@ impl<M: Model> Sender<M> {
/// All channels are guaranteed to have different identifiers at any given
/// time, but an identifier may be reused after all handles to a channel
/// have been dropped.
pub(crate) fn channel_id(&self) -> ChannelId {
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
pub(crate) fn channel_id(&self) -> usize {
Arc::as_ptr(&self.inner) as usize
}
}
@ -369,7 +368,7 @@ where
/// Unique identifier for a channel.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct ChannelId(NonZeroUsize);
pub(crate) struct ChannelId(usize);
impl fmt::Display for ChannelId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {

View File

@ -85,7 +85,7 @@ struct Slot<T: ?Sized> {
message: UnsafeCell<MessageBox<T>>,
}
/// An fast MPSC queue that stores its items in recyclable boxes.
/// A fast MPSC queue that stores its items in recyclable boxes.
///
/// The item may be unsized.
///

View File

@ -88,7 +88,7 @@ pub(crate) struct Executor {
active_tasks: Arc<Mutex<Slab<CancelToken>>>,
/// Parker for the main executor thread.
parker: Parker,
/// Join handles of the worker threads.
/// Handles to the worker threads.
worker_handles: Vec<JoinHandle<()>>,
}

View File

@ -36,18 +36,18 @@
//!
//! Models can contain four kinds of ports:
//!
//! * _output ports_, which are instances of the [`Output`](model::Output) type
//! * _output ports_, which are instances of the [`Output`](ports::Output) type
//! and can be used to broadcast a message,
//! * _requestor ports_, which are instances of the
//! [`Requestor`](model::Requestor) type and can be used to broadcast a
//! [`Requestor`](ports::Requestor) type and can be used to broadcast a
//! message and receive an iterator yielding the replies from all connected
//! replier ports,
//! * _input ports_, which are synchronous or asynchronous methods that
//! implement the [`InputFn`](model::InputFn) trait and take an `&mut self`
//! implement the [`InputFn`](ports::InputFn) trait and take an `&mut self`
//! argument, a message argument, and an optional
//! [`&Scheduler`](time::Scheduler) argument,
//! * _replier ports_, which are similar to input ports but implement the
//! [`ReplierFn`](model::ReplierFn) trait and return a reply.
//! [`ReplierFn`](ports::ReplierFn) trait and return a reply.
//!
//! Messages that are broadcast by an output port to an input port are referred
//! to as *events*, while messages exchanged between requestor and replier ports
@ -78,7 +78,8 @@
//! `Multiplier` could be implemented as follows:
//!
//! ```
//! use asynchronix::model::{Model, Output};
//! use asynchronix::model::Model;
//! use asynchronix::ports::Output;
//!
//! #[derive(Default)]
//! pub struct Multiplier {
@ -104,7 +105,8 @@
//!
//! ```
//! use std::time::Duration;
//! use asynchronix::model::{Model, Output};
//! use asynchronix::model::Model;
//! use asynchronix::ports::Output;
//! use asynchronix::time::Scheduler;
//!
//! #[derive(Default)]
@ -166,7 +168,8 @@
//! ```
//! # mod models {
//! # use std::time::Duration;
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::model::Model;
//! # use asynchronix::ports::Output;
//! # use asynchronix::time::Scheduler;
//! # #[derive(Default)]
//! # pub struct Multiplier {
@ -193,6 +196,7 @@
//! # impl Model for Delay {}
//! # }
//! use std::time::Duration;
//! use asynchronix::ports::EventSlot;
//! use asynchronix::simulation::{Mailbox, SimInit};
//! use asynchronix::time::MonotonicTime;
//!
@ -217,7 +221,8 @@
//! delay1.output.connect(Delay::input, &delay2_mbox);
//!
//! // Keep handles to the system input and output for the simulation.
//! let mut output_slot = delay2.output.connect_slot().0;
//! let mut output_slot = EventSlot::new();
//! delay2.output.connect_sink(&output_slot);
//! let input_address = multiplier1_mbox.address();
//!
//! // Pick an arbitrary simulation start time and build the simulation.
@ -239,23 +244,20 @@
//! deadline using for instance
//! [`Simulation::step_by()`](simulation::Simulation::step_by).
//! 2. by sending events or queries without advancing simulation time, using
//! [`Simulation::send_event()`](simulation::Simulation::send_event) or
//! [`Simulation::send_query()`](simulation::Simulation::send_query),
//! [`Simulation::process_event()`](simulation::Simulation::process_event) or
//! [`Simulation::process_query()`](simulation::Simulation::process_query),
//! 3. by scheduling events, using for instance
//! [`Simulation::schedule_event()`](simulation::Simulation::schedule_event).
//!
//! When a simulation is initialized via
//! [`SimInit::init()`](simulation::SimInit::init) then the simulation will run
//! as fast as possible, without regard for the actual wall clock time.
//! Alternatively, it is possible to initialize a simulation via
//! [`SimInit::init_with_clock()`](simulation::SimInit::init_with_clock) to bind
//! the simulation time to the wall clock time using a custom
//! [`Clock`](time::Clock) type or a readily-available real-time clock such as
//! [`AutoSystemClock`](time::AutoSystemClock).
//! When initialized with the default clock, the simulation will run as fast as
//! possible, without regard for the actual wall clock time. Alternatively, the
//! simulation time can be synchronized to the wall clock time using
//! [`SimInit::set_clock()`](simulation::SimInit::set_clock) and providing a
//! custom [`Clock`](time::Clock) type or a readily-available real-time clock
//! such as [`AutoSystemClock`](time::AutoSystemClock).
//!
//! Simulation outputs can be monitored using
//! [`EventSlot`](simulation::EventSlot)s and
//! [`EventStream`](simulation::EventStream)s, which can be connected to any
//! Simulation outputs can be monitored using [`EventSlot`](ports::EventSlot)s
//! and [`EventBuffer`](ports::EventBuffer)s, which can be connected to any
//! model's output port. While an event slot only gives access to the last value
sent from a port, an event buffer is an iterator that yields all events that
//! were sent in first-in-first-out order.
@ -266,7 +268,8 @@
//! ```
//! # mod models {
//! # use std::time::Duration;
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::model::Model;
//! # use asynchronix::ports::Output;
//! # use asynchronix::time::Scheduler;
//! # #[derive(Default)]
//! # pub struct Multiplier {
@ -293,6 +296,7 @@
//! # impl Model for Delay {}
//! # }
//! # use std::time::Duration;
//! # use asynchronix::ports::EventSlot;
//! # use asynchronix::simulation::{Mailbox, SimInit};
//! # use asynchronix::time::MonotonicTime;
//! # use models::{Delay, Multiplier};
@ -308,7 +312,8 @@
//! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
//! # multiplier2.output.connect(Delay::input, &delay2_mbox);
//! # delay1.output.connect(Delay::input, &delay2_mbox);
//! # let mut output_slot = delay2.output.connect_slot().0;
//! # let mut output_slot = EventSlot::new();
//! # delay2.output.connect_sink(&output_slot);
//! # let input_address = multiplier1_mbox.address();
//! # let t0 = MonotonicTime::EPOCH;
//! # let mut simu = SimInit::new()
@ -318,21 +323,21 @@
//! # .add_model(delay2, delay2_mbox)
//! # .init(t0);
//! // Send a value to the first multiplier.
//! simu.send_event(Multiplier::input, 21.0, &input_address);
//! simu.process_event(Multiplier::input, 21.0, &input_address);
//!
//! // The simulation is still at t0 so nothing is expected at the output of the
//! // second delay gate.
//! assert!(output_slot.take().is_none());
//! assert!(output_slot.next().is_none());
//!
//! // Advance simulation time until the next event and check the time and output.
//! simu.step();
//! assert_eq!(simu.time(), t0 + Duration::from_secs(1));
//! assert_eq!(output_slot.take(), Some(84.0));
//! assert_eq!(output_slot.next(), Some(84.0));
//!
//! // Get the answer to the ultimate question of life, the universe & everything.
//! simu.step();
//! assert_eq!(simu.time(), t0 + Duration::from_secs(2));
//! assert_eq!(output_slot.take(), Some(42.0));
//! assert_eq!(output_slot.next(), Some(42.0));
//! ```
//!
//! # Message ordering guarantees
@ -406,6 +411,9 @@ pub(crate) mod executor;
mod loom_exports;
pub(crate) mod macros;
pub mod model;
pub mod ports;
#[cfg(feature = "rpc")]
pub mod rpc;
pub mod simulation;
pub mod time;
pub(crate) mod util;

View File

@ -65,8 +65,9 @@
//! ### Output and requestor ports
//!
//! Output and requestor ports can be added to a model using composition, adding
//! [`Output`] and [`Requestor`] objects as members. They are parametrized by
//! the event, request and reply types.
//! [`Output`](crate::ports::Output) and [`Requestor`](crate::ports::Requestor)
//! objects as members. They are parametrized by the event, request and reply
//! types.
//!
//! Models are expected to expose their output and requestor ports as public
//! members so they can be connected to input and replier ports when assembling
@ -75,7 +76,8 @@
//! #### Example
//!
//! ```
//! use asynchronix::model::{Model, Output, Requestor};
//! use asynchronix::model::Model;
//! use asynchronix::ports::{Output, Requestor};
//!
//! pub struct MyModel {
//! pub my_output: Output<String>,
@ -90,9 +92,9 @@
//!
//! ### Input and replier ports
//!
//! Input ports and replier ports are methods that implement the [`InputFn`] or
//! [`ReplierFn`] traits with appropriate bounds on their argument and return
//! types.
//! Input ports and replier ports are methods that implement the
//! [`InputFn`](crate::ports::InputFn) or [`ReplierFn`](crate::ports::ReplierFn)
//! traits with appropriate bounds on their argument and return types.
//!
//! In practice, an input port method for an event of type `T` may have any of
//! the following signatures, where the futures returned by the `async` variants
@ -132,7 +134,7 @@
//! can be connected to input and requestor ports when assembling the simulation
//! bench. However, input ports may instead be defined as private methods if
//! they are only used by the model itself to schedule future actions (see the
//! [`Scheduler`](crate::time::Scheduler) examples).
//! [`Scheduler`] examples).
//!
//! Changing the signature of an input or replier port is not considered to
//! alter the public interface of a model provided that the event, request and
@ -165,13 +167,6 @@ use std::pin::Pin;
use crate::time::Scheduler;
pub use model_fn::{InputFn, ReplierFn};
pub use ports::{LineError, LineId, Output, Requestor};
pub mod markers;
mod model_fn;
mod ports;
/// Trait to be implemented by all models.
///
/// This trait enables models to perform specific actions in the

35
asynchronix/src/ports.rs Normal file
View File

@ -0,0 +1,35 @@
//! Model ports for event and query broadcasting.
//!
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
//! public member variables. Output ports broadcast events to all connected
//! input ports, while requestor ports broadcast queries to, and retrieve
//! replies from, all connected replier ports.
//!
//! On the surface, output and requestor ports only differ in that sending a
//! query from a requestor port also returns an iterator over the replies from
//! all connected ports. Sending a query is more costly, however, because of the
//! need to wait until all connected models have processed the query. In
//! contrast, since events are buffered in the mailbox of the target model,
//! sending an event is a fire-and-forget operation. For this reason, output
//! ports should generally be preferred over requestor ports when possible.

// Private submodules; their public items are re-exported below.
mod input;
mod output;
mod sink;
mod source;

// Flat public API of the `ports` module.
pub use input::markers;
pub use input::{InputFn, ReplierFn};
pub use output::{Output, Requestor};
pub use sink::{
    event_buffer::EventBuffer, event_slot::EventSlot, EventSink, EventSinkStream, EventSinkWriter,
};
pub use source::{EventSource, QuerySource, ReplyReceiver};

#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
/// Unique identifier for a connection between two ports.
pub struct LineId(u64);

/// Error raised when the specified line cannot be found.
#[derive(Copy, Clone, Debug)]
pub struct LineError {}

View File

@ -0,0 +1,4 @@
//! Input-side port function traits ([`InputFn`], [`ReplierFn`]) and their
//! associated marker types.

pub mod markers;
mod model_fn;

pub use model_fn::{InputFn, ReplierFn};

View File

@ -2,9 +2,11 @@
use std::future::{ready, Future, Ready};
use crate::model::{markers, Model};
use crate::model::Model;
use crate::time::Scheduler;
use super::markers;
/// A function, method or closures that can be used as an *input port*.
///
/// This trait is in particular implemented for any function or method with the

View File

@ -1,35 +1,16 @@
//! Model ports for event and query broadcasting.
//!
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
//! public member variables. Output ports broadcast events to all connected
//! input ports, while requestor ports broadcast queries to, and retrieve
//! replies from, all connected replier ports.
//!
//! On the surface, output and requestor ports only differ in that sending a
//! query from a requestor port also returns an iterator over the replies from
//! all connected ports. Sending a query is more costly, however, because of the
//! need to wait until all connected models have processed the query. In
//! contrast, since events are buffered in the mailbox of the target model,
//! sending an event is a fire-and-forget operation. For this reason, output
//! ports should generally be preferred over requestor ports when possible.
use std::fmt;
use std::sync::{Arc, Mutex};
mod broadcaster;
mod sender;
use crate::model::{InputFn, Model, ReplierFn};
use crate::simulation::{Address, EventSlot, EventStream};
use crate::util::spsc_queue;
use std::fmt;
use broadcaster::Broadcaster;
use crate::model::Model;
use crate::ports::{EventSink, LineError, LineId};
use crate::ports::{InputFn, ReplierFn};
use crate::simulation::Address;
use self::sender::{EventSender, EventSlotSender, EventStreamSender, QuerySender};
use broadcaster::{EventBroadcaster, QueryBroadcaster};
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
/// Unique identifier for a connection between two ports.
pub struct LineId(u64);
use self::sender::{EventSinkSender, InputSender, ReplierSender};
/// An output port.
///
@ -37,7 +18,7 @@ pub struct LineId(u64);
/// methods that return no value. They broadcast events to all connected input
/// ports.
pub struct Output<T: Clone + Send + 'static> {
broadcaster: Broadcaster<T, ()>,
broadcaster: EventBroadcaster<T>,
next_line_id: u64,
}
@ -56,53 +37,37 @@ impl<T: Clone + Send + 'static> Output<T> {
pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Copy,
F: for<'a> InputFn<'a, M, T, S> + Clone,
S: Send + 'static,
{
assert!(self.next_line_id != u64::MAX);
let line_id = LineId(self.next_line_id);
self.next_line_id += 1;
let sender = Box::new(EventSender::new(input, address.into().0));
let sender = Box::new(InputSender::new(input, address.into().0));
self.broadcaster.add(sender, line_id);
line_id
}
/// Adds a connection to an event stream iterator.
pub fn connect_stream(&mut self) -> (EventStream<T>, LineId) {
/// Adds a connection to an event sink such as an
/// [`EventSlot`](crate::ports::EventSlot) or
/// [`EventBuffer`](crate::ports::EventBuffer).
pub fn connect_sink<S: EventSink<T>>(&mut self, sink: &S) -> LineId {
assert!(self.next_line_id != u64::MAX);
let line_id = LineId(self.next_line_id);
self.next_line_id += 1;
let (producer, consumer) = spsc_queue::spsc_queue();
let sender = Box::new(EventStreamSender::new(producer));
let event_stream = EventStream::new(consumer);
let sender = Box::new(EventSinkSender::new(sink.writer()));
self.broadcaster.add(sender, line_id);
(event_stream, line_id)
}
/// Adds a connection to an event slot.
pub fn connect_slot(&mut self) -> (EventSlot<T>, LineId) {
assert!(self.next_line_id != u64::MAX);
let line_id = LineId(self.next_line_id);
self.next_line_id += 1;
let slot = Arc::new(Mutex::new(None));
let sender = Box::new(EventSlotSender::new(slot.clone()));
let event_slot = EventSlot::new(slot);
self.broadcaster.add(sender, line_id);
(event_slot, line_id)
line_id
}
/// Removes the connection specified by the `LineId` parameter.
///
/// It is a logic error to specify a line identifier from another [`Output`]
/// or [`Requestor`] instance and may result in the disconnection of an
/// arbitrary endpoint.
/// It is a logic error to specify a line identifier from another
/// [`Output`], [`Requestor`], [`EventSource`](crate::ports::EventSource) or
/// [`QuerySource`](crate::ports::QuerySource) instance and may result in
/// the disconnection of an arbitrary endpoint.
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
if self.broadcaster.remove(line_id) {
Ok(())
@ -118,14 +83,14 @@ impl<T: Clone + Send + 'static> Output<T> {
/// Broadcasts an event to all connected input ports.
pub async fn send(&mut self, arg: T) {
self.broadcaster.broadcast_event(arg).await.unwrap();
self.broadcaster.broadcast(arg).await.unwrap();
}
}
impl<T: Clone + Send + 'static> Default for Output<T> {
fn default() -> Self {
Self {
broadcaster: Broadcaster::default(),
broadcaster: EventBroadcaster::default(),
next_line_id: 0,
}
}
@ -143,7 +108,7 @@ impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
/// model methods that return a value. They broadcast queries to all connected
/// replier ports.
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
broadcaster: Broadcaster<T, R>,
broadcaster: QueryBroadcaster<T, R>,
next_line_id: u64,
}
@ -162,13 +127,13 @@ impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
where
M: Model,
F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
S: Send + 'static,
{
assert!(self.next_line_id != u64::MAX);
let line_id = LineId(self.next_line_id);
self.next_line_id += 1;
let sender = Box::new(QuerySender::new(replier, address.into().0));
let sender = Box::new(ReplierSender::new(replier, address.into().0));
self.broadcaster.add(sender, line_id);
line_id
@ -176,9 +141,10 @@ impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
/// Removes the connection specified by the `LineId` parameter.
///
/// It is a logic error to specify a line identifier from another [`Output`]
/// or [`Requestor`] instance and may result in the disconnection of an
/// arbitrary endpoint.
/// It is a logic error to specify a line identifier from another
/// [`Requestor`], [`Output`], [`EventSource`](crate::ports::EventSource) or
/// [`QuerySource`](crate::ports::QuerySource) instance and may result in
/// the disconnection of an arbitrary endpoint.
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
if self.broadcaster.remove(line_id) {
Ok(())
@ -194,14 +160,14 @@ impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
/// Broadcasts a query to all connected replier ports.
pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
self.broadcaster.broadcast_query(arg).await.unwrap()
self.broadcaster.broadcast(arg).await.unwrap()
}
}
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
fn default() -> Self {
Self {
broadcaster: Broadcaster::default(),
broadcaster: QueryBroadcaster::default(),
next_line_id: 0,
}
}
@ -212,7 +178,3 @@ impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R
write!(f, "Requestor ({} connected ports)", self.broadcaster.len())
}
}
/// Error raised when the specified line cannot be found.
#[derive(Copy, Clone, Debug)]
pub struct LineError {}

View File

@ -8,46 +8,30 @@ use recycle_box::{coerce_box, RecycleBox};
use super::sender::{SendError, Sender};
use super::LineId;
use task_set::TaskSet;
mod task_set;
use crate::util::task_set::TaskSet;
/// An object that can efficiently broadcast messages to several addresses.
///
/// This is very similar to `source::broadcaster::BroadcasterInner`, but
/// generates non-owned futures instead.
///
/// This object maintains a list of senders associated to each target address.
/// When a message is broadcasted, the sender futures are awaited in parallel.
/// When a message is broadcast, the sender futures are awaited in parallel.
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
/// does, but with some key differences:
///
/// - tasks and future storage are reusable to avoid repeated allocation, so
/// allocation occurs only after a new sender is added,
/// - the outputs of all sender futures are returned all at once rather than
/// with an asynchronous iterator (a.k.a. async stream); the implementation
/// exploits this behavior by waking the main broadcast future only when all
/// sender futures have been awaken, which strongly reduces overhead since
/// waking a sender task does not actually schedule it on the executor.
pub(super) struct Broadcaster<T: Clone + 'static, R: 'static> {
/// with an asynchronous iterator (a.k.a. async stream).
pub(super) struct BroadcasterInner<T: Clone, R> {
/// The list of senders with their associated line identifier.
senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
/// Fields explicitly borrowed by the `BroadcastFuture`.
shared: Shared<R>,
}
impl<T: Clone + 'static> Broadcaster<T, ()> {
/// Broadcasts an event to all addresses.
pub(super) async fn broadcast_event(&mut self, arg: T) -> Result<(), BroadcastError> {
match self.senders.as_mut_slice() {
// No sender.
[] => Ok(()),
// One sender.
[sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}),
// Multiple senders.
_ => self.broadcast(arg).await,
}
}
}
impl<T: Clone + 'static, R> Broadcaster<T, R> {
impl<T: Clone, R> BroadcasterInner<T, R> {
/// Adds a new sender associated to the specified identifier.
///
/// # Panics
@ -93,55 +77,25 @@ impl<T: Clone + 'static, R> Broadcaster<T, R> {
self.senders.len()
}
/// Broadcasts a query to all addresses and collect all responses.
pub(super) async fn broadcast_query(
&mut self,
arg: T,
) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
match self.senders.as_mut_slice() {
// No sender.
[] => {}
// One sender.
[sender] => {
let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
self.shared.futures_env[0].output = Some(output);
}
// Multiple senders.
_ => self.broadcast(arg).await?,
};
// At this point all outputs should be available so `unwrap` can be
// called on the output of each future.
let outputs = self
.shared
.futures_env
.iter_mut()
.map(|t| t.output.take().unwrap());
Ok(outputs)
}
/// Efficiently broadcasts a message or a query to multiple addresses.
///
/// This method does not collect the responses from queries.
fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> {
let futures_count = self.senders.len();
let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default());
// Broadcast the message and collect all futures.
for (i, (sender, futures_env)) in self
let mut iter = self
.senders
.iter_mut()
.zip(self.shared.futures_env.iter_mut())
.enumerate()
{
.zip(self.shared.futures_env.iter_mut());
while let Some((sender, futures_env)) = iter.next() {
let future_cache = futures_env
.storage
.take()
.unwrap_or_else(|| RecycleBox::new(()));
// Move the argument rather than clone it for the last future.
if i + 1 == futures_count {
if iter.len() == 0 {
let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> =
coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg)));
@ -161,7 +115,7 @@ impl<T: Clone + 'static, R> Broadcaster<T, R> {
}
}
impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
impl<T: Clone, R> Default for BroadcasterInner<T, R> {
/// Creates an empty `Broadcaster` object.
fn default() -> Self {
let wake_sink = WakeSink::new();
@ -179,6 +133,145 @@ impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
}
}
/// Broadcaster of events to a set of connected input ports.
///
/// Counterpart of `source::broadcaster::EventBroadcaster`, except that the
/// futures it produces are non-owned.
///
/// Implementation details are documented on `BroadcasterInner`.
pub(super) struct EventBroadcaster<T: Clone> {
    /// Delegate implementing the actual broadcast machinery.
    inner: BroadcasterInner<T, ()>,
}

impl<T: Clone> EventBroadcaster<T> {
    /// Registers a new sender under the given line identifier.
    ///
    /// # Panics
    ///
    /// Panics if the total number of senders would reach `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, ()>>, id: LineId) {
        self.inner.add(sender, id);
    }

    /// Unregisters the first sender matching the given identifier.
    ///
    /// Returns `true` if a sender with that identifier was indeed registered.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Unregisters all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear();
    }

    /// Returns the number of currently registered senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Sends the event to every registered sender.
    pub(super) async fn broadcast(&mut self, arg: T) -> Result<(), BroadcastError> {
        // Fast paths for the 0- and 1-sender cases bypass the generic
        // broadcast future entirely.
        match self.inner.senders.len() {
            0 => Ok(()),
            1 => self.inner.senders[0]
                .1
                .send(arg)
                .await
                .map_err(|_| BroadcastError {}),
            _ => self.inner.broadcast(arg).await,
        }
    }
}

impl<T: Clone> Default for EventBroadcaster<T> {
    /// Creates a broadcaster with no registered senders.
    fn default() -> Self {
        Self {
            inner: BroadcasterInner::default(),
        }
    }
}
/// An object that can efficiently broadcast queries to several replier ports.
///
/// This is very similar to `source::broadcaster::QueryBroadcaster`, but
/// generates non-owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct QueryBroadcaster<T: Clone, R> {
    /// The broadcaster core object.
    inner: BroadcasterInner<T, R>,
}

impl<T: Clone, R> QueryBroadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
        self.inner.add(sender, id);
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear();
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Broadcasts a query to all addresses and collect all responses.
    pub(super) async fn broadcast(
        &mut self,
        arg: T,
    ) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
        match self.inner.senders.as_mut_slice() {
            // No sender: nothing to send; the output iterator below will be
            // empty.
            [] => {}
            // One sender: bypass the generic broadcast future and deposit the
            // reply directly in the first future-environment slot so the
            // collection step below works uniformly.
            [sender] => {
                let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
                self.inner.shared.futures_env[0].output = Some(output);
            }
            // Multiple senders: on success, the broadcast future is expected
            // to have populated the `output` slot of every future environment.
            _ => self.inner.broadcast(arg).await?,
        };

        // At this point all outputs should be available so `unwrap` can be
        // called on the output of each future.
        let outputs = self
            .inner
            .shared
            .futures_env
            .iter_mut()
            .map(|t| t.output.take().unwrap());

        Ok(outputs)
    }
}

impl<T: Clone, R> Default for QueryBroadcaster<T, R> {
    /// Creates a broadcaster with no registered senders.
    fn default() -> Self {
        Self {
            inner: BroadcasterInner::default(),
        }
    }
}
/// Data related to a sender future.
struct FutureEnv<R> {
/// Cached storage for the future.
@ -212,8 +305,6 @@ struct Shared<R> {
///
/// - the sender futures are polled simultaneously rather than waiting for their
/// completion in a sequential manner,
/// - this future is never woken if it can be proven that at least one of the
/// individual sender task will still be awaken,
/// - the storage allocated for the sender futures is always returned to the
/// `Broadcast` object so it can be reused by the next future,
/// - the happy path (all futures immediately ready) is very fast.
@ -231,9 +322,9 @@ pub(super) struct BroadcastFuture<'a, R> {
impl<'a, R> BroadcastFuture<'a, R> {
/// Creates a new `BroadcastFuture`.
fn new(shared: &'a mut Shared<R>, futures: Vec<Pin<RecycleBoxFuture<'a, R>>>) -> Self {
let futures_count = futures.len();
let pending_futures_count = futures.len();
assert!(shared.futures_env.len() == futures_count);
assert!(shared.futures_env.len() == pending_futures_count);
for futures_env in shared.futures_env.iter_mut() {
// Drop the previous output if necessary.
@ -244,7 +335,7 @@ impl<'a, R> BroadcastFuture<'a, R> {
shared,
futures: ManuallyDrop::new(futures),
state: FutureState::Uninit,
pending_futures_count: futures_count,
pending_futures_count,
}
}
}
@ -276,7 +367,10 @@ impl<'a, R> Future for BroadcastFuture<'a, R> {
// Poll all sender futures once if this is the first time the broadcast
// future is polled.
if this.state == FutureState::Uninit {
// Prevent spurious wake-ups.
// The task set is re-used for each broadcast, so it may have some
// task scheduled due to e.g. spurious wake-ups that were triggered
// after the previous broadcast was completed. Discarding scheduled
// tasks can prevent unnecessary wake-ups.
this.shared.task_set.discard_scheduled();
for task_idx in 0..this.futures.len() {
@ -311,20 +405,22 @@ impl<'a, R> Future for BroadcastFuture<'a, R> {
// Repeatedly poll the futures of all scheduled tasks until there are no
// more scheduled tasks.
loop {
// Only register the waker if it is probable that we won't find any
// scheduled task.
// No need to register the waker if some tasks have been scheduled.
if !this.shared.task_set.has_scheduled() {
this.shared.wake_sink.register(cx.waker());
}
// Retrieve the indices of the scheduled tasks if any. If there are
// no scheduled tasks, `Poll::Pending` is returned and this future
// will be awaken again when enough tasks have been scheduled.
let scheduled_tasks = match this
.shared
.task_set
.steal_scheduled(this.pending_futures_count)
{
// will be awaken again when enough tasks have been awaken.
//
// NOTE: the current implementation requires a notification to be
// sent each time a sub-future has made progress. We may try at some
// point to benchmark an alternative strategy where a notification
// is requested only when all pending sub-futures have made progress,
// using `take_scheduled(this.pending_futures_count)`. This would
// reduce the cost of context switch but could hurt latency.
let scheduled_tasks = match this.shared.task_set.take_scheduled(1) {
Some(st) => st,
None => return Poll::Pending,
};
@ -403,6 +499,7 @@ fn recycle_vec<T, U>(mut v: Vec<T>) -> Vec<U> {
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex};
use std::thread;
use futures_executor::block_on;
@ -413,8 +510,9 @@ mod tests {
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCell;
use super::super::*;
use super::super::sender::{InputSender, ReplierSender};
use super::*;
use crate::model::Model;
struct Counter {
inner: Arc<AtomicUsize>,
@ -438,18 +536,18 @@ mod tests {
const N_RECV: usize = 4;
let mut mailboxes = Vec::new();
let mut broadcaster = Broadcaster::default();
let mut broadcaster = EventBroadcaster::default();
for id in 0..N_RECV {
let mailbox = Receiver::new(10);
let address = mailbox.sender();
let sender = Box::new(EventSender::new(Counter::inc, address));
let sender = Box::new(InputSender::new(Counter::inc, address));
broadcaster.add(sender, LineId(id as u64));
mailboxes.push(mailbox);
}
let th_broadcast = thread::spawn(move || {
block_on(broadcaster.broadcast_event(1)).unwrap();
block_on(broadcaster.broadcast(1)).unwrap();
});
let counter = Arc::new(AtomicUsize::new(0));
@ -486,18 +584,18 @@ mod tests {
const N_RECV: usize = 4;
let mut mailboxes = Vec::new();
let mut broadcaster = Broadcaster::default();
let mut broadcaster = QueryBroadcaster::default();
for id in 0..N_RECV {
let mailbox = Receiver::new(10);
let address = mailbox.sender();
let sender = Box::new(QuerySender::new(Counter::fetch_inc, address));
let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address));
broadcaster.add(sender, LineId(id as u64));
mailboxes.push(mailbox);
}
let th_broadcast = thread::spawn(move || {
let iter = block_on(broadcaster.broadcast_query(1)).unwrap();
let iter = block_on(broadcaster.broadcast(1)).unwrap();
let sum = iter.fold(0, |acc, val| acc + val);
assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
@ -606,12 +704,12 @@ mod tests {
let (test_event2, waker2) = test_event::<usize>();
let (test_event3, waker3) = test_event::<usize>();
let mut broadcaster = Broadcaster::default();
let mut broadcaster = QueryBroadcaster::default();
broadcaster.add(Box::new(test_event1), LineId(1));
broadcaster.add(Box::new(test_event2), LineId(2));
broadcaster.add(Box::new(test_event3), LineId(3));
let mut fut = Box::pin(broadcaster.broadcast_query(()));
let mut fut = Box::pin(broadcaster.broadcast(()));
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
let is_scheduled_waker = is_scheduled.clone();
@ -626,7 +724,6 @@ mod tests {
let th2 = thread::spawn(move || waker2.wake_final(7));
let th3 = thread::spawn(move || waker3.wake_final(42));
let mut schedule_count = 0;
loop {
match fut.as_mut().poll(&mut cx) {
Poll::Ready(Ok(mut res)) => {
@ -645,8 +742,6 @@ mod tests {
if !is_scheduled.swap(false, Ordering::Acquire) {
break;
}
schedule_count += 1;
assert!(schedule_count <= 1);
}
th1.join().unwrap();
@ -681,11 +776,11 @@ mod tests {
let (test_event1, waker1) = test_event::<usize>();
let (test_event2, waker2) = test_event::<usize>();
let mut broadcaster = Broadcaster::default();
let mut broadcaster = QueryBroadcaster::default();
broadcaster.add(Box::new(test_event1), LineId(1));
broadcaster.add(Box::new(test_event2), LineId(2));
let mut fut = Box::pin(broadcaster.broadcast_query(()));
let mut fut = Box::pin(broadcaster.broadcast(()));
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
let is_scheduled_waker = is_scheduled.clone();
@ -701,7 +796,6 @@ mod tests {
let th2 = thread::spawn(move || waker2.wake_final(7));
let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());
let mut schedule_count = 0;
loop {
match fut.as_mut().poll(&mut cx) {
Poll::Ready(Ok(mut res)) => {
@ -719,8 +813,6 @@ mod tests {
if !is_scheduled.swap(false, Ordering::Acquire) {
break;
}
schedule_count += 1;
assert!(schedule_count <= 2);
}
th1.join().unwrap();

View File

@ -4,22 +4,28 @@ use std::future::Future;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use recycle_box::{coerce_box, RecycleBox};
use crate::channel;
use crate::model::{InputFn, Model, ReplierFn};
use crate::util::spsc_queue;
use crate::model::Model;
use crate::ports::{EventSinkWriter, InputFn, ReplierFn};
/// Abstraction over `EventSender` and `QuerySender`.
/// An event or query sender abstracting over the target model and input or
/// replier method.
pub(super) trait Sender<T, R>: Send {
/// Asynchronously send the event or request.
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>>;
}
/// An object that can send a payload to a model.
pub(super) struct EventSender<M: 'static, F, T, S> {
/// An object that can send events to an input port.
pub(super) struct InputSender<M: 'static, F, T, S>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + 'static,
{
func: F,
sender: channel::Sender<M>,
fut_storage: Option<RecycleBox<()>>,
@ -27,7 +33,7 @@ pub(super) struct EventSender<M: 'static, F, T, S> {
_phantom_closure_marker: PhantomData<S>,
}
impl<M: Send, F, T, S> EventSender<M, F, T, S>
impl<M: Send, F, T, S> InputSender<M, F, T, S>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
@ -44,15 +50,15 @@ where
}
}
impl<M: Send, F, T, S> Sender<T, ()> for EventSender<M, F, T, S>
impl<M: Send, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Copy,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + 'static,
S: Send,
S: Send + 'static,
{
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
let func = self.func;
let func = self.func.clone();
let fut = self.sender.send(move |model, scheduler, recycle_box| {
let fut = func.call(model, arg, scheduler);
@ -66,8 +72,8 @@ where
}
}
/// An object that can send a payload to a model and retrieve a response.
pub(super) struct QuerySender<M: 'static, F, T, R, S> {
/// An object that can send a request to a replier port and retrieve a response.
pub(super) struct ReplierSender<M: 'static, F, T, R, S> {
func: F,
sender: channel::Sender<M>,
receiver: multishot::Receiver<R>,
@ -76,7 +82,7 @@ pub(super) struct QuerySender<M: 'static, F, T, R, S> {
_phantom_closure_marker: PhantomData<S>,
}
impl<M, F, T, R, S> QuerySender<M, F, T, R, S>
impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
where
M: Model,
F: for<'a> ReplierFn<'a, M, T, R, S>,
@ -95,16 +101,16 @@ where
}
}
impl<M, F, T, R, S> Sender<T, R> for QuerySender<M, F, T, R, S>
impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
where
M: Model,
F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
T: Send + 'static,
R: Send + 'static,
S: Send,
{
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>> {
let func = self.func;
let func = self.func.clone();
let sender = &mut self.sender;
let reply_receiver = &mut self.receiver;
let fut_storage = &mut self.fut_storage;
@ -134,67 +140,40 @@ where
}
}
/// An object that can send a payload to an unbounded queue.
pub(super) struct EventStreamSender<T> {
producer: spsc_queue::Producer<T>,
/// An object that can send a payload to an event sink.
pub(super) struct EventSinkSender<T: Send + 'static, W: EventSinkWriter<T>> {
writer: W,
fut_storage: Option<RecycleBox<()>>,
_phantom_event: PhantomData<T>,
}
impl<T> EventStreamSender<T> {
pub(super) fn new(producer: spsc_queue::Producer<T>) -> Self {
impl<T: Send + 'static, W: EventSinkWriter<T>> EventSinkSender<T, W> {
pub(super) fn new(writer: W) -> Self {
Self {
producer,
writer,
fut_storage: None,
_phantom_event: PhantomData,
}
}
}
impl<T> Sender<T, ()> for EventStreamSender<T>
impl<T, W: EventSinkWriter<T>> Sender<T, ()> for EventSinkSender<T, W>
where
T: Send + 'static,
{
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
let producer = &mut self.producer;
let writer = &mut self.writer;
RecycledFuture::new(&mut self.fut_storage, async move {
producer.push(arg).map_err(|_| SendError {})
})
}
}
/// An object that can send a payload to a mutex-protected slot.
pub(super) struct EventSlotSender<T> {
slot: Arc<Mutex<Option<T>>>,
fut_storage: Option<RecycleBox<()>>,
}
impl<T> EventSlotSender<T> {
pub(super) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
Self {
slot,
fut_storage: None,
}
}
}
impl<T> Sender<T, ()> for EventSlotSender<T>
where
T: Send + 'static,
{
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
let slot = &*self.slot;
RecycledFuture::new(&mut self.fut_storage, async move {
let mut slot = slot.lock().unwrap();
*slot = Some(arg);
writer.write(arg);
Ok(())
})
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
/// Error returned when the mailbox was closed or dropped.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(super) struct SendError {}
impl fmt::Display for SendError {

View File

@ -0,0 +1,54 @@
// Concrete sink implementations.
pub(crate) mod event_buffer;
pub(crate) mod event_slot;

/// A simulation endpoint that can receive events sent by model outputs.
///
/// An `EventSink` can be thought of as a self-standing input meant to
/// externally monitor the simulated system.
pub trait EventSink<T> {
    /// Writer handle to an event sink.
    type Writer: EventSinkWriter<T>;

    /// Returns the writer handle associated to this sink.
    fn writer(&self) -> Self::Writer;
}

/// A writer handle to an event sink.
pub trait EventSinkWriter<T>: Send + Sync + 'static {
    /// Writes a value to the associated sink.
    fn write(&self, event: T);
}

/// An iterator over collected events with the ability to pause and resume event
/// collection.
///
/// An `EventSinkStream` will typically be implemented on an `EventSink` for
/// which it will constitute a draining iterator.
pub trait EventSinkStream: Iterator {
    /// Starts or resumes the collection of new events.
    fn open(&mut self);

    /// Pauses the collection of new events.
    ///
    /// Events that were previously in the stream remain available.
    fn close(&mut self);

    /// This is a stop-gap method that shadows `Iterator::try_fold` until the
    /// latter can be implemented by user-defined types on stable Rust.
    ///
    /// It serves the exact same purpose as `Iterator::try_fold` but is
    /// specialized for `Result` to avoid depending on the unstable `Try` trait.
    ///
    /// Implementors may elect to override the default implementation when the
    /// event sink stream can be iterated over more rapidly than by repeatably
    /// calling `Iterator::next`, for instance if the implementation of the
    /// stream relies on a mutex that must be locked on each call.
    #[doc(hidden)]
    fn try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> Result<B, E>,
    {
        // Default implementation simply defers to the standard iterator fold.
        Iterator::try_fold(self, init, f)
    }
}

View File

@ -0,0 +1,138 @@
use std::collections::VecDeque;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use super::{EventSink, EventSinkStream, EventSinkWriter};
/// The shared data of an `EventBuffer`.
struct Inner<T> {
    /// Maximum number of buffered events; older events are evicted beyond it.
    capacity: usize,
    /// Whether writers currently record incoming events.
    is_open: AtomicBool,
    /// FIFO storage for the buffered events.
    buffer: Mutex<VecDeque<T>>,
}
/// An [`EventSink`] and [`EventSinkStream`] with a bounded size.
///
/// If the maximum capacity is exceeded, older events are overwritten. Events
/// are returned in first-in-first-out order. Note that even if the iterator
/// returns `None`, it may still produce more items in the future (in other
/// words, it is not a [`FusedIterator`](std::iter::FusedIterator)).
pub struct EventBuffer<T> {
    /// State shared with all writer handles.
    inner: Arc<Inner<T>>,
}
impl<T> EventBuffer<T> {
    /// Default capacity when constructed with `new`.
    pub const DEFAULT_CAPACITY: usize = 16;

    /// Creates an open `EventBuffer` with the default capacity.
    pub fn new() -> Self {
        Self::with_capacity(Self::DEFAULT_CAPACITY)
    }

    /// Creates a closed `EventBuffer` with the default capacity.
    pub fn new_closed() -> Self {
        Self::with_capacity_closed(Self::DEFAULT_CAPACITY)
    }

    /// Creates an open `EventBuffer` with the specified capacity.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_state(capacity, true)
    }

    /// Creates a closed `EventBuffer` with the specified capacity.
    pub fn with_capacity_closed(capacity: usize) -> Self {
        Self::with_state(capacity, false)
    }

    /// Common constructor: builds an empty buffer with the given capacity and
    /// initial open/closed state.
    fn with_state(capacity: usize, is_open: bool) -> Self {
        Self {
            inner: Arc::new(Inner {
                capacity,
                is_open: AtomicBool::new(is_open),
                // Pre-allocate: the buffer never holds more than `capacity`
                // events (the writer evicts at capacity), so this avoids
                // reallocation on the write path.
                buffer: Mutex::new(VecDeque::with_capacity(capacity)),
            }),
        }
    }
}
impl<T: Send + 'static> EventSink<T> for EventBuffer<T> {
type Writer = EventBufferWriter<T>;
fn writer(&self) -> Self::Writer {
EventBufferWriter {
inner: self.inner.clone(),
}
}
}
impl<T> Iterator for EventBuffer<T> {
    type Item = T;

    /// Pops the oldest buffered event, if any.
    fn next(&mut self) -> Option<Self::Item> {
        let mut queue = self.inner.buffer.lock().unwrap();

        queue.pop_front()
    }
}
impl<T: Send + 'static> EventSinkStream for EventBuffer<T> {
    /// Resumes the recording of incoming events by writers.
    fn open(&mut self) {
        self.inner.is_open.store(true, Ordering::Relaxed);
    }

    /// Pauses the recording of incoming events by writers.
    fn close(&mut self) {
        self.inner.is_open.store(false, Ordering::Relaxed);
    }

    /// Locks the buffer once and folds over all currently buffered events.
    ///
    /// Note: `drain(..)` removes the drained range when the `Drain` is
    /// dropped, so if `f` returns an error early the remaining buffered
    /// events are still discarded.
    fn try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> Result<B, E>,
    {
        let mut inner = self.inner.buffer.lock().unwrap();
        let mut drain = inner.drain(..);

        drain.try_fold(init, f)
    }
}
impl<T> Default for EventBuffer<T> {
    /// Equivalent to [`EventBuffer::new`]: an open buffer with the default
    /// capacity.
    fn default() -> Self {
        Self::new()
    }
}

impl<T> fmt::Debug for EventBuffer<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventBuffer").finish_non_exhaustive()
    }
}
/// A writer handle to an `EventBuffer`.
pub struct EventBufferWriter<T> {
    /// Buffer state shared with the originating `EventBuffer`.
    inner: Arc<Inner<T>>,
}

impl<T: Send + 'static> EventSinkWriter<T> for EventBufferWriter<T> {
    /// Pushes an event onto the queue.
    ///
    /// The event is silently discarded when the sink is closed. When the
    /// buffer is full, the oldest event is evicted to make room.
    fn write(&self, event: T) {
        if !self.inner.is_open.load(Ordering::Relaxed) {
            return;
        }

        let mut buffer = self.inner.buffer.lock().unwrap();
        if buffer.len() == self.inner.capacity {
            // Evict the oldest event so the buffer stays within capacity.
            buffer.pop_front();
        }
        buffer.push_back(event);
    }
}

impl<T> fmt::Debug for EventBufferWriter<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventBufferWriter").finish_non_exhaustive()
    }
}

View File

@ -0,0 +1,120 @@
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
use super::{EventSink, EventSinkStream, EventSinkWriter};
/// The shared data of an `EventSlot`.
struct Inner<T> {
    /// Whether writers currently record incoming events.
    is_open: AtomicBool,
    /// Storage for the most recent event, if any.
    slot: Mutex<Option<T>>,
}
/// An `EventSink` and `EventSinkStream` that only keeps the last event.
///
/// Once the value is read, the iterator will return `None` until a new value is
/// received. If the slot contains a value when a new value is received, the
/// previous value is overwritten.
pub struct EventSlot<T> {
    /// State shared with all writer handles.
    inner: Arc<Inner<T>>,
}
impl<T> EventSlot<T> {
    /// Creates an open `EventSlot`.
    pub fn new() -> Self {
        Self::with_state(true)
    }

    /// Creates a closed `EventSlot`.
    pub fn new_closed() -> Self {
        Self::with_state(false)
    }

    /// Common constructor: builds an empty slot with the given initial
    /// open/closed state.
    fn with_state(is_open: bool) -> Self {
        Self {
            inner: Arc::new(Inner {
                is_open: AtomicBool::new(is_open),
                slot: Mutex::new(None),
            }),
        }
    }
}
impl<T: Send + 'static> EventSink<T> for EventSlot<T> {
type Writer = EventSlotWriter<T>;
/// Returns a writer handle.
fn writer(&self) -> EventSlotWriter<T> {
EventSlotWriter {
inner: self.inner.clone(),
}
}
}
impl<T> Iterator for EventSlot<T> {
    type Item = T;

    /// Takes the current event out of the slot, if any.
    fn next(&mut self) -> Option<Self::Item> {
        // `try_lock` rather than `lock`: if a writer currently holds the
        // lock, report the slot as empty instead of blocking.
        match self.inner.slot.try_lock() {
            TryLockResult::Ok(mut v) => v.take(),
            TryLockResult::Err(TryLockError::WouldBlock) => None,
            // A poisoned lock means a writer panicked mid-write; propagate.
            TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}
impl<T: Send + 'static> EventSinkStream for EventSlot<T> {
    /// Resumes the recording of incoming events by writers.
    fn open(&mut self) {
        self.inner.is_open.store(true, Ordering::Relaxed);
    }

    /// Pauses the recording of incoming events by writers.
    fn close(&mut self) {
        self.inner.is_open.store(false, Ordering::Relaxed);
    }
}
impl<T> Default for EventSlot<T> {
    /// Equivalent to [`EventSlot::new`]: creates an open, empty slot.
    fn default() -> Self {
        Self::new()
    }
}

impl<T> fmt::Debug for EventSlot<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventSlot").finish_non_exhaustive()
    }
}
/// A writer handle of an `EventSlot`.
pub struct EventSlotWriter<T> {
    /// Slot state shared with the originating `EventSlot`.
    inner: Arc<Inner<T>>,
}

impl<T: Send + 'static> EventSinkWriter<T> for EventSlotWriter<T> {
    /// Write an event into the slot.
    ///
    /// The event is silently discarded when the sink is closed or when
    /// another writer holds the slot lock.
    fn write(&self, event: T) {
        // Ignore if the sink is closed.
        if !self.inner.is_open.load(Ordering::Relaxed) {
            return;
        }

        // Why do we just use `try_lock` and abandon if the lock is taken? The
        // reason is that (i) the reader is never supposed to access the slot
        // when the simulation runs and (ii) as a rule the simulator does not
        // warrant fairness when concurrently writing to an input. Therefore, if
        // the mutex is already locked when this writer attempts to lock it, it
        // means another writer is concurrently writing an event, and that event
        // is just as legitimate as ours so there is no need to overwrite it.
        match self.inner.slot.try_lock() {
            TryLockResult::Ok(mut v) => *v = Some(event),
            TryLockResult::Err(TryLockError::WouldBlock) => {}
            TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}

impl<T> fmt::Debug for EventSlotWriter<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Fixed: this previously reported the stale name "EventStreamWriter",
        // which does not match this type.
        f.debug_struct("EventSlotWriter").finish_non_exhaustive()
    }
}

View File

@ -0,0 +1,295 @@
mod broadcaster;
mod sender;
use std::fmt;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use crate::model::Model;
use crate::ports::InputFn;
use crate::ports::{LineError, LineId};
use crate::simulation::Address;
use crate::time::{
Action, ActionKey, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction,
};
use crate::util::slot;
use broadcaster::ReplyIterator;
use broadcaster::{EventBroadcaster, QueryBroadcaster};
use sender::{InputSender, ReplierSender};
use super::ReplierFn;
/// An event source port.
///
/// The `EventSource` port is similar to an [`Output`](crate::ports::Output)
/// port in that it can send events to connected input ports. It is not meant,
/// however, to be instantiated as a member of a model, but rather as a
/// simulation monitoring endpoint instantiated during bench assembly.
pub struct EventSource<T: Clone + Send + 'static> {
    /// Shared broadcaster; the `Arc<Mutex<_>>` lets periodic actions hold
    /// their own handle to it (see `periodic_event`).
    broadcaster: Arc<Mutex<EventBroadcaster<T>>>,
    /// Identifier to assign to the next connection; never reused.
    next_line_id: u64,
}
impl<T: Clone + Send + 'static> EventSource<T> {
    /// Creates a new, disconnected `EventSource` port.
    pub fn new() -> Self {
        Self::default()
    }

    /// Adds a connection to an input port of the model specified by the
    /// address.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of type `T` plus, optionally, a scheduler
    /// reference.
    pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        S: Send + 'static,
    {
        // Line identifiers are monotonically increasing and never reused, so
        // `u64::MAX` connections is the hard limit.
        assert!(self.next_line_id != u64::MAX);
        let line_id = LineId(self.next_line_id);
        self.next_line_id += 1;
        let sender = Box::new(InputSender::new(input, address.into().0));
        self.broadcaster.lock().unwrap().add(sender, line_id);

        line_id
    }

    /// Removes the connection specified by the `LineId` parameter.
    ///
    /// It is a logic error to specify a line identifier from another
    /// [`EventSource`], [`QuerySource`], [`Output`](crate::ports::Output) or
    /// [`Requestor`](crate::ports::Requestor) instance and may result in the
    /// disconnection of an arbitrary endpoint.
    pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
        if self.broadcaster.lock().unwrap().remove(line_id) {
            Ok(())
        } else {
            Err(LineError {})
        }
    }

    /// Removes all connections.
    pub fn disconnect_all(&mut self) {
        self.broadcaster.lock().unwrap().clear();
    }

    /// Returns an action which, when processed, broadcasts an event to all
    /// connected input ports.
    ///
    /// Note that the action broadcasts the event to those models that are
    /// connected to the event source at the time the action is processed.
    pub fn event(&mut self, arg: T) -> Action {
        // NOTE(review): the mutex guard is a temporary dropped at the end of
        // this statement, so `broadcast` here is presumed to return a future
        // that does not borrow the broadcaster — confirm against the `source`
        // broadcaster implementation.
        let fut = self.broadcaster.lock().unwrap().broadcast(arg);
        let fut = async {
            fut.await.unwrap();
        };

        Action::new(OnceAction::new(fut))
    }

    /// Returns a cancellable action and a cancellation key; when processed, the
    /// action broadcasts an event to all connected input ports.
    ///
    /// Note that the action broadcasts the event to those models that are
    /// connected to the event source at the time the action is processed.
    pub fn keyed_event(&mut self, arg: T) -> (Action, ActionKey) {
        let action_key = ActionKey::new();
        let fut = self.broadcaster.lock().unwrap().broadcast(arg);

        let action = Action::new(KeyedOnceAction::new(
            // Cancellation is ignored once the action is already spawned on the
            // executor. This means the action cannot be cancelled while the
            // simulation is running, but since an event source is meant to be
            // used outside the simulator, this shouldn't be an issue in
            // practice.
            |_| async {
                fut.await.unwrap();
            },
            action_key.clone(),
        ));

        (action, action_key)
    }

    /// Returns a periodically recurring action which, when processed,
    /// broadcasts an event to all connected input ports.
    ///
    /// Note that the action broadcasts the event to those models that are
    /// connected to the event source at the time the action is processed.
    pub fn periodic_event(&mut self, period: Duration, arg: T) -> Action {
        // The action holds its own handle to the broadcaster so that the set
        // of connected ports is sampled each time the action is processed.
        let broadcaster = self.broadcaster.clone();

        Action::new(PeriodicAction::new(
            || async move {
                let fut = broadcaster.lock().unwrap().broadcast(arg);
                fut.await.unwrap();
            },
            period,
        ))
    }

    /// Returns a cancellable, periodically recurring action and a cancellation
    /// key; when processed, the action broadcasts an event to all connected
    /// input ports.
    ///
    /// Note that the action broadcasts the event to those models that are
    /// connected to the event source at the time the action is processed.
    pub fn keyed_periodic_event(&mut self, period: Duration, arg: T) -> (Action, ActionKey) {
        let action_key = ActionKey::new();
        let broadcaster = self.broadcaster.clone();

        let action = Action::new(KeyedPeriodicAction::new(
            // Cancellation is ignored once the action is already spawned on the
            // executor. This means the action cannot be cancelled while the
            // simulation is running, but since an event source is meant to be
            // used outside the simulator, this shouldn't be an issue in
            // practice.
            |_| async move {
                let fut = broadcaster.lock().unwrap().broadcast(arg);
                fut.await.unwrap();
            },
            period,
            action_key.clone(),
        ));

        (action, action_key)
    }
}
impl<T: Clone + Send + 'static> Default for EventSource<T> {
    /// Creates an `EventSource` with no connections.
    fn default() -> Self {
        let broadcaster = EventBroadcaster::default();

        Self {
            broadcaster: Arc::new(Mutex::new(broadcaster)),
            next_line_id: 0,
        }
    }
}
impl<T: Clone + Send + 'static> fmt::Debug for EventSource<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Summary only: the senders themselves are not `Debug`.
        let port_count = self.broadcaster.lock().unwrap().len();

        write!(f, "Event source ({} connected ports)", port_count)
    }
}
/// A request source port.
///
/// The `QuerySource` port is similar to a
/// [`Requestor`](crate::ports::Requestor) port in that it can send events to
/// connected input ports. It is not meant, however, to be instantiated as a
/// member of a model, but rather as a simulation monitoring endpoint
/// instantiated during bench assembly.
pub struct QuerySource<T: Clone + Send + 'static, R: Send + 'static> {
    /// The list of connected replier ports, shared with the actions returned
    /// by `query`.
    broadcaster: Arc<Mutex<QueryBroadcaster<T, R>>>,
    /// Identifier assigned to the next connection; incremented monotonically
    /// and never reused, even after a disconnection.
    next_line_id: u64,
}
impl<T: Clone + Send + 'static, R: Send + 'static> QuerySource<T, R> {
    /// Creates a new, disconnected `QuerySource` port.
    pub fn new() -> Self {
        Default::default()
    }

    /// Adds a connection to a replier port of the model specified by the
    /// address.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of type `R` and taking as argument a value of type `T`
    /// plus, optionally, a scheduler reference.
    pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
        S: Send + 'static,
    {
        // Guard against line identifier exhaustion before handing one out.
        assert!(self.next_line_id != u64::MAX);
        let new_line = LineId(self.next_line_id);
        self.next_line_id += 1;

        let target = Box::new(ReplierSender::new(replier, address.into().0));
        self.broadcaster.lock().unwrap().add(target, new_line);

        new_line
    }

    /// Removes the connection specified by the `LineId` parameter.
    ///
    /// It is a logic error to specify a line identifier from another
    /// [`QuerySource`], [`EventSource`], [`Output`](crate::ports::Output) or
    /// [`Requestor`](crate::ports::Requestor) instance and may result in the
    /// disconnection of an arbitrary endpoint.
    pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
        match self.broadcaster.lock().unwrap().remove(line_id) {
            true => Ok(()),
            false => Err(LineError {}),
        }
    }

    /// Removes all connections.
    pub fn disconnect_all(&mut self) {
        let mut broadcaster = self.broadcaster.lock().unwrap();
        broadcaster.clear();
    }

    /// Returns an action which, when processed, broadcasts a query to all
    /// connected replier ports.
    ///
    /// Note that the action broadcasts the query to those models that are
    /// connected to the query source at the time the action is processed.
    pub fn query(&mut self, arg: T) -> (Action, ReplyReceiver<R>) {
        let (writer, reader) = slot::slot();
        let broadcast_fut = self.broadcaster.lock().unwrap().broadcast(arg);

        // Once processed, the action stores the collected replies in the slot
        // read by the returned `ReplyReceiver`.
        let action = Action::new(OnceAction::new(async move {
            let replies = broadcast_fut.await.unwrap();
            let _ = writer.write(replies);
        }));

        (action, ReplyReceiver::<R>(reader))
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> Default for QuerySource<T, R> {
    /// Creates a query source without any connected replier ports.
    fn default() -> Self {
        let broadcaster = Arc::new(Mutex::new(QueryBroadcaster::default()));

        Self {
            broadcaster,
            next_line_id: 0,
        }
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for QuerySource<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Summary only: the senders themselves are not `Debug`.
        let port_count = self.broadcaster.lock().unwrap().len();

        write!(f, "Query source ({} connected ports)", port_count)
    }
}
/// A receiver for all replies collected from a single query broadcast.
///
/// The replies are made available through the inner slot, written once by the
/// action returned by `QuerySource::query`.
pub struct ReplyReceiver<R>(slot::SlotReader<ReplyIterator<R>>);
impl<R> ReplyReceiver<R> {
    /// Returns all replies to a query.
    ///
    /// Returns `None` if the replies are not yet available or if they were
    /// already taken in a previous call to `take`.
    pub fn take(&mut self) -> Option<impl Iterator<Item = R>> {
        match self.0.try_read() {
            Ok(replies) => Some(replies),
            Err(_) => None,
        }
    }
}
impl<R> fmt::Debug for ReplyReceiver<R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The replies themselves are not required to be `Debug`.
        f.write_str("Replies")
    }
}

View File

@ -0,0 +1,759 @@
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::vec;
use pin_project_lite::pin_project;
use diatomic_waker::WakeSink;
use super::sender::{Sender, SenderFuture};
use crate::ports::LineId;
use crate::util::task_set::TaskSet;
/// An object that can efficiently broadcast messages to several addresses.
///
/// This is very similar to `output::broadcaster::BroadcasterInner`, but
/// generates owned futures instead.
///
/// This object maintains a list of senders associated to each target address.
/// When a message is broadcast, the sender futures are awaited in parallel.
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
/// does, but the outputs of all sender futures are returned all at once rather
/// than with an asynchronous iterator (a.k.a. async stream).
pub(super) struct BroadcasterInner<T: Clone, R> {
    /// The list of senders with their associated line identifier.
    ///
    /// Replies are yielded in the order of this list, i.e. connection order,
    /// except after removals (which use `swap_remove`).
    senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
}
impl<T: Clone, R> BroadcasterInner<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
        // NOTE(review): the documented limit is not checked here — presumably
        // enforced by the caller or by `TaskSet`; confirm.
        self.senders.push((id, sender));
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        let position = self.senders.iter().position(|(line_id, _)| *line_id == id);

        match position {
            Some(idx) => {
                // Ordering of the remaining senders is not preserved.
                self.senders.swap_remove(idx);
                true
            }
            None => false,
        }
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.senders.clear();
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.senders.len()
    }

    /// Efficiently broadcasts a message or a query to multiple addresses.
    ///
    /// This method does not collect the responses from queries.
    fn broadcast(&mut self, arg: T) -> BroadcastFuture<R> {
        let mut future_states = Vec::with_capacity(self.senders.len());

        // Clone the argument for every sender but the last one, which can take
        // ownership of it; futures are pushed in connection-list order.
        if let Some(((_, last), rest)) = self.senders.split_last_mut() {
            for (_, sender) in rest {
                future_states.push(SenderFutureState::Pending(sender.send(arg.clone())));
            }
            future_states.push(SenderFutureState::Pending(last.send(arg)));
        }

        // Generate the global future.
        BroadcastFuture::new(future_states)
    }
}
impl<T: Clone, R> Default for BroadcasterInner<T, R> {
    /// Creates a broadcaster with no connected senders.
    fn default() -> Self {
        Self {
            senders: Vec::default(),
        }
    }
}
/// An object that can efficiently broadcast events to several input ports.
///
/// This is very similar to `output::broadcaster::EventBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct EventBroadcaster<T: Clone> {
    /// The broadcaster core object.
    ///
    /// The reply type is `()` since input ports do not produce replies.
    inner: BroadcasterInner<T, ()>,
}
impl<T: Clone + Send> EventBroadcaster<T> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, ()>>, id: LineId) {
        self.inner.add(sender, id)
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear()
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Broadcasts an event to all addresses.
    pub(super) fn broadcast(
        &mut self,
        arg: T,
    ) -> impl Future<Output = Result<(), BroadcastError>> + Send {
        // The 0-, 1- and N-sender cases produce differently-typed futures, so
        // they are unified behind a small local enum before entering the
        // returned `async` block.
        enum Outcome<S, M> {
            None,
            One(S),
            Many(M),
        }

        let outcome = match self.inner.senders.as_mut_slice() {
            // No connected sender: nothing to do.
            [] => Outcome::None,
            // Exactly one sender: bypass the multi-future machinery.
            [(_, only_sender)] => Outcome::One(only_sender.send(arg)),
            // General case.
            _ => Outcome::Many(self.inner.broadcast(arg)),
        };

        async {
            match outcome {
                Outcome::None => Ok(()),
                Outcome::One(send_fut) => send_fut.await.map_err(|_| BroadcastError {}),
                Outcome::Many(broadcast_fut) => broadcast_fut.await.map(|_| ()),
            }
        }
    }
}
impl<T: Clone> Default for EventBroadcaster<T> {
fn default() -> Self {
Self {
inner: BroadcasterInner::default(),
}
}
}
/// An object that can efficiently broadcast queries to several replier ports.
///
/// This is very similar to `output::broadcaster::QueryBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct QueryBroadcaster<T: Clone, R> {
    /// The broadcaster core object.
    ///
    /// `R` is the reply type produced by each connected replier port.
    inner: BroadcasterInner<T, R>,
}
impl<T: Clone + Send, R: Send> QueryBroadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
        self.inner.add(sender, id)
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear()
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Broadcasts a query to all addresses and collects the replies.
    pub(super) fn broadcast(
        &mut self,
        arg: T,
    ) -> impl Future<Output = Result<ReplyIterator<R>, BroadcastError>> + Send {
        // The 0-, 1- and N-sender cases produce differently-typed futures, so
        // they are unified behind a small local enum before entering the
        // returned `async` block.
        enum Outcome<S, M> {
            None,
            One(S),
            Many(M),
        }

        let outcome = match self.inner.senders.as_mut_slice() {
            // No connected sender: the reply set is empty.
            [] => Outcome::None,
            // Exactly one sender: bypass the multi-future machinery.
            [(_, only_sender)] => Outcome::One(only_sender.send(arg)),
            // General case.
            _ => Outcome::Many(self.inner.broadcast(arg)),
        };

        async {
            match outcome {
                Outcome::None => Ok(ReplyIterator(Vec::new().into_iter())),
                Outcome::One(send_fut) => send_fut
                    .await
                    .map(|reply| ReplyIterator(vec![SenderFutureState::Ready(reply)].into_iter()))
                    .map_err(|_| BroadcastError {}),
                Outcome::Many(broadcast_fut) => broadcast_fut.await.map_err(|_| BroadcastError {}),
            }
        }
    }
}
impl<T: Clone, R> Default for QueryBroadcaster<T, R> {
fn default() -> Self {
Self {
inner: BroadcasterInner::default(),
}
}
}
pin_project! {
    /// A future aggregating the outputs of a collection of sender futures.
    ///
    /// The idea is to join all sender futures as efficiently as possible, meaning:
    ///
    /// - the sender futures are polled simultaneously rather than waiting for their
    /// completion in a sequential manner,
    /// - the happy path (all futures immediately ready) is very fast.
    pub(super) struct BroadcastFuture<R> {
        // Thread-safe waker handle, registered with the current task's waker
        // when no sub-task is scheduled.
        wake_sink: WakeSink,
        // Tasks associated to the sender futures, one per future, indexed
        // identically to `future_states`.
        task_set: TaskSet,
        // List of all sender futures or their outputs.
        future_states: Vec<SenderFutureState<R>>,
        // The total count of futures that have not yet been polled to completion.
        pending_futures_count: usize,
        // State of completion of the future.
        state: FutureState,
    }
}
impl<R> BroadcastFuture<R> {
    /// Creates a new `BroadcastFuture`.
    ///
    /// One sub-task is allocated per sender future; initially, every future is
    /// pending.
    fn new(future_states: Vec<SenderFutureState<R>>) -> Self {
        let pending = future_states.len();
        let wake_sink = WakeSink::new();
        let task_set = TaskSet::with_len(wake_sink.source(), pending);

        Self {
            wake_sink,
            task_set,
            future_states,
            pending_futures_count: pending,
            state: FutureState::Uninit,
        }
    }
}
impl<R> Future for BroadcastFuture<R> {
    type Output = Result<ReplyIterator<R>, BroadcastError>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;

        // Polling after completion is a contract violation.
        assert_ne!(this.state, FutureState::Completed);

        // Poll all sender futures once if this is the first time the broadcast
        // future is polled.
        if this.state == FutureState::Uninit {
            for task_idx in 0..this.future_states.len() {
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    // Each sub-future gets its own waker so that only the
                    // sub-task that made progress is re-polled later.
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(_)) => {
                            // A single failed sender fails the whole broadcast.
                            this.state = FutureState::Completed;
                            return Poll::Ready(Err(BroadcastError {}));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            if this.pending_futures_count == 0 {
                // Happy path: every sender future was immediately ready.
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }

            this.state = FutureState::Pending;
        }

        // Repeatedly poll the futures of all scheduled tasks until there are no
        // more scheduled tasks.
        loop {
            // No need to register the waker if some tasks have been scheduled.
            if !this.task_set.has_scheduled() {
                this.wake_sink.register(cx.waker());
            }

            // Retrieve the indices of the scheduled tasks if any. If there are
            // no scheduled tasks, `Poll::Pending` is returned and this future
            // will be awaken again when enough tasks have been scheduled.
            //
            // NOTE: the current implementation requires a notification to be
            // sent each time a sub-future has made progress. We may try at some
            // point to benchmark an alternative strategy where a notification
            // is requested only when all pending sub-futures have made progress,
            // using `take_scheduled(this.pending_futures_count)`. This would
            // reduce the cost of context switch but could hurt latency.
            let scheduled_tasks = match this.task_set.take_scheduled(1) {
                Some(st) => st,
                None => return Poll::Pending,
            };

            for task_idx in scheduled_tasks {
                // Same per-sub-future polling as on the first pass above.
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(_)) => {
                            this.state = FutureState::Completed;
                            return Poll::Ready(Err(BroadcastError {}));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }
        }
    }
}
/// Error returned when a message could not be delivered.
///
/// Produced when any underlying sender reports a send error (see
/// `sender::SendError`); it carries no further detail.
#[derive(Debug)]
pub(super) struct BroadcastError {}
/// State of completion of a `BroadcastFuture`.
#[derive(Debug, PartialEq)]
enum FutureState {
    /// The sender futures have not been polled yet.
    Uninit,
    /// At least one sender future is still pending.
    Pending,
    /// The future has resolved and must not be polled again.
    Completed,
}
/// The state of a `SenderFuture`.
enum SenderFutureState<R> {
    /// The sender future has not completed yet.
    Pending(SenderFuture<R>),
    /// The output of the completed sender future.
    Ready(R),
}
/// An iterator over the replies to a broadcasted request.
///
/// All items are expected to be in the `Ready` state by the time the iterator
/// is constructed (see the `Iterator` implementation).
pub(crate) struct ReplyIterator<R>(vec::IntoIter<SenderFutureState<R>>);
impl<R> Iterator for ReplyIterator<R> {
    type Item = R;

    /// Yields the next reply; panics if a reply is still pending, which would
    /// indicate a logic error in `BroadcastFuture`.
    fn next(&mut self) -> Option<Self::Item> {
        match self.0.next()? {
            SenderFutureState::Ready(reply) => Some(reply),
            SenderFutureState::Pending(_) => panic!("reply missing in replies iterator"),
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread;

    use futures_executor::block_on;

    use crate::channel::Receiver;
    use crate::time::Scheduler;
    use crate::time::{MonotonicTime, TearableAtomicTime};
    use crate::util::priority_queue::PriorityQueue;
    use crate::util::sync_cell::SyncCell;

    use super::super::sender::{InputSender, ReplierSender};
    use super::*;
    use crate::model::Model;

    /// Minimal test model wrapping a shared atomic counter.
    struct Counter {
        inner: Arc<AtomicUsize>,
    }
    impl Counter {
        fn new(counter: Arc<AtomicUsize>) -> Self {
            Self { inner: counter }
        }
        /// Input port: increments the counter by `by`.
        async fn inc(&mut self, by: usize) {
            self.inner.fetch_add(by, Ordering::Relaxed);
        }
        /// Replier port: increments the counter by `by` and returns the
        /// previous value.
        async fn fetch_inc(&mut self, by: usize) -> usize {
            let res = self.inner.fetch_add(by, Ordering::Relaxed);
            res
        }
    }
    impl Model for Counter {}

    /// Broadcasts a single event to `N_RECV` models and checks that every one
    /// of them processed it exactly once.
    #[test]
    fn broadcast_event_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = EventBroadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(InputSender::new(Counter::inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(broadcaster.broadcast(1)).unwrap();
        });

        let counter = Arc::new(AtomicUsize::new(0));

        // One receiving thread per mailbox, each processing a single message
        // against a dummy scheduler.
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }

    /// Broadcasts a single query to `N_RECV` models and checks that the
    /// collected replies are the successive pre-increment counter values.
    #[test]
    fn broadcast_query_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = QueryBroadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            let iter = block_on(broadcaster.broadcast(1)).unwrap();
            let sum = iter.fold(0, |acc, val| acc + val);

            assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
        });

        let counter = Arc::new(AtomicUsize::new(0));

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                        // NOTE(review): presumably keeps the mailbox alive
                        // until the reply has been consumed — confirm.
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }
}
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use futures_channel::mpsc;
    use futures_util::StreamExt;

    use loom::model::Builder;
    use loom::sync::atomic::{AtomicBool, Ordering};
    use loom::thread;

    use waker_fn::waker_fn;

    use super::super::sender::SendError;
    use super::*;

    // An event that may be waken spuriously.
    struct TestEvent<R> {
        // The receiver is actually used only once in tests, so it is moved out
        // of the `Option` on first use.
        receiver: Option<mpsc::UnboundedReceiver<Option<R>>>,
    }
    impl<R: Send + 'static> Sender<(), R> for TestEvent<R> {
        // `None` items model spurious wake-ups and are filtered out; the first
        // `Some` item is the final value.
        fn send(&mut self, _arg: ()) -> Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>> {
            let receiver = self.receiver.take().unwrap();

            Box::pin(async move {
                let mut stream = Box::pin(receiver.filter_map(|item| async { item }));

                Ok(stream.next().await.unwrap())
            })
        }
    }

    // An object that can wake a `TestEvent`.
    #[derive(Clone)]
    struct TestEventWaker<R> {
        sender: mpsc::UnboundedSender<Option<R>>,
    }
    impl<R> TestEventWaker<R> {
        /// Wakes the event without completing it.
        fn wake_spurious(&self) {
            let _ = self.sender.unbounded_send(None);
        }
        /// Completes the event with `value`.
        fn wake_final(&self, value: R) {
            let _ = self.sender.unbounded_send(Some(value));
        }
    }

    /// Creates a connected event/waker pair.
    fn test_event<R>() -> (TestEvent<R>, TestEventWaker<R>) {
        let (sender, receiver) = mpsc::unbounded();

        (
            TestEvent {
                receiver: Some(receiver),
            },
            TestEventWaker { sender },
        )
    }

    /// Checks under loom that a broadcast over three senders completed from
    /// other threads always yields all three replies in connection order.
    #[test]
    fn loom_broadcast_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();
            let (test_event3, waker3) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));
            broadcaster.add(Box::new(test_event3), LineId(3));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th3 = thread::spawn(move || waker3.wake_final(42));

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), Some(42));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th3.join().unwrap();

            // All wakers have fired by now, so one final poll must complete.
            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), Some(42));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }

    /// Same as `loom_broadcast_basic`, but with an additional spurious wake-up
    /// of the first sender interleaved with the final wake-ups.
    #[test]
    fn loom_broadcast_spurious() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let spurious_waker = waker1.clone();
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th_spurious.join().unwrap();

            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }
}

View File

@ -0,0 +1,136 @@
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use futures_channel::oneshot;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel;
use crate::model::Model;
use crate::ports::{InputFn, ReplierFn};
/// Owned future returned by [`Sender::send`], resolving to the reply (`()` for
/// plain events) or to a [`SendError`].
pub(super) type SenderFuture<R> = Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>>;

/// An event or query sender abstracting over the target model and input method.
pub(super) trait Sender<T, R>: Send {
    /// Asynchronously send the event or request.
    fn send(&mut self, arg: T) -> SenderFuture<R>;
}
/// An object that can send events to an input port.
pub(super) struct InputSender<M: 'static, F, T, S> {
    /// The input function to invoke on the target model.
    func: F,
    /// Channel endpoint to the target model's mailbox.
    sender: channel::Sender<M>,
    // Makes the struct behave, for variance purposes, as if it owned a
    // closure `fn(&mut M, T)`.
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    // Anchors the `S` marker type parameter of the input function.
    _phantom_closure_marker: PhantomData<S>,
}
impl<M: Send, F, T, S> InputSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + 'static,
{
    /// Creates a sender that forwards events to the input function `func` of
    /// the model reachable through `sender`.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M: Send, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + 'static,
    S: Send + 'static,
{
    /// Sends the event to the associated input port of the target model.
    fn send(&mut self, arg: T) -> SenderFuture<()> {
        // Clone what the owned future needs so it does not borrow `self`.
        let input_func = self.func.clone();
        let channel = self.sender.clone();

        Box::pin(async move {
            let send_result = channel
                .send(move |model, scheduler, recycle_box| {
                    let fut = input_func.call(model, arg, scheduler);

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await;

            send_result.map_err(|_| SendError {})
        })
    }
}
/// An object that can send a request to a replier port and retrieve a response.
pub(super) struct ReplierSender<M: 'static, F, T, R, S> {
    /// The replier function to invoke on the target model.
    func: F,
    /// Channel endpoint to the target model's mailbox.
    sender: channel::Sender<M>,
    // Makes the struct behave, for variance purposes, as if it owned a
    // closure `fn(&mut M, T) -> R`.
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    // Anchors the `S` marker type parameter of the replier function.
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S>,
    T: Send + 'static,
    R: Send + 'static,
{
    /// Creates a sender that forwards requests to the replier function `func`
    /// of the model reachable through `sender`.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
    T: Send + 'static,
    R: Send + 'static,
    S: Send,
{
    /// Sends the request to the associated replier port and resolves with the
    /// reply.
    fn send(&mut self, arg: T) -> SenderFuture<R> {
        // Clone what the owned future needs so it does not borrow `self`; the
        // reply is routed back through a dedicated oneshot channel.
        let replier_func = self.func.clone();
        let channel = self.sender.clone();
        let (reply_writer, reply_reader) = oneshot::channel();

        Box::pin(async move {
            channel
                .send(move |model, scheduler, recycle_box| {
                    let fut = async move {
                        let reply = replier_func.call(model, arg, scheduler).await;
                        let _ = reply_writer.send(reply);
                    };

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await
                .map_err(|_| SendError {})?;

            reply_reader.await.map_err(|_| SendError {})
        })
    }
}
/// Error returned when the mailbox was closed or dropped.
///
/// Also returned when the reply channel of a request was dropped before a
/// reply could be received.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(super) struct SendError {}
impl fmt::Display for SendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("sending message into a closed mailbox")
    }
}
impl Error for SendError {}

10
asynchronix/src/rpc.rs Normal file
View File

@ -0,0 +1,10 @@
//! Simulation management through remote procedure calls.

mod codegen;
mod endpoint_registry;
mod generic_server;
// The gRPC front-end is only compiled in with the `grpc-server` feature (see
// `Cargo.toml`).
#[cfg(feature = "grpc-server")]
pub mod grpc;
mod key_registry;

pub use endpoint_registry::EndpointRegistry;

View File

@ -0,0 +1,50 @@
// Additional types for transport implementations which, unlike gRPC, do not
// support auto-generation from the `Simulation` service description.
syntax = "proto3";
package custom_transport;

import "simulation.proto";

// Errors reported by the transport layer itself, before a request could be
// dispatched to the simulation service.
enum ServerErrorCode {
  UNKNOWN_REQUEST = 0;
  EMPTY_REQUEST = 1;
}

message ServerError {
  ServerErrorCode code = 1;
  string message = 2;
}

// Envelope wrapping any one request of the `Simulation` service.
message AnyRequest {
  oneof request { // Expects exactly 1 variant.
    simulation.InitRequest init_request = 1;
    simulation.TimeRequest time_request = 2;
    simulation.StepRequest step_request = 3;
    simulation.StepUntilRequest step_until_request = 4;
    simulation.ScheduleEventRequest schedule_event_request = 5;
    simulation.CancelEventRequest cancel_event_request = 6;
    simulation.ProcessEventRequest process_event_request = 7;
    simulation.ProcessQueryRequest process_query_request = 8;
    simulation.ReadEventsRequest read_events_request = 9;
    simulation.OpenSinkRequest open_sink_request = 10;
    simulation.CloseSinkRequest close_sink_request = 11;
  }
}

// Envelope wrapping any one reply of the `Simulation` service, or a
// transport-level error.
message AnyReply {
  oneof reply { // Contains exactly 1 variant.
    simulation.InitReply init_reply = 1;
    simulation.TimeReply time_reply = 2;
    simulation.StepReply step_reply = 3;
    simulation.StepUntilReply step_until_reply = 4;
    simulation.ScheduleEventReply schedule_event_reply = 5;
    simulation.CancelEventReply cancel_event_reply = 6;
    simulation.ProcessEventReply process_event_reply = 7;
    simulation.ProcessQueryReply process_query_reply = 8;
    simulation.ReadEventsReply read_events_reply = 9;
    simulation.OpenSinkReply open_sink_reply = 10;
    simulation.CloseSinkReply close_sink_reply = 11;
    ServerError error = 100;
  }
}

View File

@ -0,0 +1,161 @@
// The main simulation protocol.
syntax = "proto3";
package simulation;

import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";

// Simulation-level error codes, reported within each reply's `result`.
enum ErrorCode {
  INTERNAL_ERROR = 0;
  SIMULATION_NOT_STARTED = 1;
  MISSING_ARGUMENT = 2;
  INVALID_TIME = 3;
  INVALID_DURATION = 4;
  INVALID_MESSAGE = 5;
  INVALID_KEY = 6;
  SOURCE_NOT_FOUND = 10;
  SINK_NOT_FOUND = 11;
  KEY_NOT_FOUND = 12;
  SIMULATION_TIME_OUT_OF_RANGE = 13;
}

message Error {
  ErrorCode code = 1;
  string message = 2;
}

// Opaque handle to a scheduled event, used for cancellation.
message EventKey {
  uint64 subkey1 = 1;
  uint64 subkey2 = 2;
}

message InitRequest { optional google.protobuf.Timestamp time = 1; }
message InitReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}

message TimeRequest {}
message TimeReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}

message StepRequest {}
message StepReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}

message StepUntilRequest {
  oneof deadline { // Expects exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    google.protobuf.Duration duration = 2;
  }
}
message StepUntilReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}

message ScheduleEventRequest {
  oneof deadline { // Expects exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    google.protobuf.Duration duration = 2;
  }
  string source_name = 3;
  bytes event = 4;
  optional google.protobuf.Duration period = 5;
  optional bool with_key = 6;
}
message ScheduleEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    EventKey key = 2;
    Error error = 100;
  }
}

message CancelEventRequest { EventKey key = 1; }
message CancelEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}

message ProcessEventRequest {
  string source_name = 1;
  bytes event = 2;
}
message ProcessEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}

message ProcessQueryRequest {
  string source_name = 1;
  bytes request = 2;
}
message ProcessQueryReply {
  // This field is hoisted because protobuf3 does not support `repeated` within
  // a `oneof`. It is always empty if an error is returned.
  repeated bytes replies = 1;
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}

message ReadEventsRequest { string sink_name = 1; }
message ReadEventsReply {
  // This field is hoisted because protobuf3 does not support `repeated` within
  // a `oneof`. It is always empty if an error is returned.
  repeated bytes events = 1;
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}

message OpenSinkRequest { string sink_name = 1; }
message OpenSinkReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}

message CloseSinkRequest { string sink_name = 1; }
message CloseSinkReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}

service Simulation {
  rpc Init(InitRequest) returns (InitReply);
  rpc Time(TimeRequest) returns (TimeReply);
  rpc Step(StepRequest) returns (StepReply);
  rpc StepUntil(StepUntilRequest) returns (StepUntilReply);
  rpc ScheduleEvent(ScheduleEventRequest) returns (ScheduleEventReply);
  rpc CancelEvent(CancelEventRequest) returns (CancelEventReply);
  rpc ProcessEvent(ProcessEventRequest) returns (ProcessEventReply);
  rpc ProcessQuery(ProcessQueryRequest) returns (ProcessQueryReply);
  rpc ReadEvents(ReadEventsRequest) returns (ReadEventsReply);
  rpc OpenSink(OpenSinkRequest) returns (OpenSinkReply);
  rpc CloseSink(CloseSinkRequest) returns (CloseSinkReply);
}

View File

@ -0,0 +1,5 @@
#![allow(unreachable_pub)]
#![allow(clippy::enum_variant_names)]

// Modules holding the protobuf-generated types (see the `// This file is
// @generated by prost-build` headers in the submodules).
pub(crate) mod custom_transport;
pub(crate) mod simulation;

View File

View File

@ -0,0 +1,111 @@
// This file is @generated by prost-build.
// NOTE(review): generated code — do not edit by hand; regenerate through the
// `rpc-codegen` cargo feature instead. Comments are annotations only.
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct ServerError {
    #[prost(enumeration = "ServerErrorCode", tag = "1")]
    pub code: i32,
    #[prost(string, tag = "2")]
    pub message: ::prost::alloc::string::String,
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnyRequest {
    /// Expects exactly 1 variant.
    #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")]
    pub request: ::core::option::Option<any_request::Request>,
}
/// Nested message and enum types in `AnyRequest`.
pub mod any_request {
    /// Expects exactly 1 variant.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Request {
        #[prost(message, tag = "1")]
        InitRequest(super::super::simulation::InitRequest),
        #[prost(message, tag = "2")]
        TimeRequest(super::super::simulation::TimeRequest),
        #[prost(message, tag = "3")]
        StepRequest(super::super::simulation::StepRequest),
        #[prost(message, tag = "4")]
        StepUntilRequest(super::super::simulation::StepUntilRequest),
        #[prost(message, tag = "5")]
        ScheduleEventRequest(super::super::simulation::ScheduleEventRequest),
        #[prost(message, tag = "6")]
        CancelEventRequest(super::super::simulation::CancelEventRequest),
        #[prost(message, tag = "7")]
        ProcessEventRequest(super::super::simulation::ProcessEventRequest),
        #[prost(message, tag = "8")]
        ProcessQueryRequest(super::super::simulation::ProcessQueryRequest),
        #[prost(message, tag = "9")]
        ReadEventsRequest(super::super::simulation::ReadEventsRequest),
        #[prost(message, tag = "10")]
        OpenSinkRequest(super::super::simulation::OpenSinkRequest),
        #[prost(message, tag = "11")]
        CloseSinkRequest(super::super::simulation::CloseSinkRequest),
    }
}
#[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Clone, PartialEq, ::prost::Message)]
pub struct AnyReply {
    /// Contains exactly 1 variant.
    #[prost(oneof = "any_reply::Reply", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100")]
    pub reply: ::core::option::Option<any_reply::Reply>,
}
/// Nested message and enum types in `AnyReply`.
pub mod any_reply {
    /// Contains exactly 1 variant.
    #[allow(clippy::derive_partial_eq_without_eq)]
    #[derive(Clone, PartialEq, ::prost::Oneof)]
    pub enum Reply {
        #[prost(message, tag = "1")]
        InitReply(super::super::simulation::InitReply),
        #[prost(message, tag = "2")]
        TimeReply(super::super::simulation::TimeReply),
        #[prost(message, tag = "3")]
        StepReply(super::super::simulation::StepReply),
        #[prost(message, tag = "4")]
        StepUntilReply(super::super::simulation::StepUntilReply),
        #[prost(message, tag = "5")]
        ScheduleEventReply(super::super::simulation::ScheduleEventReply),
        #[prost(message, tag = "6")]
        CancelEventReply(super::super::simulation::CancelEventReply),
        #[prost(message, tag = "7")]
        ProcessEventReply(super::super::simulation::ProcessEventReply),
        #[prost(message, tag = "8")]
        ProcessQueryReply(super::super::simulation::ProcessQueryReply),
        #[prost(message, tag = "9")]
        ReadEventsReply(super::super::simulation::ReadEventsReply),
        #[prost(message, tag = "10")]
        OpenSinkReply(super::super::simulation::OpenSinkReply),
        #[prost(message, tag = "11")]
        CloseSinkReply(super::super::simulation::CloseSinkReply),
        /// Transport-level error, distinct from the per-request `Error` types.
        #[prost(message, tag = "100")]
        Error(super::ServerError),
    }
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)]
#[repr(i32)]
pub enum ServerErrorCode {
    UnknownRequest = 0,
    EmptyRequest = 1,
}
impl ServerErrorCode {
    /// String value of the enum field names used in the ProtoBuf definition.
    ///
    /// The values are not transformed in any way and thus are considered stable
    /// (if the ProtoBuf definition does not change) and safe for programmatic use.
    pub fn as_str_name(&self) -> &'static str {
        match self {
            ServerErrorCode::UnknownRequest => "UNKNOWN_REQUEST",
            ServerErrorCode::EmptyRequest => "EMPTY_REQUEST",
        }
    }
    /// Creates an enum from field names used in the ProtoBuf definition.
    pub fn from_str_name(value: &str) -> ::core::option::Option<Self> {
        match value {
            "UNKNOWN_REQUEST" => Some(Self::UnknownRequest),
            "EMPTY_REQUEST" => Some(Self::EmptyRequest),
            _ => None,
        }
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,307 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;
use std::time::Duration;
use rmp_serde::decode::Error as RmpDecodeError;
use rmp_serde::encode::Error as RmpEncodeError;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::ports::{EventSinkStream, EventSource, QuerySource, ReplyReceiver};
use crate::time::{Action, ActionKey};
/// A registry that holds all sources and sinks meant to be accessed through
/// remote procedure calls.
#[derive(Default)]
pub struct EndpointRegistry {
    // Type-erased event sources, keyed by registration name.
    event_sources: HashMap<String, Box<dyn EventSourceAny>>,
    // Type-erased query sources, keyed by registration name.
    query_sources: HashMap<String, Box<dyn QuerySourceAny>>,
    // Type-erased event sink streams, keyed by registration name.
    sinks: HashMap<String, Box<dyn EventSinkStreamAny>>,
}
impl EndpointRegistry {
    /// Creates an empty `EndpointRegistry`.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds an event source to the registry.
    ///
    /// If the specified name is already in use for another event source, the
    /// source provided as argument is returned in the error.
    pub fn add_event_source<T>(
        &mut self,
        source: EventSource<T>,
        name: impl Into<String>,
    ) -> Result<(), EventSource<T>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
    {
        // The entry API performs the existence check and the insertion with a
        // single lookup.
        match self.event_sources.entry(name.into()) {
            Entry::Vacant(s) => {
                s.insert(Box::new(source));
                Ok(())
            }
            Entry::Occupied(_) => Err(source),
        }
    }
    /// Returns a mutable reference to the specified event source if it is in
    /// the registry.
    pub(crate) fn get_event_source_mut(&mut self, name: &str) -> Option<&mut dyn EventSourceAny> {
        self.event_sources.get_mut(name).map(|s| s.as_mut())
    }
    /// Adds a query source to the registry.
    ///
    /// If the specified name is already in use for another query source, the
    /// source provided as argument is returned in the error.
    pub fn add_query_source<T, R>(
        &mut self,
        source: QuerySource<T, R>,
        name: impl Into<String>,
    ) -> Result<(), QuerySource<T, R>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
        R: Serialize + Send + 'static,
    {
        match self.query_sources.entry(name.into()) {
            Entry::Vacant(s) => {
                s.insert(Box::new(source));
                Ok(())
            }
            Entry::Occupied(_) => Err(source),
        }
    }
    /// Returns a mutable reference to the specified query source if it is in
    /// the registry.
    pub(crate) fn get_query_source_mut(&mut self, name: &str) -> Option<&mut dyn QuerySourceAny> {
        self.query_sources.get_mut(name).map(|s| s.as_mut())
    }
    /// Adds a sink to the registry.
    ///
    /// If the specified name is already in use for another sink, the sink
    /// provided as argument is returned in the error.
    pub fn add_sink<S>(&mut self, sink: S, name: impl Into<String>) -> Result<(), S>
    where
        S: EventSinkStream + Send + 'static,
        S::Item: Serialize,
    {
        match self.sinks.entry(name.into()) {
            Entry::Vacant(s) => {
                s.insert(Box::new(sink));
                Ok(())
            }
            Entry::Occupied(_) => Err(sink),
        }
    }
    /// Returns a mutable reference to the specified sink if it is in the
    /// registry.
    pub(crate) fn get_sink_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> {
        self.sinks.get_mut(name).map(|s| s.as_mut())
    }
}
impl fmt::Debug for EndpointRegistry {
    /// Compact summary of the registry contents.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Report all three maps: the previous format counted only the event
        // sources under "sources", silently omitting the query sources.
        write!(
            f,
            "EndpointRegistry ({} event sources, {} query sources, {} sinks)",
            self.event_sources.len(),
            self.query_sources.len(),
            self.sinks.len()
        )
    }
}
/// A type-erased `EventSource` that operates on MessagePack-encoded serialized
/// events.
///
/// Object-safe so it can be stored as a `Box<dyn EventSourceAny>` in the
/// [`EndpointRegistry`].
pub(crate) trait EventSourceAny: Send + 'static {
    /// Returns an action which, when processed, broadcasts an event to all
    /// connected input ports.
    ///
    /// The argument is expected to conform to the serde MessagePack encoding.
    fn event(&mut self, msgpack_arg: &[u8]) -> Result<Action, RmpDecodeError>;
    /// Returns a cancellable action and a cancellation key; when processed, the
    /// action broadcasts an event to all connected input ports.
    ///
    /// The argument is expected to conform to the serde MessagePack encoding.
    fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError>;
    /// Returns a periodically recurring action which, when processed,
    /// broadcasts an event to all connected input ports.
    ///
    /// The argument is expected to conform to the serde MessagePack encoding.
    fn periodic_event(
        &mut self,
        period: Duration,
        msgpack_arg: &[u8],
    ) -> Result<Action, RmpDecodeError>;
    /// Returns a cancellable, periodically recurring action and a cancellation
    /// key; when processed, the action broadcasts an event to all connected
    /// input ports.
    ///
    /// The argument is expected to conform to the serde MessagePack encoding.
    fn keyed_periodic_event(
        &mut self,
        period: Duration,
        msgpack_arg: &[u8],
    ) -> Result<(Action, ActionKey), RmpDecodeError>;
    /// Human-readable name of the event type, as returned by
    /// `any::type_name()`.
    fn event_type_name(&self) -> &'static str;
}
impl<T> EventSourceAny for EventSource<T>
where
    T: DeserializeOwned + Clone + Send + 'static,
{
    fn event(&mut self, msgpack_arg: &[u8]) -> Result<Action, RmpDecodeError> {
        // Deserialize the typed argument first, then build the action from it.
        let arg: T = rmp_serde::from_read(msgpack_arg)?;
        Ok(self.event(arg))
    }
    fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError> {
        let arg: T = rmp_serde::from_read(msgpack_arg)?;
        Ok(self.keyed_event(arg))
    }
    fn periodic_event(
        &mut self,
        period: Duration,
        msgpack_arg: &[u8],
    ) -> Result<Action, RmpDecodeError> {
        let arg: T = rmp_serde::from_read(msgpack_arg)?;
        Ok(self.periodic_event(period, arg))
    }
    fn keyed_periodic_event(
        &mut self,
        period: Duration,
        msgpack_arg: &[u8],
    ) -> Result<(Action, ActionKey), RmpDecodeError> {
        let arg: T = rmp_serde::from_read(msgpack_arg)?;
        Ok(self.keyed_periodic_event(period, arg))
    }
    fn event_type_name(&self) -> &'static str {
        std::any::type_name::<T>()
    }
}
/// A type-erased `QuerySource` that operates on MessagePack-encoded serialized
/// queries and returns MessagePack-encoded replies.
pub(crate) trait QuerySourceAny: Send + 'static {
    /// Returns an action which, when processed, broadcasts a query to all
    /// connected replier ports.
    ///
    /// The argument is expected to conform to the serde MessagePack encoding.
    /// The returned receiver yields the replies once the action has been
    /// processed.
    fn query(
        &mut self,
        msgpack_arg: &[u8],
    ) -> Result<(Action, Box<dyn ReplyReceiverAny>), RmpDecodeError>;
    /// Human-readable name of the request type, as returned by
    /// `any::type_name()`.
    fn request_type_name(&self) -> &'static str;
    /// Human-readable name of the reply type, as returned by
    /// `any::type_name()`.
    fn reply_type_name(&self) -> &'static str;
}
impl<T, R> QuerySourceAny for QuerySource<T, R>
where
    T: DeserializeOwned + Clone + Send + 'static,
    R: Serialize + Send + 'static,
{
    fn query(
        &mut self,
        msgpack_arg: &[u8],
    ) -> Result<(Action, Box<dyn ReplyReceiverAny>), RmpDecodeError> {
        // Decode the typed request, then erase the reply receiver's type so
        // the caller can handle it generically.
        let arg: T = rmp_serde::from_read(msgpack_arg)?;
        let (action, reply_recv) = self.query(arg);
        Ok((action, Box::new(reply_recv) as Box<dyn ReplyReceiverAny>))
    }
    fn request_type_name(&self) -> &'static str {
        std::any::type_name::<T>()
    }
    fn reply_type_name(&self) -> &'static str {
        std::any::type_name::<R>()
    }
}
/// A type-erased `EventSinkStream`.
pub(crate) trait EventSinkStreamAny: Send + 'static {
    /// Human-readable name of the event type, as returned by
    /// `any::type_name()`.
    fn event_type_name(&self) -> &'static str;
    /// Starts or resumes the collection of new events.
    fn open(&mut self);
    /// Pauses the collection of new events.
    fn close(&mut self);
    /// Encodes and collects all pending events into a vector of
    /// MessagePack-encoded payloads.
    fn collect(&mut self) -> Result<Vec<Vec<u8>>, RmpEncodeError>;
}
impl<E> EventSinkStreamAny for E
where
    E: EventSinkStream + Send + 'static,
    E::Item: Serialize,
{
    fn event_type_name(&self) -> &'static str {
        std::any::type_name::<E::Item>()
    }
    fn open(&mut self) {
        // Fully qualified call to make explicit that this forwards to the
        // `EventSinkStream` trait method rather than recursing.
        EventSinkStream::open(self);
    }
    fn close(&mut self) {
        EventSinkStream::close(self);
    }
    fn collect(&mut self) -> Result<Vec<Vec<u8>>, RmpEncodeError> {
        // Fold every event into the accumulator, bailing out on the first
        // encoding failure.
        EventSinkStream::try_fold(self, Vec::new(), |mut encoded_events, event| {
            encoded_events.push(rmp_serde::to_vec_named(&event)?);
            Ok(encoded_events)
        })
    }
}
/// A type-erased `ReplyReceiver` that returns MessagePack-encoded replies.
pub(crate) trait ReplyReceiverAny {
    /// Takes the replies, if any, encodes them and collects them in a vector.
    fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, RmpEncodeError>>;
}
impl<R: Serialize + 'static> ReplyReceiverAny for ReplyReceiver<R> {
    fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, RmpEncodeError>> {
        // `None` when the replies were already taken or never produced.
        let replies = self.take()?;
        // Encode each reply, short-circuiting on the first encoding error.
        let encoded_replies: Result<Vec<Vec<u8>>, RmpEncodeError> = replies
            .into_iter()
            .map(|reply| rmp_serde::to_vec_named(&reply))
            .collect();
        Some(encoded_replies)
    }
}

View File

@ -0,0 +1,673 @@
use std::time::Duration;
use bytes::Buf;
use prost::Message;
use prost_types::Timestamp;
use tai_time::MonotonicTime;
use crate::rpc::key_registry::{KeyRegistry, KeyRegistryId};
use crate::rpc::EndpointRegistry;
use crate::simulation::{SimInit, Simulation};
use super::codegen::custom_transport::*;
use super::codegen::simulation::*;
/// Transport-independent server implementation.
///
/// This implementation implements the protobuf services without any
/// transport-specific management.
pub(crate) struct GenericServer<F> {
    // Closure producing a fresh simulation bench on each (re-)initialization.
    sim_gen: F,
    // Active simulation with its endpoint and key registries; `None` until
    // the first successful `init` request.
    sim_context: Option<(Simulation, EndpointRegistry, KeyRegistry)>,
}
impl<F> GenericServer<F>
where
    F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
{
    /// Creates a new `GenericServer` without any active simulation.
    pub(crate) fn new(sim_gen: F) -> Self {
        Self {
            sim_gen,
            sim_context: None,
        }
    }
    /// Processes an encoded `AnyRequest` message and returns an encoded
    /// `AnyReply`.
    ///
    /// This is the single entry point for transports that tunnel every request
    /// through the `AnyRequest`/`AnyReply` envelope messages.
    // NOTE(review): not called by the gRPC transport, hence the `dead_code`
    // allowance.
    #[allow(dead_code)]
    pub(crate) fn service_request<B>(&mut self, request_buf: B) -> Vec<u8>
    where
        B: Buf,
    {
        // Decode, dispatch to the matching handler, then wrap the outcome in
        // the corresponding `AnyReply` variant.
        let reply = match AnyRequest::decode(request_buf) {
            Ok(AnyRequest { request: Some(req) }) => match req {
                any_request::Request::InitRequest(request) => {
                    any_reply::Reply::InitReply(self.init(request))
                }
                any_request::Request::TimeRequest(request) => {
                    any_reply::Reply::TimeReply(self.time(request))
                }
                any_request::Request::StepRequest(request) => {
                    any_reply::Reply::StepReply(self.step(request))
                }
                any_request::Request::StepUntilRequest(request) => {
                    any_reply::Reply::StepUntilReply(self.step_until(request))
                }
                any_request::Request::ScheduleEventRequest(request) => {
                    any_reply::Reply::ScheduleEventReply(self.schedule_event(request))
                }
                any_request::Request::CancelEventRequest(request) => {
                    any_reply::Reply::CancelEventReply(self.cancel_event(request))
                }
                any_request::Request::ProcessEventRequest(request) => {
                    any_reply::Reply::ProcessEventReply(self.process_event(request))
                }
                any_request::Request::ProcessQueryRequest(request) => {
                    any_reply::Reply::ProcessQueryReply(self.process_query(request))
                }
                any_request::Request::ReadEventsRequest(request) => {
                    any_reply::Reply::ReadEventsReply(self.read_events(request))
                }
                any_request::Request::OpenSinkRequest(request) => {
                    any_reply::Reply::OpenSinkReply(self.open_sink(request))
                }
                any_request::Request::CloseSinkRequest(request) => {
                    any_reply::Reply::CloseSinkReply(self.close_sink(request))
                }
            },
            // Decoded successfully but no oneof variant was populated.
            Ok(AnyRequest { request: None }) => any_reply::Reply::Error(ServerError {
                code: ServerErrorCode::EmptyRequest as i32,
                message: "the message did not contain any request".to_string(),
            }),
            // The buffer does not hold a valid `AnyRequest`.
            Err(err) => any_reply::Reply::Error(ServerError {
                code: ServerErrorCode::UnknownRequest as i32,
                message: format!("bad request: {}", err),
            }),
        };
        let reply = AnyReply { reply: Some(reply) };
        reply.encode_to_vec()
    }
    /// Initialize a simulation with the provided time.
    ///
    /// If a simulation is already active, it is destructed and replaced with a
    /// new simulation.
    ///
    /// If the initialization time is not provided, it is initialized with the
    /// epoch of `MonotonicTime` (1970-01-01 00:00:00 TAI).
    pub(crate) fn init(&mut self, request: InitRequest) -> InitReply {
        // A missing timestamp falls back to `Timestamp::default()`, i.e. the
        // epoch.
        let start_time = request.time.unwrap_or_default();
        let reply = if let Some(start_time) = timestamp_to_monotonic(start_time) {
            // Build a fresh bench; any previously active simulation is
            // dropped when `sim_context` is overwritten below.
            let (sim_init, endpoint_registry) = (self.sim_gen)();
            let simulation = sim_init.init(start_time);
            self.sim_context = Some((simulation, endpoint_registry, KeyRegistry::default()));
            init_reply::Result::Empty(())
        } else {
            init_reply::Result::Error(Error {
                code: ErrorCode::InvalidTime as i32,
                message: "out-of-range nanosecond field".to_string(),
            })
        };
        InitReply {
            result: Some(reply),
        }
    }
    /// Returns the current simulation time.
    pub(crate) fn time(&mut self, _request: TimeRequest) -> TimeReply {
        let reply = match &self.sim_context {
            Some((simulation, ..)) => {
                // The simulation time may fall outside the representable
                // protobuf `Timestamp` range.
                if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) {
                    time_reply::Result::Time(timestamp)
                } else {
                    time_reply::Result::Error(Error {
                        code: ErrorCode::SimulationTimeOutOfRange as i32,
                        message: "the final simulation time is out of range".to_string(),
                    })
                }
            }
            None => time_reply::Result::Error(Error {
                code: ErrorCode::SimulationNotStarted as i32,
                message: "the simulation was not started".to_string(),
            }),
        };
        TimeReply {
            result: Some(reply),
        }
    }
    /// Advances simulation time to that of the next scheduled event, processing
    /// that event as well as all other event scheduled for the same time.
    ///
    /// Processing is gated by a (possibly blocking) call to
    /// [`Clock::synchronize()`](crate::time::Clock::synchronize) on the
    /// configured simulation clock. This method blocks until all newly
    /// processed events have completed.
    pub(crate) fn step(&mut self, _request: StepRequest) -> StepReply {
        let reply = match &mut self.sim_context {
            Some((simulation, ..)) => {
                simulation.step();
                if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) {
                    step_reply::Result::Time(timestamp)
                } else {
                    step_reply::Result::Error(Error {
                        code: ErrorCode::SimulationTimeOutOfRange as i32,
                        message: "the final simulation time is out of range".to_string(),
                    })
                }
            }
            None => step_reply::Result::Error(Error {
                code: ErrorCode::SimulationNotStarted as i32,
                message: "the simulation was not started".to_string(),
            }),
        };
        StepReply {
            result: Some(reply),
        }
    }
    /// Iteratively advances the simulation time until the specified deadline,
    /// as if by calling
    /// [`Simulation::step()`](crate::simulation::Simulation::step) repeatedly.
    ///
    /// This method blocks until all events scheduled up to the specified target
    /// time have completed. The simulation time upon completion is equal to the
    /// specified target time, whether or not an event was scheduled for that
    /// time.
    pub(crate) fn step_until(&mut self, request: StepUntilRequest) -> StepUntilReply {
        // Immediately-invoked closure so that `?` can be used for early error
        // returns.
        let reply = move || -> Result<Timestamp, (ErrorCode, &str)> {
            let deadline = request
                .deadline
                .ok_or((ErrorCode::MissingArgument, "missing deadline argument"))?;
            let simulation = match deadline {
                // Absolute deadline.
                step_until_request::Deadline::Time(time) => {
                    let time = timestamp_to_monotonic(time)
                        .ok_or((ErrorCode::InvalidTime, "out-of-range nanosecond field"))?;
                    let (simulation, ..) = self.sim_context.as_mut().ok_or((
                        ErrorCode::SimulationNotStarted,
                        "the simulation was not started",
                    ))?;
                    simulation.step_until(time).map_err(|_| {
                        (
                            ErrorCode::InvalidTime,
                            "the specified deadline lies in the past",
                        )
                    })?;
                    simulation
                }
                // Deadline relative to the current simulation time.
                step_until_request::Deadline::Duration(duration) => {
                    let duration = to_positive_duration(duration).ok_or((
                        ErrorCode::InvalidDuration,
                        "the specified deadline lies in the past",
                    ))?;
                    let (simulation, ..) = self.sim_context.as_mut().ok_or((
                        ErrorCode::SimulationNotStarted,
                        "the simulation was not started",
                    ))?;
                    simulation.step_by(duration);
                    simulation
                }
            };
            let timestamp = monotonic_to_timestamp(simulation.time()).ok_or((
                ErrorCode::SimulationTimeOutOfRange,
                "the final simulation time is out of range",
            ))?;
            Ok(timestamp)
        }();
        StepUntilReply {
            result: Some(match reply {
                Ok(timestamp) => step_until_reply::Result::Time(timestamp),
                Err((code, message)) => step_until_reply::Result::Error(Error {
                    code: code as i32,
                    message: message.to_string(),
                }),
            }),
        }
    }
/// Schedules an event at a future time.
pub(crate) fn schedule_event(&mut self, request: ScheduleEventRequest) -> ScheduleEventReply {
let reply = move || -> Result<Option<KeyRegistryId>, (ErrorCode, String)> {
let source_name = &request.source_name;
let msgpack_event = &request.event;
let with_key = request.with_key.unwrap_or_default();
let period = request
.period
.map(|period| {
to_strictly_positive_duration(period).ok_or((
ErrorCode::InvalidDuration,
"the specified event period is not strictly positive".to_string(),
))
})
.transpose()?;
let (simulation, endpoint_registry, key_registry) =
self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let deadline = request.deadline.ok_or((
ErrorCode::MissingArgument,
"missing deadline argument".to_string(),
))?;
let deadline = match deadline {
schedule_event_request::Deadline::Time(time) => timestamp_to_monotonic(time)
.ok_or((
ErrorCode::InvalidTime,
"out-of-range nanosecond field".to_string(),
))?,
schedule_event_request::Deadline::Duration(duration) => {
let duration = to_strictly_positive_duration(duration).ok_or((
ErrorCode::InvalidDuration,
"the specified scheduling deadline is not in the future".to_string(),
))?;
simulation.time() + duration
}
};
let source = endpoint_registry.get_event_source_mut(source_name).ok_or((
ErrorCode::SourceNotFound,
"no event source is registered with the name '{}'".to_string(),
))?;
let (action, action_key) = match (with_key, period) {
(false, None) => source.event(msgpack_event).map(|action| (action, None)),
(false, Some(period)) => source
.periodic_event(period, msgpack_event)
.map(|action| (action, None)),
(true, None) => source
.keyed_event(msgpack_event)
.map(|(action, key)| (action, Some(key))),
(true, Some(period)) => source
.keyed_periodic_event(period, msgpack_event)
.map(|(action, key)| (action, Some(key))),
}
.map_err(|_| {
(
ErrorCode::InvalidMessage,
format!(
"the event could not be deserialized as type '{}'",
source.event_type_name()
),
)
})?;
let key_id = action_key.map(|action_key| {
// Free stale keys from the registry.
key_registry.remove_expired_keys(simulation.time());
if period.is_some() {
key_registry.insert_eternal_key(action_key)
} else {
key_registry.insert_key(action_key, deadline)
}
});
simulation.process(action);
Ok(key_id)
}();
ScheduleEventReply {
result: Some(match reply {
Ok(Some(key_id)) => {
let (subkey1, subkey2) = key_id.into_raw_parts();
schedule_event_reply::Result::Key(EventKey {
subkey1: subkey1
.try_into()
.expect("action key index is too large to be serialized"),
subkey2,
})
}
Ok(None) => schedule_event_reply::Result::Empty(()),
Err((code, message)) => schedule_event_reply::Result::Error(Error {
code: code as i32,
message,
}),
}),
}
}
    /// Cancels a keyed event.
    ///
    /// Fails if the key is missing, malformed, expired or unknown, or if no
    /// simulation is active.
    pub(crate) fn cancel_event(&mut self, request: CancelEventRequest) -> CancelEventReply {
        // Immediately-invoked closure so that `?` can be used for early error
        // returns.
        let reply = move || -> Result<(), (ErrorCode, String)> {
            let key = request.key.ok_or((
                ErrorCode::MissingArgument,
                "missing key argument".to_string(),
            ))?;
            // `subkey1` travels as a fixed-width integer on the wire; it must
            // fit into `usize` on this platform.
            let subkey1: usize = key
                .subkey1
                .try_into()
                .map_err(|_| (ErrorCode::InvalidKey, "invalid event key".to_string()))?;
            let subkey2 = key.subkey2;
            let (simulation, _, key_registry) = self.sim_context.as_mut().ok_or((
                ErrorCode::SimulationNotStarted,
                "the simulation was not started".to_string(),
            ))?;
            let key_id = KeyRegistryId::from_raw_parts(subkey1, subkey2);
            // Purge keys whose deadline has passed before looking this one up.
            key_registry.remove_expired_keys(simulation.time());
            let key = key_registry.extract_key(key_id).ok_or((
                ErrorCode::InvalidKey,
                "invalid or expired event key".to_string(),
            ))?;
            key.cancel();
            Ok(())
        }();
        CancelEventReply {
            result: Some(match reply {
                Ok(()) => cancel_event_reply::Result::Empty(()),
                Err((code, message)) => cancel_event_reply::Result::Error(Error {
                    code: code as i32,
                    message,
                }),
            }),
        }
    }
/// Broadcasts an event from an event source immediately, blocking until
/// completion.
///
/// Simulation time remains unchanged.
pub(crate) fn process_event(&mut self, request: ProcessEventRequest) -> ProcessEventReply {
let reply = move || -> Result<(), (ErrorCode, String)> {
let source_name = &request.source_name;
let msgpack_event = &request.event;
let (simulation, registry, _) = self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let source = registry.get_event_source_mut(source_name).ok_or((
ErrorCode::SourceNotFound,
"no source is registered with the name '{}'".to_string(),
))?;
let event = source.event(msgpack_event).map_err(|_| {
(
ErrorCode::InvalidMessage,
format!(
"the event could not be deserialized as type '{}'",
source.event_type_name()
),
)
})?;
simulation.process(event);
Ok(())
}();
ProcessEventReply {
result: Some(match reply {
Ok(()) => process_event_reply::Result::Empty(()),
Err((code, message)) => process_event_reply::Result::Error(Error {
code: code as i32,
message,
}),
}),
}
}
/// Broadcasts an event from an event source immediately, blocking until
/// completion.
///
/// Simulation time remains unchanged.
pub(crate) fn process_query(&mut self, request: ProcessQueryRequest) -> ProcessQueryReply {
let reply = move || -> Result<Vec<Vec<u8>>, (ErrorCode, String)> {
let source_name = &request.source_name;
let msgpack_request = &request.request;
let (simulation, registry, _) = self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let source = registry.get_query_source_mut(source_name).ok_or((
ErrorCode::SourceNotFound,
"no source is registered with the name '{}'".to_string(),
))?;
let (query, mut promise) = source.query(msgpack_request).map_err(|_| {
(
ErrorCode::InvalidMessage,
format!(
"the request could not be deserialized as type '{}'",
source.request_type_name()
),
)
})?;
simulation.process(query);
let replies = promise.take_collect().ok_or((
ErrorCode::InternalError,
"a reply to the query was expected but none was available".to_string(),
))?;
replies.map_err(|_| {
(
ErrorCode::InvalidMessage,
format!(
"the reply could not be serialized as type '{}'",
source.reply_type_name()
),
)
})
}();
match reply {
Ok(replies) => ProcessQueryReply {
replies,
result: Some(process_query_reply::Result::Empty(())),
},
Err((code, message)) => ProcessQueryReply {
replies: Vec::new(),
result: Some(process_query_reply::Result::Error(Error {
code: code as i32,
message,
})),
},
}
}
/// Read all events from an event sink.
pub(crate) fn read_events(&mut self, request: ReadEventsRequest) -> ReadEventsReply {
let reply = move || -> Result<Vec<Vec<u8>>, (ErrorCode, String)> {
let sink_name = &request.sink_name;
let (_, registry, _) = self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let sink = registry.get_sink_mut(sink_name).ok_or((
ErrorCode::SinkNotFound,
"no sink is registered with the name '{}'".to_string(),
))?;
sink.collect().map_err(|_| {
(
ErrorCode::InvalidMessage,
format!(
"the event could not be serialized from type '{}'",
sink.event_type_name()
),
)
})
}();
match reply {
Ok(events) => ReadEventsReply {
events,
result: Some(read_events_reply::Result::Empty(())),
},
Err((code, message)) => ReadEventsReply {
events: Vec::new(),
result: Some(read_events_reply::Result::Error(Error {
code: code as i32,
message,
})),
},
}
}
/// Opens an event sink.
pub(crate) fn open_sink(&mut self, request: OpenSinkRequest) -> OpenSinkReply {
let reply = move || -> Result<(), (ErrorCode, String)> {
let sink_name = &request.sink_name;
let (_, registry, _) = self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let sink = registry.get_sink_mut(sink_name).ok_or((
ErrorCode::SinkNotFound,
"no sink is registered with the name '{}'".to_string(),
))?;
sink.open();
Ok(())
}();
match reply {
Ok(()) => OpenSinkReply {
result: Some(open_sink_reply::Result::Empty(())),
},
Err((code, message)) => OpenSinkReply {
result: Some(open_sink_reply::Result::Error(Error {
code: code as i32,
message,
})),
},
}
}
/// Closes an event sink.
pub(crate) fn close_sink(&mut self, request: CloseSinkRequest) -> CloseSinkReply {
let reply = move || -> Result<(), (ErrorCode, String)> {
let sink_name = &request.sink_name;
let (_, registry, _) = self.sim_context.as_mut().ok_or((
ErrorCode::SimulationNotStarted,
"the simulation was not started".to_string(),
))?;
let sink = registry.get_sink_mut(sink_name).ok_or((
ErrorCode::SinkNotFound,
"no sink is registered with the name '{}'".to_string(),
))?;
sink.close();
Ok(())
}();
match reply {
Ok(()) => CloseSinkReply {
result: Some(close_sink_reply::Result::Empty(())),
},
Err((code, message)) => CloseSinkReply {
result: Some(close_sink_reply::Result::Error(Error {
code: code as i32,
message,
})),
},
}
}
}
/// Attempts a cast from a `MonotonicTime` to a protobuf `Timestamp`.
///
/// This will fail if the time is outside the protobuf-specified range for
/// timestamps (0001-01-01 00:00:00 to 9999-12-31 23:59:59).
fn monotonic_to_timestamp(monotonic_time: MonotonicTime) -> Option<Timestamp> {
    // Unix timestamps bounding the validity range mandated by the protobuf
    // specification for `Timestamp`: 0001-01-01 00:00:00 (min) and
    // 9999-12-31 23:59:59 (max).
    const MIN_SECS: i64 = -62135596800;
    const MAX_SECS: i64 = 253402300799;
    let seconds = monotonic_time.as_secs();
    if seconds < MIN_SECS || seconds > MAX_SECS {
        return None;
    }
    Some(Timestamp {
        seconds,
        nanos: monotonic_time.subsec_nanos() as i32,
    })
}
/// Attempts a cast from a protobuf `Timestamp` to a `MonotonicTime`.
///
/// This should never fail provided that the `Timestamp` complies with the
/// protobuf specification. It can only fail if the nanosecond part is negative
/// or greater than 999'999'999.
fn timestamp_to_monotonic(timestamp: Timestamp) -> Option<MonotonicTime> {
    // A negative `nanos` field fails the conversion to `u32`; an out-of-range
    // value is rejected by `MonotonicTime::new`.
    let nanos: u32 = timestamp.nanos.try_into().ok()?;
    MonotonicTime::new(timestamp.seconds, nanos)
}
/// Attempts a cast from a protobuf `Duration` to a `std::time::Duration`.
///
/// If the `Duration` complies with the protobuf specification, this can only
/// fail if the duration is negative.
fn to_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
    // `try_from` fails exactly when the corresponding field is negative.
    let secs = u64::try_from(duration.seconds).ok()?;
    let nanos = u32::try_from(duration.nanos).ok()?;
    Some(Duration::new(secs, nanos))
}
/// Attempts a cast from a protobuf `Duration` to a strictly positive
/// `std::time::Duration`.
///
/// If the `Duration` complies with the protobuf specification, this can only
/// fail if the duration is negative or null.
fn to_strictly_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
    // `try_from` fails exactly when the corresponding field is negative.
    let secs = u64::try_from(duration.seconds).ok()?;
    let nanos = u32::try_from(duration.nanos).ok()?;
    // Reject the null duration.
    if (secs, nanos) == (0, 0) {
        return None;
    }
    Some(Duration::new(secs, nanos))
}

146
asynchronix/src/rpc/grpc.rs Normal file
View File

@ -0,0 +1,146 @@
//! GRPC simulation server.
use std::net::SocketAddr;
use std::sync::Mutex;
use std::sync::MutexGuard;
use tonic::{transport::Server, Request, Response, Status};
use crate::rpc::EndpointRegistry;
use crate::simulation::SimInit;
use super::codegen::simulation::*;
use super::generic_server::GenericServer;
/// Runs a GRPC simulation server.
///
/// The first argument is a closure that is called every time the simulation is
/// started by the remote client. It must create a new `SimInit` object
/// complemented by a registry that exposes the public event and query
/// interface.
pub fn run<F>(sim_gen: F, addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>>
where
    F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
{
    // Use a single-threaded server.
    //
    // Bug fix: `enable_all()` is required — a current-thread runtime built
    // without the I/O (and time) drivers cannot bind the TCP listener inside
    // `Server::serve()`, which fails at runtime.
    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()?;
    let sim_manager = GrpcServer::new(sim_gen);
    rt.block_on(async move {
        Server::builder()
            .add_service(simulation_server::SimulationServer::new(sim_manager))
            .serve(addr)
            .await?;
        Ok(())
    })
}
/// gRPC front-end for the simulation service.
struct GrpcServer<F>
where
    F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
{
    // The transport-agnostic server implementation, wrapped in a mutex so
    // that the `&self` handler methods of the tonic service trait can mutate
    // it; as a consequence, requests are effectively processed one at a time.
    inner: Mutex<GenericServer<F>>,
}
impl<F> GrpcServer<F>
where
    F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
{
    /// Wraps the provided simulation factory into a mutex-protected server.
    fn new(sim_gen: F) -> Self {
        let server = GenericServer::new(sim_gen);

        Self {
            inner: Mutex::new(server),
        }
    }

    /// Locks and returns the inner `GenericServer`.
    ///
    /// Panics if the mutex was poisoned by a panicking request handler.
    fn inner(&self) -> MutexGuard<'_, GenericServer<F>> {
        self.inner.lock().unwrap()
    }
}
// Every RPC is handled the same way: unwrap the tonic request envelope,
// delegate to the locked `GenericServer`, and wrap its reply back into a
// tonic response. No handler can actually fail at this layer; errors are
// reported inside the reply messages themselves.
#[tonic::async_trait]
impl<F> simulation_server::Simulation for GrpcServer<F>
where
    F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
{
    async fn init(&self, request: Request<InitRequest>) -> Result<Response<InitReply>, Status> {
        Ok(Response::new(self.inner().init(request.into_inner())))
    }
    async fn time(&self, request: Request<TimeRequest>) -> Result<Response<TimeReply>, Status> {
        Ok(Response::new(self.inner().time(request.into_inner())))
    }
    async fn step(&self, request: Request<StepRequest>) -> Result<Response<StepReply>, Status> {
        Ok(Response::new(self.inner().step(request.into_inner())))
    }
    async fn step_until(
        &self,
        request: Request<StepUntilRequest>,
    ) -> Result<Response<StepUntilReply>, Status> {
        Ok(Response::new(self.inner().step_until(request.into_inner())))
    }
    async fn schedule_event(
        &self,
        request: Request<ScheduleEventRequest>,
    ) -> Result<Response<ScheduleEventReply>, Status> {
        Ok(Response::new(self.inner().schedule_event(request.into_inner())))
    }
    async fn cancel_event(
        &self,
        request: Request<CancelEventRequest>,
    ) -> Result<Response<CancelEventReply>, Status> {
        Ok(Response::new(self.inner().cancel_event(request.into_inner())))
    }
    async fn process_event(
        &self,
        request: Request<ProcessEventRequest>,
    ) -> Result<Response<ProcessEventReply>, Status> {
        Ok(Response::new(self.inner().process_event(request.into_inner())))
    }
    async fn process_query(
        &self,
        request: Request<ProcessQueryRequest>,
    ) -> Result<Response<ProcessQueryReply>, Status> {
        Ok(Response::new(self.inner().process_query(request.into_inner())))
    }
    async fn read_events(
        &self,
        request: Request<ReadEventsRequest>,
    ) -> Result<Response<ReadEventsReply>, Status> {
        Ok(Response::new(self.inner().read_events(request.into_inner())))
    }
    async fn open_sink(
        &self,
        request: Request<OpenSinkRequest>,
    ) -> Result<Response<OpenSinkReply>, Status> {
        Ok(Response::new(self.inner().open_sink(request.into_inner())))
    }
    async fn close_sink(
        &self,
        request: Request<CloseSinkRequest>,
    ) -> Result<Response<CloseSinkReply>, Status> {
        Ok(Response::new(self.inner().close_sink(request.into_inner())))
    }
}

View File

@ -0,0 +1,47 @@
use crate::time::{ActionKey, MonotonicTime};
use crate::util::indexed_priority_queue::{IndexedPriorityQueue, InsertKey};
/// Unique identifier of an `ActionKey` stored in a `KeyRegistry`.
pub(crate) type KeyRegistryId = InsertKey;

/// A collection of `ActionKey`s indexed by a unique identifier.
#[derive(Default)]
pub(crate) struct KeyRegistry {
    // The priority queue is keyed by expiration deadline so that expired
    // entries can be pruned cheaply from the front (see
    // `remove_expired_keys`).
    keys: IndexedPriorityQueue<MonotonicTime, ActionKey>,
}
impl KeyRegistry {
    /// Inserts an `ActionKey` into the registry.
    ///
    /// The provided expiration deadline is the latest time at which the key
    /// may still be active.
    pub(crate) fn insert_key(
        &mut self,
        action_key: ActionKey,
        expiration: MonotonicTime,
    ) -> KeyRegistryId {
        self.keys.insert(expiration, action_key)
    }

    /// Inserts a non-expiring `ActionKey` into the registry.
    pub(crate) fn insert_eternal_key(&mut self, action_key: ActionKey) -> KeyRegistryId {
        // A key that never expires is simply given the largest representable
        // deadline.
        self.insert_key(action_key, MonotonicTime::MAX)
    }

    /// Removes an `ActionKey` from the registry and returns it.
    ///
    /// Returns `None` if the key was not found in the registry.
    pub(crate) fn extract_key(&mut self, key_id: KeyRegistryId) -> Option<ActionKey> {
        self.keys.extract(key_id).map(|(_, action_key)| action_key)
    }

    /// Remove keys with an expiration deadline strictly predating the argument.
    pub(crate) fn remove_expired_keys(&mut self, now: MonotonicTime) {
        // Deadlines are pulled in increasing order, so pruning stops at the
        // first entry that is still alive.
        while self
            .keys
            .peek_key()
            .map_or(false, |expiration| *expiration < now)
        {
            self.keys.pull();
        }
    }
}

View File

@ -14,8 +14,9 @@
//! using the [`Address`]es of the target models,
//! 3. instantiation of a [`SimInit`] simulation builder and migration of all
//! models and mailboxes to the builder with [`SimInit::add_model()`],
//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`] or
//! [`SimInit::init_with_clock()`],
//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`],
//! possibly preceded by the setup of a custom clock with
//! [`SimInit::set_clock()`],
//! 5. discrete-time simulation, which typically involves scheduling events and
//! incrementing simulation time while observing the models outputs.
//!
@ -76,7 +77,7 @@
//! such pathological deadlocks and the "expected" deadlock that occurs when all
//! events in a given time slice have completed and all models are starved on an
//! empty mailbox. Consequently, blocking method such as [`SimInit::init()`],
//! [`Simulation::step()`], [`Simulation::send_event()`], etc., will return
//! [`Simulation::step()`], [`Simulation::process_event()`], etc., will return
//! without error after a pathological deadlock, leaving the user responsible
//! for inferring the deadlock from the behavior of the simulation in the next
//! steps. This is obviously not ideal, but is hopefully only a temporary state
@ -86,17 +87,19 @@
//!
//! Although uncommon, there is sometimes a need for connecting and/or
//! disconnecting models after they have been migrated to the simulation.
//! Likewise, one may want to connect or disconnect an [`EventSlot`] or
//! [`EventStream`] after the simulation has been instantiated.
//! Likewise, one may want to connect or disconnect an
//! [`EventSlot`](crate::ports::EventSlot) or
//! [`EventBuffer`](crate::ports::EventBuffer) after the simulation has been
//! instantiated.
//!
//! There is actually a very simple solution to this problem: since the
//! [`InputFn`](crate::model::InputFn) trait also matches closures of type
//! `FnOnce(&mut impl Model)`, it is enough to invoke
//! [`Simulation::send_event()`] with a closure that connects or disconnects a
//! port, such as:
//! [`InputFn`] trait also matches closures of type `FnOnce(&mut impl Model)`,
//! it is enough to invoke [`Simulation::process_event()`] with a closure that
//! connects or disconnects a port, such as:
//!
//! ```
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::model::Model;
//! # use asynchronix::ports::Output;
//! # use asynchronix::time::{MonotonicTime, Scheduler};
//! # use asynchronix::simulation::{Mailbox, SimInit};
//! # pub struct ModelA {
@ -111,7 +114,7 @@
//! # let modelA_addr = Mailbox::<ModelA>::new().address();
//! # let modelB_addr = Mailbox::<ModelB>::new().address();
//! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH);
//! simu.send_event(
//! simu.process_event(
//! |m: &mut ModelA| {
//! m.output.connect(ModelB::input, modelB_addr);
//! },
@ -119,11 +122,9 @@
//! &modelA_addr
//! );
//! ```
mod endpoints;
mod mailbox;
mod sim_init;
pub use endpoints::{EventSlot, EventStream};
pub use mailbox::{Address, Mailbox};
pub use sim_init::SimInit;
@ -136,23 +137,22 @@ use std::time::Duration;
use recycle_box::{coerce_box, RecycleBox};
use crate::executor::Executor;
use crate::model::{InputFn, Model, ReplierFn};
use crate::model::Model;
use crate::ports::{InputFn, ReplierFn};
use crate::time::{
self, Clock, Deadline, EventKey, MonotonicTime, NoClock, ScheduledEvent, SchedulerQueue,
SchedulingError, TearableAtomicTime,
self, Action, ActionKey, Clock, Deadline, MonotonicTime, SchedulerQueue, SchedulingError,
TearableAtomicTime,
};
use crate::util::futures::SeqFuture;
use crate::util::seq_futures::SeqFuture;
use crate::util::slot;
use crate::util::sync_cell::SyncCell;
/// Simulation environment.
///
/// A `Simulation` is created by calling
/// [`SimInit::init()`](crate::simulation::SimInit::init) or
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock)
/// method on a simulation initializer. It contains an asynchronous executor
/// that runs all simulation models added beforehand to
/// [`SimInit`](crate::simulation::SimInit).
/// [`SimInit::init()`](crate::simulation::SimInit::init) on a simulation
/// initializer. It contains an asynchronous executor that runs all simulation
/// models added beforehand to [`SimInit`].
///
/// A [`Simulation`] object also manages an event scheduling queue and
/// simulation time. The scheduling queue can be accessed from the simulation
@ -163,10 +163,10 @@ use crate::util::sync_cell::SyncCell;
/// method.
///
/// Events and queries can be scheduled immediately, *i.e.* for the current
/// simulation time, using [`send_event()`](Simulation::send_event) and
/// [`send_query()`](Simulation::send_query). Calling these methods will block
/// until all computations triggered by such event or query have completed. In
/// the case of queries, the response is returned.
/// simulation time, using [`process_event()`](Simulation::process_event) and
/// [`send_query()`](Simulation::process_query). Calling these methods will
/// block until all computations triggered by such event or query have
/// completed. In the case of queries, the response is returned.
///
/// Events can also be scheduled at a future simulation time using one of the
/// [`schedule_*()`](Simulation::schedule_event) method. These methods queue an
@ -193,32 +193,18 @@ pub struct Simulation {
}
impl Simulation {
/// Creates a new `Simulation`.
/// Creates a new `Simulation` with the specified clock.
pub(crate) fn new(
executor: Executor,
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
time: SyncCell<TearableAtomicTime>,
clock: Box<dyn Clock + 'static>,
) -> Self {
Self {
executor,
scheduler_queue,
time,
clock: Box::new(NoClock::new()),
}
}
/// Creates a new `Simulation` with the specified clock.
pub(crate) fn with_clock(
executor: Executor,
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
time: SyncCell<TearableAtomicTime>,
clock: impl Clock + 'static,
) -> Self {
Self {
executor,
scheduler_queue,
time,
clock: Box::new(clock),
clock,
}
}
@ -267,6 +253,37 @@ impl Simulation {
Ok(())
}
/// Schedules an action at a future time.
///
/// An error is returned if the specified time is not in the future of the
/// current simulation time.
///
/// If multiple actions send events at the same simulation time to the same
/// model, these events are guaranteed to be processed according to the
/// scheduling order of the actions.
pub fn schedule(
&mut self,
deadline: impl Deadline,
action: Action,
) -> Result<(), SchedulingError> {
let now = self.time();
let time = deadline.into_time(now);
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
let mut scheduler_queue = self.scheduler_queue.lock().unwrap();
// The channel ID is set to the same value for all actions. This
// ensures that the relative scheduling order of all source events is
// preserved, which is important if some of them target the same models.
// The value 0 was chosen as it prevents collisions with channel IDs as
// the latter are always non-zero.
scheduler_queue.insert((time, 0), action);
Ok(())
}
/// Schedules an event at a future time.
///
/// An error is returned if the specified time is not in the future of the
@ -294,6 +311,7 @@ impl Simulation {
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
time::schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue);
Ok(())
@ -314,7 +332,7 @@ impl Simulation {
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<EventKey, SchedulingError>
) -> Result<ActionKey, SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
@ -397,7 +415,7 @@ impl Simulation {
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<EventKey, SchedulingError>
) -> Result<ActionKey, SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
@ -424,10 +442,19 @@ impl Simulation {
Ok(event_key)
}
/// Sends and processes an event, blocking until completion.
    /// Processes an action immediately, blocking until completion.
    ///
    /// Simulation time remains unchanged. The periodicity of the action, if
    /// any, is ignored.
    pub fn process(&mut self, action: Action) {
        // Spawn the action on the simulation executor without retaining a
        // handle to it...
        action.spawn_and_forget(&self.executor);
        // ...then drive the executor — presumably `run()` returns once all
        // spawned tasks have completed (TODO confirm against `Executor` docs).
        self.executor.run();
    }
/// Processes an event immediately, blocking until completion.
///
/// Simulation time remains unchanged.
pub fn send_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
pub fn process_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
@ -454,10 +481,10 @@ impl Simulation {
self.executor.run();
}
/// Sends and processes a query, blocking until completion.
/// Processes a query immediately, blocking until completion.
///
/// Simulation time remains unchanged.
pub fn send_query<M, F, T, R, S>(
pub fn process_query<M, F, T, R, S>(
&mut self,
func: F,
arg: T,
@ -497,36 +524,34 @@ impl Simulation {
reply_reader.try_read().map_err(|_| QueryError {})
}
/// Advances simulation time to that of the next scheduled event if its
/// Advances simulation time to that of the next scheduled action if its
/// scheduling time does not exceed the specified bound, processing that
/// event as well as all other events scheduled for the same time.
/// action as well as all other actions scheduled for the same time.
///
/// If at least one event was found that satisfied the time bound, the
/// If at least one action was found that satisfied the time bound, the
/// corresponding new simulation time is returned.
fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option<MonotonicTime> {
// Function pulling the next event. If the event is periodic, it is
// Function pulling the next action. If the action is periodic, it is
// immediately re-scheduled.
fn pull_next_event(
scheduler_queue: &mut MutexGuard<SchedulerQueue>,
) -> Box<dyn ScheduledEvent> {
let ((time, channel_id), event) = scheduler_queue.pull().unwrap();
if let Some((event_clone, period)) = event.next() {
scheduler_queue.insert((time + period, channel_id), event_clone);
fn pull_next_action(scheduler_queue: &mut MutexGuard<SchedulerQueue>) -> Action {
let ((time, channel_id), action) = scheduler_queue.pull().unwrap();
if let Some((action_clone, period)) = action.next() {
scheduler_queue.insert((time + period, channel_id), action_clone);
}
event
action
}
// Closure returning the next key which time stamp is no older than the
// upper bound, if any. Cancelled events are pulled and discarded.
// upper bound, if any. Cancelled actions are pulled and discarded.
let peek_next_key = |scheduler_queue: &mut MutexGuard<SchedulerQueue>| {
loop {
match scheduler_queue.peek() {
Some((&k, t)) if k.0 <= upper_time_bound => {
if !t.is_cancelled() {
break Some(k);
Some((&key, action)) if key.0 <= upper_time_bound => {
if !action.is_cancelled() {
break Some(key);
}
// Discard cancelled events.
// Discard cancelled actions.
scheduler_queue.pull();
}
_ => break None,
@ -540,37 +565,37 @@ impl Simulation {
self.time.write(current_key.0);
loop {
let event = pull_next_event(&mut scheduler_queue);
let action = pull_next_action(&mut scheduler_queue);
let mut next_key = peek_next_key(&mut scheduler_queue);
if next_key != Some(current_key) {
// Since there are no other events targeting the same mailbox
// and the same time, the event is spawned immediately.
event.spawn_and_forget(&self.executor);
// Since there are no other actions targeting the same mailbox
// and the same time, the action is spawned immediately.
action.spawn_and_forget(&self.executor);
} else {
// To ensure that their relative order of execution is
// preserved, all event targeting the same mailbox are executed
// sequentially within a single compound future.
let mut event_sequence = SeqFuture::new();
event_sequence.push(event.into_future());
// preserved, all actions targeting the same mailbox are
// executed sequentially within a single compound future.
let mut action_sequence = SeqFuture::new();
action_sequence.push(action.into_future());
loop {
let event = pull_next_event(&mut scheduler_queue);
event_sequence.push(event.into_future());
let action = pull_next_action(&mut scheduler_queue);
action_sequence.push(action.into_future());
next_key = peek_next_key(&mut scheduler_queue);
if next_key != Some(current_key) {
break;
}
}
// Spawn a compound future that sequentially polls all events
// Spawn a compound future that sequentially polls all actions
// targeting the same mailbox.
self.executor.spawn_and_forget(event_sequence);
self.executor.spawn_and_forget(action_sequence);
}
current_key = match next_key {
// If the next event is scheduled at the same time, update the
// If the next action is scheduled at the same time, update the
// key and continue.
Some(k) if k.0 == current_key.0 => k,
// Otherwise wait until all events have completed and return.
// Otherwise wait until all actions have completed and return.
_ => {
drop(scheduler_queue); // make sure the queue's mutex is released.
let current_time = current_key.0;
@ -584,10 +609,10 @@ impl Simulation {
}
}
/// Iteratively advances simulation time and processes all events scheduled
/// Iteratively advances simulation time and processes all actions scheduled
/// up to the specified target time.
///
/// Once the method returns it is guaranteed that (i) all events scheduled
/// Once the method returns it is guaranteed that (i) all actions scheduled
/// up to the specified target time have completed and (ii) the final
/// simulation time matches the target time.
///
@ -598,7 +623,7 @@ impl Simulation {
match self.step_to_next_bounded(target_time) {
// The target time was reached exactly.
Some(t) if t == target_time => return,
// No events are scheduled before or at the target time.
// No actions are scheduled before or at the target time.
None => {
// Update the simulation time.
self.time.write(target_time);

View File

@ -1,69 +0,0 @@
use std::fmt;
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
use crate::util::spsc_queue;
/// An iterator that returns all events that were broadcast by an output port.
///
/// Events are returned in first-in-first-out order. Note that even if the
/// iterator returns `None`, it may still produce more items after simulation
/// time is incremented.
pub struct EventStream<T> {
    // Consumer half of a single-producer single-consumer queue; the producer
    // half presumably lives in the output port that broadcasts the events —
    // confirm against the port implementation.
    consumer: spsc_queue::Consumer<T>,
}

impl<T> EventStream<T> {
    /// Creates a new `EventStream`.
    pub(crate) fn new(consumer: spsc_queue::Consumer<T>) -> Self {
        Self { consumer }
    }
}

impl<T> Iterator for EventStream<T> {
    type Item = T;

    // Pops the oldest pending event; `None` only means the queue is currently
    // empty, not that the stream is exhausted.
    fn next(&mut self) -> Option<Self::Item> {
        self.consumer.pop()
    }
}

impl<T> fmt::Debug for EventStream<T> {
    // The queued events are intentionally not displayed: popping them here
    // would consume them, and `T` is not required to implement `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventStream").finish_non_exhaustive()
    }
}
/// A single-value slot that holds the last event that was broadcast by an
/// output port.
pub struct EventSlot<T> {
    slot: Arc<Mutex<Option<T>>>,
}

impl<T> EventSlot<T> {
    /// Creates a new `EventSlot`.
    pub(crate) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
        Self { slot }
    }

    /// Take the last event, if any, leaving the slot empty.
    ///
    /// Note that even after the event is taken, it may become populated anew
    /// after simulation time is incremented.
    pub fn take(&mut self) -> Option<T> {
        // A mutable reference is not strictly necessary here, but the
        // signature is less surprising this way, stays consistent with
        // `EventStream` and rules out multi-threaded access, which would be
        // likely to be misused.
        match self.slot.try_lock() {
            // Happy path: empty the slot and hand over its content, if any.
            Ok(mut event) => event.take(),
            // The producer currently holds the lock: report the slot as empty
            // rather than blocking.
            Err(TryLockError::WouldBlock) => None,
            // A poisoned mutex means a panic occurred while the slot was
            // being written; propagate the failure.
            Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}

impl<T> fmt::Debug for EventSlot<T> {
    // The slot content is not displayed since `T` need not implement `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventSlot").finish_non_exhaustive()
    }
}

View File

@ -3,7 +3,7 @@ use std::sync::{Arc, Mutex};
use crate::executor::Executor;
use crate::model::Model;
use crate::time::{Clock, Scheduler};
use crate::time::{Clock, NoClock, Scheduler};
use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime};
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCell;
@ -15,6 +15,7 @@ pub struct SimInit {
executor: Executor,
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
time: SyncCell<TearableAtomicTime>,
clock: Box<dyn Clock + 'static>,
}
impl SimInit {
@ -35,6 +36,7 @@ impl SimInit {
executor: Executor::new(num_threads),
scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)),
clock: Box::new(NoClock::new()),
}
}
@ -55,35 +57,25 @@ impl SimInit {
self
}
/// Synchronize the simulation with the provided [`Clock`].
///
/// If the clock isn't explicitly set then the default [`NoClock`] is used,
/// resulting in the simulation running as fast as possible.
pub fn set_clock(mut self, clock: impl Clock + 'static) -> Self {
self.clock = Box::new(clock);
self
}
/// Builds a simulation initialized at the specified simulation time,
/// executing the [`Model::init()`](crate::model::Model::init) method on all
/// model initializers.
///
/// This is equivalent to calling [`SimInit::init_with_clock()`] with a
/// [`NoClock`](crate::time::NoClock) argument and effectively makes the
/// simulation run as fast as possible.
pub fn init(mut self, start_time: MonotonicTime) -> Simulation {
self.time.write(start_time);
self.clock.synchronize(start_time);
self.executor.run();
Simulation::new(self.executor, self.scheduler_queue, self.time)
}
/// Builds a simulation synchronized with the provided
/// [`Clock`](crate::time::Clock) and initialized at the specified
/// simulation time, executing the
/// [`Model::init()`](crate::model::Model::init) method on all model
/// initializers.
pub fn init_with_clock(
mut self,
start_time: MonotonicTime,
mut clock: impl Clock + 'static,
) -> Simulation {
self.time.write(start_time);
clock.synchronize(start_time);
self.executor.run();
Simulation::with_clock(self.executor, self.scheduler_queue, self.time, clock)
Simulation::new(self.executor, self.scheduler_queue, self.time, self.clock)
}
}

View File

@ -51,12 +51,13 @@ mod clock;
mod monotonic_time;
mod scheduler;
pub use tai_time::MonotonicTime;
pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock};
pub(crate) use monotonic_time::TearableAtomicTime;
pub use monotonic_time::{MonotonicTime, SystemTimeError};
pub(crate) use scheduler::{
schedule_event_at_unchecked, schedule_keyed_event_at_unchecked,
schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked,
ScheduledEvent, SchedulerQueue,
KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, SchedulerQueue,
};
pub use scheduler::{Deadline, EventKey, Scheduler, SchedulingError};
pub use scheduler::{Action, ActionKey, Deadline, Scheduler, SchedulingError};

View File

@ -1,14 +1,16 @@
use std::time::{Duration, Instant, SystemTime};
use tai_time::MonotonicClock;
use crate::time::MonotonicTime;
/// A type that can be used to synchronize a simulation.
///
/// This trait abstract over the different types of clocks, such as
/// This trait abstracts over different types of clocks, such as
/// as-fast-as-possible and real-time clocks.
///
/// A clock can be associated to a simulation at initialization time by calling
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock).
/// A clock can be associated to a simulation prior to initialization by calling
/// [`SimInit::set_clock()`](crate::simulation::SimInit::set_clock).
pub trait Clock: Send {
/// Blocks until the deadline.
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus;
@ -49,10 +51,7 @@ impl Clock for NoClock {
/// This clock accepts an arbitrary reference time and remains synchronized with
/// the system's monotonic clock.
#[derive(Copy, Clone, Debug)]
pub struct SystemClock {
wall_clock_ref: Instant,
simulation_ref: MonotonicTime,
}
pub struct SystemClock(MonotonicClock);
impl SystemClock {
/// Constructs a `SystemClock` with an offset between simulation clock and
@ -69,7 +68,7 @@ impl SystemClock {
/// use asynchronix::simulation::SimInit;
/// use asynchronix::time::{MonotonicTime, SystemClock};
///
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
///
/// // Make the simulation start in 1s.
/// let clock = SystemClock::from_instant(t0, Instant::now() + Duration::from_secs(1));
@ -77,13 +76,14 @@ impl SystemClock {
/// let simu = SimInit::new()
/// // .add_model(...)
/// // .add_model(...)
/// .init_with_clock(t0, clock);
/// .set_clock(clock)
/// .init(t0);
/// ```
pub fn from_instant(simulation_ref: MonotonicTime, wall_clock_ref: Instant) -> Self {
Self {
wall_clock_ref,
Self(MonotonicClock::init_from_instant(
simulation_ref,
}
wall_clock_ref,
))
}
/// Constructs a `SystemClock` with an offset between simulation clock and
@ -109,7 +109,7 @@ impl SystemClock {
/// use asynchronix::simulation::SimInit;
/// use asynchronix::time::{MonotonicTime, SystemClock};
///
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
///
/// // Make the simulation start at the next full second boundary.
/// let now_secs = UNIX_EPOCH.elapsed().unwrap().as_secs();
@ -120,58 +120,14 @@ impl SystemClock {
/// let simu = SimInit::new()
/// // .add_model(...)
/// // .add_model(...)
/// .init_with_clock(t0, clock);
/// .set_clock(clock)
/// .init(t0);
/// ```
pub fn from_system_time(simulation_ref: MonotonicTime, wall_clock_ref: SystemTime) -> Self {
// Select the best-correlated `Instant`/`SystemTime` pair from several
// samples to improve robustness towards possible thread suspension
// between the calls to `SystemTime::now()` and `Instant::now()`.
const SAMPLES: usize = 3;
let mut last_instant = Instant::now();
let mut min_delta = Duration::MAX;
let mut ref_time = None;
// Select the best-correlated instant/date pair.
for _ in 0..SAMPLES {
// The inner loop is to work around monotonic clock platform bugs
// that may cause `checked_duration_since` to fail.
let (date, instant, delta) = loop {
let date = SystemTime::now();
let instant = Instant::now();
let delta = instant.checked_duration_since(last_instant);
last_instant = instant;
if let Some(delta) = delta {
break (date, instant, delta);
}
};
// Store the current instant/date if the time elapsed since the last
// measurement is shorter than the previous candidate.
if min_delta > delta {
min_delta = delta;
ref_time = Some((instant, date));
}
}
// Set the selected instant/date as the wall clock reference and adjust
// the simulation reference accordingly.
let (instant_ref, date_ref) = ref_time.unwrap();
let simulation_ref = if date_ref > wall_clock_ref {
let correction = date_ref.duration_since(wall_clock_ref).unwrap();
simulation_ref + correction
} else {
let correction = wall_clock_ref.duration_since(date_ref).unwrap();
simulation_ref - correction
};
Self {
wall_clock_ref: instant_ref,
Self(MonotonicClock::init_from_system_time(
simulation_ref,
}
wall_clock_ref,
))
}
}
@ -179,22 +135,14 @@ impl Clock for SystemClock {
/// Blocks until the system time corresponds to the specified simulation
/// time.
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
let target_time = if deadline >= self.simulation_ref {
self.wall_clock_ref + deadline.duration_since(self.simulation_ref)
} else {
self.wall_clock_ref - self.simulation_ref.duration_since(deadline)
};
let now = self.0.now();
if now <= deadline {
spin_sleep::sleep(deadline.duration_since(now));
let now = Instant::now();
match target_time.checked_duration_since(now) {
Some(sleep_duration) => {
spin_sleep::sleep(sleep_duration);
SyncStatus::Synchronized
}
None => SyncStatus::OutOfSync(now.duration_since(target_time)),
return SyncStatus::Synchronized;
}
SyncStatus::OutOfSync(now.duration_since(deadline))
}
}
@ -233,3 +181,29 @@ impl Clock for AutoSystemClock {
}
}
}
#[cfg(test)]
mod test {
    use super::*;

    // Sanity-checks that `SystemClock::synchronize` actually sleeps until the
    // requested simulation deadline.
    #[test]
    fn smoke_system_clock() {
        let t0 = MonotonicTime::EPOCH;
        const TOLERANCE: f64 = 0.0005; // [s]

        let now = Instant::now();
        let mut clock = SystemClock::from_instant(t0, now);
        // Deadline 200ms of simulation time past the reference point.
        let t1 = t0 + Duration::from_millis(200);
        clock.synchronize(t1);
        let elapsed = now.elapsed().as_secs_f64();
        let dt = t1.duration_since(t0).as_secs_f64();
        // NOTE(review): this check is one-sided — it fails only if the clock
        // wakes up *early* (elapsed < dt - TOLERANCE); oversleeping never
        // fails, presumably to avoid flakiness on loaded CI machines. Confirm
        // this asymmetry is intentional.
        assert!(
            (dt - elapsed) <= TOLERANCE,
            "Expected t = {:.6}s +/- {:.6}s, measured t = {:.6}s",
            dt,
            TOLERANCE,
            elapsed,
        );
    }
}

View File

@ -1,483 +1,10 @@
//! Monotonic simulation time.
use std::error::Error;
use std::fmt;
use std::ops::{Add, AddAssign, Sub, SubAssign};
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
use std::time::{Duration, SystemTime};
use super::MonotonicTime;
use crate::util::sync_cell::TearableAtomic;
const NANOS_PER_SEC: u32 = 1_000_000_000;
/// A nanosecond-precision monotonic clock timestamp.
///
/// A timestamp specifies a [TAI] point in time. It is represented as a 64-bit
/// signed number of seconds and a positive number of nanoseconds, counted with
/// reference to 1970-01-01 00:00:00 TAI. This timestamp format has a number of
/// desirable properties:
///
/// - it enables cheap inter-operation with the standard [`Duration`] type which
/// uses a very similar internal representation,
/// - it constitutes a strict 96-bit superset of 80-bit PTP IEEE-1588
/// timestamps, with the same epoch,
/// - if required, exact conversion to a Unix timestamp is trivial and only
/// requires subtracting from this timestamp the number of leap seconds
/// between TAI and UTC time (see also the
/// [`as_unix_secs()`](MonotonicTime::as_unix_secs) method).
///
/// Although no date-time conversion methods are provided, conversion from
/// timestamp to TAI date-time representations and back can be easily performed
/// using `NaiveDateTime` from the [chrono] crate or `OffsetDateTime` from the
/// [time] crate, treating the timestamp as a regular (UTC) Unix timestamp.
///
/// [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
/// [chrono]: https://crates.io/crates/chrono
/// [time]: https://crates.io/crates/time
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use asynchronix::time::MonotonicTime;
///
/// // Set the timestamp to 2009-02-13 23:31:30.987654321 TAI.
/// let mut timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
///
/// // Increment the timestamp by 123.456s.
/// timestamp += Duration::new(123, 456_000_000);
///
/// assert_eq!(timestamp, MonotonicTime::new(1_234_568_014, 443_654_321));
/// assert_eq!(timestamp.as_secs(), 1_234_568_014);
/// assert_eq!(timestamp.subsec_nanos(), 443_654_321);
/// ```
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MonotonicTime {
    /// The number of whole seconds in the future (if positive) or in the past
    /// (if negative) of 1970-01-01 00:00:00 TAI.
    ///
    /// Note that the automatic derivation of `PartialOrd` relies on
    /// lexicographical comparison so the `secs` field must appear before
    /// `nanos` in declaration order to be given higher priority.
    secs: i64,
    /// The sub-second number of nanoseconds in the future of the point in time
    /// defined by `secs`.
    ///
    /// Invariant: always strictly less than `NANOS_PER_SEC`; `new()` asserts
    /// this, and other constructors are assumed to uphold it as well.
    nanos: u32,
}
impl MonotonicTime {
    /// The epoch used by `MonotonicTime`, equal to 1970-01-01 00:00:00 TAI.
    ///
    /// This epoch coincides with the PTP epoch defined in the IEEE-1588
    /// standard.
    pub const EPOCH: Self = Self { secs: 0, nanos: 0 };

    /// The minimum possible `MonotonicTime` timestamp.
    pub const MIN: Self = Self {
        secs: i64::MIN,
        nanos: 0,
    };

    /// The maximum possible `MonotonicTime` timestamp.
    pub const MAX: Self = Self {
        secs: i64::MAX,
        nanos: NANOS_PER_SEC - 1,
    };

    /// Creates a timestamp directly from timestamp parts.
    ///
    /// The number of seconds is relative to the [`EPOCH`](MonotonicTime::EPOCH)
    /// (1970-01-01 00:00:00 TAI). It is negative for dates in the past of the
    /// epoch.
    ///
    /// The number of nanoseconds is always positive and always points towards
    /// the future.
    ///
    /// # Panics
    ///
    /// This constructor will panic if the number of nanoseconds is greater than
    /// or equal to 1 second.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // A timestamp set to 2009-02-13 23:31:30.987654321 TAI.
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    ///
    /// // A timestamp set 3.5s before the epoch.
    /// let timestamp = MonotonicTime::new(-4, 500_000_000);
    /// assert_eq!(timestamp, MonotonicTime::EPOCH - Duration::new(3, 500_000_000));
    /// ```
    pub const fn new(secs: i64, subsec_nanos: u32) -> Self {
        assert!(
            subsec_nanos < NANOS_PER_SEC,
            "invalid number of nanoseconds"
        );

        Self {
            secs,
            nanos: subsec_nanos,
        }
    }

    /// Creates a timestamp from the current system time.
    ///
    /// The argument is the current difference between TAI and UTC time in
    /// seconds (a.k.a. leap seconds). For reference, this offset has been +37s
    /// since 2017-01-01, a value which is to remain valid until at least
    /// 2024-06-29. See the [official IERS bulletin
    /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
    /// leap second announcements or the [IETF
    /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
    /// current and historical values.
    ///
    /// # Errors
    ///
    /// This method will return an error if the reported system time is in the
    /// past of the Unix epoch or if the offset-adjusted timestamp is outside
    /// the representable range.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // Compute the current TAI time assuming that the current difference
    /// // between TAI and UTC time is 37s.
    /// let timestamp = MonotonicTime::from_system(37).unwrap();
    /// ```
    pub fn from_system(leap_secs: i64) -> Result<Self, SystemTimeError> {
        let utc_timestamp = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .map_err(|_| SystemTimeError::InvalidSystemTime)?;

        // Start from the leap-second offset and add the UTC duration so that
        // any overflow is caught by `checked_add`.
        Self::new(leap_secs, 0)
            .checked_add(utc_timestamp)
            .ok_or(SystemTimeError::OutOfRange)
    }

    /// Returns the number of whole seconds relative to
    /// [`EPOCH`](MonotonicTime::EPOCH) (1970-01-01 00:00:00 TAI).
    ///
    /// Consistently with the interpretation of seconds and nanoseconds in the
    /// [`new()`](Self::new) constructor, seconds are always rounded towards
    /// `-∞`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert_eq!(timestamp.as_secs(), 1_234_567_890);
    ///
    /// let timestamp = MonotonicTime::EPOCH - Duration::new(3, 500_000_000);
    /// assert_eq!(timestamp.as_secs(), -4);
    /// ```
    pub const fn as_secs(&self) -> i64 {
        self.secs
    }

    /// Returns the number of seconds of the corresponding Unix time.
    ///
    /// The argument is the difference between TAI and UTC time in seconds
    /// (a.k.a. leap seconds) applicable at the date represented by the
    /// timestamp. See the [official IERS bulletin
    /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
    /// leap second announcements or the [IETF
    /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
    /// current and historical values.
    ///
    /// This method merely subtracts the offset from the value returned by
    /// [`as_secs()`](Self::as_secs) and checks for potential overflow; its main
    /// purpose is to prevent mistakes regarding the direction in which the
    /// offset should be applied.
    ///
    /// Note that the nanosecond part of a Unix timestamp can be simply
    /// retrieved with [`subsec_nanos()`](Self::subsec_nanos) since UTC and TAI
    /// differ by a whole number of seconds.
    ///
    /// # Panics
    ///
    /// This will panic if the offset-adjusted timestamp cannot be represented
    /// as an `i64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // Set the date to 2000-01-01 00:00:00 TAI.
    /// let timestamp = MonotonicTime::new(946_684_800, 0);
    ///
    /// // Convert to a Unix timestamp, accounting for the +32s difference between
    /// // TAI and UTC on 2000-01-01.
    /// let unix_secs = timestamp.as_unix_secs(32);
    /// ```
    pub const fn as_unix_secs(&self, leap_secs: i64) -> i64 {
        // `Option::expect` is not const-callable, hence the explicit `if let`.
        if let Some(secs) = self.secs.checked_sub(leap_secs) {
            secs
        } else {
            panic!("timestamp outside representable range");
        }
    }

    /// Returns the sub-second fractional part in nanoseconds.
    ///
    /// Note that nanoseconds always point towards the future even if the date
    /// is in the past of the [`EPOCH`](MonotonicTime::EPOCH).
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert_eq!(timestamp.subsec_nanos(), 987_654_321);
    /// ```
    pub const fn subsec_nanos(&self) -> u32 {
        self.nanos
    }

    /// Adds a duration to a timestamp, checking for overflow.
    ///
    /// Returns `None` if overflow occurred.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert!(timestamp.checked_add(Duration::new(10, 123_456_789)).is_some());
    /// assert!(timestamp.checked_add(Duration::MAX).is_none());
    /// ```
    pub const fn checked_add(self, rhs: Duration) -> Option<Self> {
        // A duration in seconds greater than `i64::MAX` is actually fine as
        // long as the number of seconds does not effectively overflow which is
        // why the below does not use `checked_add`. So technically the below
        // addition may wrap around on the negative side due to the
        // unsigned-to-signed cast of the duration, but this does not
        // necessarily indicate an actual overflow. Actual overflow can be ruled
        // out by verifying that the new timestamp is in the future of the old
        // timestamp.
        let mut secs = self.secs.wrapping_add(rhs.as_secs() as i64);

        // Check for overflow.
        if secs < self.secs {
            return None;
        }

        let mut nanos = self.nanos + rhs.subsec_nanos();
        if nanos >= NANOS_PER_SEC {
            secs = if let Some(s) = secs.checked_add(1) {
                s
            } else {
                return None;
            };
            nanos -= NANOS_PER_SEC;
        }

        Some(Self { secs, nanos })
    }

    /// Subtracts a duration from a timestamp, checking for overflow.
    ///
    /// Returns `None` if overflow occurred.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert!(timestamp.checked_sub(Duration::new(10, 123_456_789)).is_some());
    /// assert!(timestamp.checked_sub(Duration::MAX).is_none());
    /// ```
    pub const fn checked_sub(self, rhs: Duration) -> Option<Self> {
        // A duration in seconds greater than `i64::MAX` is actually fine as
        // long as the number of seconds does not effectively overflow, which is
        // why the below does not use `checked_sub`. So technically the below
        // subtraction may wrap around on the positive side due to the
        // unsigned-to-signed cast of the duration, but this does not
        // necessarily indicate an actual overflow. Actual overflow can be ruled
        // out by verifying that the new timestamp is in the past of the old
        // timestamp.
        let mut secs = self.secs.wrapping_sub(rhs.as_secs() as i64);

        // Check for overflow.
        if secs > self.secs {
            return None;
        }

        let nanos = if self.nanos < rhs.subsec_nanos() {
            // Borrow one second from the seconds part.
            secs = if let Some(s) = secs.checked_sub(1) {
                s
            } else {
                return None;
            };

            (self.nanos + NANOS_PER_SEC) - rhs.subsec_nanos()
        } else {
            self.nanos - rhs.subsec_nanos()
        };

        Some(Self { secs, nanos })
    }

    /// Subtracts a timestamp from another timestamp.
    ///
    /// # Panics
    ///
    /// Panics if the argument lies in the future of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
    /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
    /// assert_eq!(
    ///     timestamp_later.duration_since(timestamp_earlier),
    ///     Duration::new(20, 135_802_468)
    /// );
    /// ```
    pub fn duration_since(self, earlier: Self) -> Duration {
        self.checked_duration_since(earlier)
            .expect("attempt to subtract a timestamp from an earlier timestamp")
    }

    /// Computes the duration elapsed between a timestamp and an earlier
    /// timestamp, checking that the timestamps are appropriately ordered.
    ///
    /// Returns `None` if the argument lies in the future of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
    /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
    /// assert!(timestamp_later.checked_duration_since(timestamp_earlier).is_some());
    /// assert!(timestamp_earlier.checked_duration_since(timestamp_later).is_none());
    /// ```
    pub const fn checked_duration_since(self, earlier: Self) -> Option<Duration> {
        // If the subtraction of the nanosecond fractions would overflow, carry
        // over one second to the nanoseconds.
        let (secs, nanos) = if earlier.nanos > self.nanos {
            if let Some(s) = self.secs.checked_sub(1) {
                (s, self.nanos + NANOS_PER_SEC)
            } else {
                return None;
            }
        } else {
            (self.secs, self.nanos)
        };

        // Make sure the computation of the duration will not overflow the
        // seconds.
        if secs < earlier.secs {
            return None;
        }

        // This subtraction may wrap around if the difference between the two
        // timestamps is more than `i64::MAX`, but even if it does the result
        // will be correct once cast to an unsigned integer.
        let delta_secs = secs.wrapping_sub(earlier.secs) as u64;

        // The below subtraction is guaranteed to never overflow.
        let delta_nanos = nanos - earlier.nanos;

        Some(Duration::new(delta_secs, delta_nanos))
    }
}
impl Add<Duration> for MonotonicTime {
    type Output = Self;

    /// Adds a duration to a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_add`] for a panic-free
    /// version.
    fn add(self, other: Duration) -> Self {
        match self.checked_add(other) {
            Some(sum) => sum,
            None => panic!("overflow when adding duration to timestamp"),
        }
    }
}
impl Sub<Duration> for MonotonicTime {
    type Output = Self;

    /// Subtracts a duration from a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_sub`] for a panic-free
    /// version.
    fn sub(self, other: Duration) -> Self {
        match self.checked_sub(other) {
            Some(difference) => difference,
            None => panic!("overflow when subtracting duration from timestamp"),
        }
    }
}
impl AddAssign<Duration> for MonotonicTime {
    /// Increments the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    fn add_assign(&mut self, other: Duration) {
        // Delegate to `checked_add` with the same panic message as `Add`.
        *self = self
            .checked_add(other)
            .expect("overflow when adding duration to timestamp");
    }
}
impl SubAssign<Duration> for MonotonicTime {
    /// Decrements the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    fn sub_assign(&mut self, other: Duration) {
        // Delegate to `checked_sub` with the same panic message as `Sub`.
        *self = self
            .checked_sub(other)
            .expect("overflow when subtracting duration from timestamp");
    }
}
/// An error that may be returned when initializing a [`MonotonicTime`] from
/// system time.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SystemTimeError {
    /// The system time is in the past of the Unix epoch.
    InvalidSystemTime,
    /// The system time cannot be represented as a `MonotonicTime`.
    ///
    /// This occurs when the leap-second-adjusted timestamp overflows the
    /// `i64` seconds range.
    OutOfRange,
}
impl fmt::Display for SystemTimeError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its user-facing description, then emit it once.
        let msg = match self {
            Self::InvalidSystemTime => "invalid system time",
            Self::OutOfRange => "timestamp outside representable range",
        };

        fmt.write_str(msg)
    }
}
impl Error for SystemTimeError {}
/// A tearable atomic adapter over a `MonotonicTime`.
///
/// This makes it possible to store the simulation time in a `SyncCell`, an
@ -490,8 +17,8 @@ pub(crate) struct TearableAtomicTime {
impl TearableAtomicTime {
pub(crate) fn new(time: MonotonicTime) -> Self {
Self {
secs: AtomicI64::new(time.secs),
nanos: AtomicU32::new(time.nanos),
secs: AtomicI64::new(time.as_secs()),
nanos: AtomicU32::new(time.subsec_nanos()),
}
}
}
@ -502,170 +29,17 @@ impl TearableAtomic for TearableAtomicTime {
fn tearable_load(&self) -> MonotonicTime {
// Load each field separately. This can never create invalid values of a
// `MonotonicTime`, even if the load is torn.
MonotonicTime {
secs: self.secs.load(Ordering::Relaxed),
nanos: self.nanos.load(Ordering::Relaxed),
}
MonotonicTime::new(
self.secs.load(Ordering::Relaxed),
self.nanos.load(Ordering::Relaxed),
)
.unwrap()
}
fn tearable_store(&self, value: MonotonicTime) {
// Write each field separately. This can never create invalid values of
// a `MonotonicTime`, even if the store is torn.
self.secs.store(value.secs, Ordering::Relaxed);
self.nanos.store(value.nanos, Ordering::Relaxed);
}
}
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
use super::*;
// Equality must account for both the seconds and the nanoseconds parts.
#[test]
fn time_equality() {
    let t0 = MonotonicTime::new(123, 123_456_789);
    let t1 = MonotonicTime::new(123, 123_456_789);
    let t2 = MonotonicTime::new(123, 123_456_790);
    let t3 = MonotonicTime::new(124, 123_456_789);

    assert_eq!(t0, t1);
    assert_ne!(t0, t2);
    assert_ne!(t0, t3);
}
// The seconds part must take precedence over the nanoseconds part when
// ordering timestamps (lexicographic comparison).
#[test]
fn time_ordering() {
    let t0 = MonotonicTime::new(0, 1);
    let t1 = MonotonicTime::new(1, 0);

    assert!(t1 > t0);
}
// Smoke test: a system-derived timestamp should fall within a broadly
// plausible range. Disabled under Miri since it performs a system call.
#[cfg(not(miri))]
#[test]
fn time_from_system_smoke() {
    const START_OF_2022: i64 = 1640995200;
    const START_OF_2050: i64 = 2524608000;

    let now_secs = MonotonicTime::from_system(0).unwrap().as_secs();

    assert!(now_secs > START_OF_2022);
    assert!(now_secs < START_OF_2050);
}
// The constructor must reject a nanoseconds part of one second or more.
#[test]
#[should_panic]
fn time_invalid() {
    MonotonicTime::new(123, 1_000_000_000);
}
// Basic duration computation with no nanosecond borrow involved.
#[test]
fn time_duration_since_smoke() {
    let t0 = MonotonicTime::new(100, 100_000_000);
    let t1 = MonotonicTime::new(123, 223_456_789);

    assert_eq!(
        t1.checked_duration_since(t0),
        Some(Duration::new(23, 123_456_789))
    );
}
// Duration computation that requires borrowing one second into the
// nanoseconds part.
#[test]
fn time_duration_with_carry() {
    let t0 = MonotonicTime::new(100, 200_000_000);
    let t1 = MonotonicTime::new(101, 100_000_000);

    assert_eq!(
        t1.checked_duration_since(t0),
        Some(Duration::new(0, 900_000_000))
    );
}
// The full representable span (MIN to MAX) must be computable without
// overflowing the `u64` seconds of `Duration`.
#[test]
fn time_duration_since_extreme() {
    const MIN_TIME: MonotonicTime = MonotonicTime::new(i64::MIN, 0);
    const MAX_TIME: MonotonicTime = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);

    assert_eq!(
        MAX_TIME.checked_duration_since(MIN_TIME),
        Some(Duration::new(u64::MAX, NANOS_PER_SEC - 1))
    );
}
// Asking for the duration since a *later* timestamp must return `None`.
#[test]
fn time_duration_since_invalid() {
    let t0 = MonotonicTime::new(100, 0);
    let t1 = MonotonicTime::new(99, 0);

    assert_eq!(t1.checked_duration_since(t0), None);
}
// Basic addition crossing the epoch, with no nanosecond carry.
#[test]
fn time_add_duration_smoke() {
    let t = MonotonicTime::new(-100, 100_000_000);
    let dt = Duration::new(400, 300_000_000);

    assert_eq!(t + dt, MonotonicTime::new(300, 400_000_000));
}
// Addition where the nanoseconds sum reaches or exceeds one second, so a
// carry into the seconds part is required.
#[test]
fn time_add_duration_with_carry() {
    let t = MonotonicTime::new(-100, 900_000_000);
    let dt1 = Duration::new(400, 100_000_000);
    let dt2 = Duration::new(400, 300_000_000);

    assert_eq!(t + dt1, MonotonicTime::new(301, 0));
    assert_eq!(t + dt2, MonotonicTime::new(301, 200_000_000));
}
// Adding the maximum duration to the minimum timestamp must land exactly on
// the maximum timestamp without being flagged as overflow.
#[test]
fn time_add_duration_extreme() {
    let t = MonotonicTime::new(i64::MIN, 0);
    let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

    assert_eq!(t + dt, MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1));
}
// One nanosecond past the extreme case above must overflow and panic.
#[test]
#[should_panic]
fn time_add_duration_overflow() {
    let t = MonotonicTime::new(i64::MIN, 1);
    let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

    let _ = t + dt;
}
// Basic subtraction crossing the epoch, with no nanosecond borrow.
#[test]
fn time_sub_duration_smoke() {
    let t = MonotonicTime::new(100, 500_000_000);
    let dt = Duration::new(400, 300_000_000);

    assert_eq!(t - dt, MonotonicTime::new(-300, 200_000_000));
}
// Subtraction where the nanoseconds part must borrow one second.
#[test]
fn time_sub_duration_with_carry() {
    let t = MonotonicTime::new(100, 100_000_000);
    let dt1 = Duration::new(400, 100_000_000);
    let dt2 = Duration::new(400, 300_000_000);

    assert_eq!(t - dt1, MonotonicTime::new(-300, 0));
    assert_eq!(t - dt2, MonotonicTime::new(-301, 800_000_000));
}
// Subtracting the maximum duration from the maximum timestamp must land
// exactly on the minimum timestamp without being flagged as overflow.
#[test]
fn time_sub_duration_extreme() {
    let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
    let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);

    assert_eq!(t - dt, MonotonicTime::new(i64::MIN, 0));
}
#[test]
#[should_panic]
fn time_sub_duration_overflow() {
let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 2);
let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
let _ = t - dt;
self.secs.store(value.as_secs(), Ordering::Relaxed);
self.nanos.store(value.subsec_nanos(), Ordering::Relaxed);
}
}

View File

@ -1,27 +1,35 @@
//! Scheduling functions and types.
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::hash::{Hash, Hasher};
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use std::time::Duration;
use std::{fmt, ptr};
use pin_project_lite::pin_project;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel::{ChannelId, Sender};
use crate::channel::Sender;
use crate::executor::Executor;
use crate::model::{InputFn, Model};
use crate::model::Model;
use crate::ports::InputFn;
use crate::time::{MonotonicTime, TearableAtomicTime};
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCellReader;
/// Shorthand for the scheduler queue type.
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, ChannelId), Box<dyn ScheduledEvent>>;
// Why use both time and channel ID as the key? The short answer is that this
// ensures that events targeting the same model are sent in the order they were
// scheduled. More precisely, this ensures that events targeting the same model
// are ordered contiguously in the priority queue, which in turns allows the
// event loop to easily aggregate such events into single futures and thus
// control their relative order of execution.
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, usize), Action>;
/// Trait abstracting over time-absolute and time-relative deadlines.
///
@ -81,7 +89,9 @@ impl Deadline for MonotonicTime {
///
/// ```
/// use std::time::Duration;
/// use asynchronix::model::{Model, Output}; use asynchronix::time::Scheduler;
/// use asynchronix::model::Model;
/// use asynchronix::ports::Output;
/// use asynchronix::time::Scheduler;
///
/// #[derive(Default)]
/// pub struct DelayedGreeter {
@ -141,8 +151,8 @@ impl<M: Model> Scheduler<M> {
///
/// fn is_third_millenium<M: Model>(scheduler: &Scheduler<M>) -> bool {
/// let time = scheduler.time();
///
/// time >= MonotonicTime::new(978307200, 0) && time < MonotonicTime::new(32535216000, 0)
/// time >= MonotonicTime::new(978307200, 0).unwrap()
/// && time < MonotonicTime::new(32535216000, 0).unwrap()
/// }
/// ```
pub fn time(&self) -> MonotonicTime {
@ -203,7 +213,8 @@ impl<M: Model> Scheduler<M> {
Ok(())
}
/// Schedules a cancellable event at a future time and returns an event key.
/// Schedules a cancellable event at a future time and returns an action
/// key.
///
/// An error is returned if the specified deadline is not in the future of
/// the current simulation time.
@ -212,12 +223,12 @@ impl<M: Model> Scheduler<M> {
///
/// ```
/// use asynchronix::model::Model;
/// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
/// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler};
///
/// // An alarm clock that can be cancelled.
/// #[derive(Default)]
/// pub struct CancellableAlarmClock {
/// event_key: Option<EventKey>,
/// event_key: Option<ActionKey>,
/// }
///
/// impl CancellableAlarmClock {
@ -248,7 +259,7 @@ impl<M: Model> Scheduler<M> {
deadline: impl Deadline,
func: F,
arg: T,
) -> Result<EventKey, SchedulingError>
) -> Result<ActionKey, SchedulingError>
where
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
@ -337,7 +348,7 @@ impl<M: Model> Scheduler<M> {
}
/// Schedules a cancellable, periodically recurring event at a future time
/// and returns an event key.
/// and returns an action key.
///
/// An error is returned if the specified deadline is not in the future of
/// the current simulation time or if the specified period is null.
@ -348,13 +359,13 @@ impl<M: Model> Scheduler<M> {
/// use std::time::Duration;
///
/// use asynchronix::model::Model;
/// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
/// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler};
///
/// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or
/// // stopped after it sets off.
/// #[derive(Default)]
/// pub struct CancellableBeepingAlarmClock {
/// event_key: Option<EventKey>,
/// event_key: Option<ActionKey>,
/// }
///
/// impl CancellableBeepingAlarmClock {
@ -391,7 +402,7 @@ impl<M: Model> Scheduler<M> {
period: Duration,
func: F,
arg: T,
) -> Result<EventKey, SchedulingError>
) -> Result<ActionKey, SchedulingError>
where
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
@ -425,34 +436,55 @@ impl<M: Model> fmt::Debug for Scheduler<M> {
}
}
/// Handle to a scheduled event.
/// Handle to a scheduled action.
///
/// An `EventKey` can be used to cancel a future event.
/// An `ActionKey` can be used to cancel a scheduled action.
#[derive(Clone, Debug)]
#[must_use = "prefer unkeyed scheduling methods if the event is never cancelled"]
pub struct EventKey {
#[must_use = "prefer unkeyed scheduling methods if the action is never cancelled"]
pub struct ActionKey {
is_cancelled: Arc<AtomicBool>,
}
impl EventKey {
/// Creates a key for a pending event.
impl ActionKey {
/// Creates a key for a pending action.
pub(crate) fn new() -> Self {
Self {
is_cancelled: Arc::new(AtomicBool::new(false)),
}
}
/// Checks whether the event was cancelled.
/// Checks whether the action was cancelled.
pub(crate) fn is_cancelled(&self) -> bool {
self.is_cancelled.load(Ordering::Relaxed)
}
/// Cancels the associated event.
/// Cancels the associated action.
pub fn cancel(self) {
self.is_cancelled.store(true, Ordering::Relaxed);
}
}
impl PartialEq for ActionKey {
/// Implements equality by considering clones to be equivalent, rather than
/// keys with the same `is_cancelled` value.
fn eq(&self, other: &Self) -> bool {
ptr::addr_eq(&*self.is_cancelled, &*other.is_cancelled)
}
}
impl Eq for ActionKey {}
impl Hash for ActionKey {
    /// Implements `Hash` by considering clones to be equivalent, rather than
    /// keys with the same `is_cancelled` value.
    fn hash<H>(&self, state: &mut H)
    where
        H: Hasher,
    {
        // Hash the address of the shared flag so that hashing stays consistent
        // with `PartialEq`, which compares by address.
        ptr::hash(&*self.is_cancelled, state)
    }
}
/// Error returned when the scheduled time or the repetition period are invalid.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SchedulingError {
@ -477,9 +509,73 @@ impl fmt::Display for SchedulingError {
impl Error for SchedulingError {}
/// A possibly periodic, possibly cancellable action that can be scheduled or
/// processed immediately.
pub struct Action {
    // Type-erased implementation; see the `ActionInner` trait for the
    // operations an action supports.
    inner: Box<dyn ActionInner>,
}
impl Action {
    /// Creates a new `Action` from an `ActionInner`.
    pub(crate) fn new<S: ActionInner>(s: S) -> Self {
        Self { inner: Box::new(s) }
    }

    /// Reports whether the action was cancelled.
    pub(crate) fn is_cancelled(&self) -> bool {
        self.inner.is_cancelled()
    }

    /// If this is a periodic action, returns a boxed clone of this action and
    /// its repetition period; otherwise returns `None`.
    pub(crate) fn next(&self) -> Option<(Action, Duration)> {
        self.inner
            .next()
            .map(|(inner, period)| (Self { inner }, period))
    }

    /// Returns a boxed future that performs the action.
    pub(crate) fn into_future(self) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        self.inner.into_future()
    }

    /// Spawns the future that performs the action onto the provided executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    pub(crate) fn spawn_and_forget(self, executor: &Executor) {
        self.inner.spawn_and_forget(executor)
    }
}
impl fmt::Debug for Action {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Report the actual type name rather than the stale pre-rename
        // "SchedulableEvent" so debug output matches the public API.
        f.debug_struct("Action").finish_non_exhaustive()
    }
}
/// Trait abstracting over the inner type of an action.
pub(crate) trait ActionInner: Send + 'static {
    /// Reports whether the action was cancelled.
    fn is_cancelled(&self) -> bool;

    /// If this is a periodic action, returns a boxed clone of this action and
    /// its repetition period; otherwise returns `None`.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)>;

    /// Returns a boxed future that performs the action.
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;

    /// Spawns the future that performs the action onto the provided executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
/// Schedules an event at a future time.
///
/// This method does not check whether the specified time lies in the future
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
time: MonotonicTime,
@ -495,15 +591,15 @@ pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
{
let channel_id = sender.channel_id();
let event_dispatcher = Box::new(new_event_dispatcher(func, arg, sender));
let action = Action::new(OnceAction::new(process_event(func, arg, sender)));
let mut scheduler_queue = scheduler_queue.lock().unwrap();
scheduler_queue.insert((time, channel_id), event_dispatcher);
scheduler_queue.insert((time, channel_id), action);
}
/// Schedules an event at a future time, returning an event key.
/// Schedules an event at a future time, returning an action key.
///
/// This method does not check whether the specified time lies in the future
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_keyed_event_at_unchecked<M, F, T, S>(
time: MonotonicTime,
@ -511,31 +607,29 @@ pub(crate) fn schedule_keyed_event_at_unchecked<M, F, T, S>(
arg: T,
sender: Sender<M>,
scheduler_queue: &Mutex<SchedulerQueue>,
) -> EventKey
) -> ActionKey
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
S: Send + 'static,
{
let event_key = EventKey::new();
let event_key = ActionKey::new();
let channel_id = sender.channel_id();
let event_dispatcher = Box::new(KeyedEventDispatcher::new(
let action = Action::new(KeyedOnceAction::new(
|ek| send_keyed_event(ek, func, arg, sender),
event_key.clone(),
func,
arg,
sender,
));
let mut scheduler_queue = scheduler_queue.lock().unwrap();
scheduler_queue.insert((time, channel_id), event_dispatcher);
scheduler_queue.insert((time, channel_id), action);
event_key
}
/// Schedules a periodic event at a future time.
///
/// This method does not check whether the specified time lies in the future
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_periodic_event_at_unchecked<M, F, T, S>(
time: MonotonicTime,
@ -552,15 +646,18 @@ pub(crate) fn schedule_periodic_event_at_unchecked<M, F, T, S>(
{
let channel_id = sender.channel_id();
let event_dispatcher = Box::new(PeriodicEventDispatcher::new(func, arg, sender, period));
let action = Action::new(PeriodicAction::new(
|| process_event(func, arg, sender),
period,
));
let mut scheduler_queue = scheduler_queue.lock().unwrap();
scheduler_queue.insert((time, channel_id), event_dispatcher);
scheduler_queue.insert((time, channel_id), action);
}
/// Schedules an event at a future time, returning an event key.
/// Schedules an event at a future time, returning an action key.
///
/// This method does not check whether the specified time lies in the future
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_periodic_keyed_event_at_unchecked<M, F, T, S>(
time: MonotonicTime,
@ -569,84 +666,52 @@ pub(crate) fn schedule_periodic_keyed_event_at_unchecked<M, F, T, S>(
arg: T,
sender: Sender<M>,
scheduler_queue: &Mutex<SchedulerQueue>,
) -> EventKey
) -> ActionKey
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
S: Send + 'static,
{
let event_key = EventKey::new();
let event_key = ActionKey::new();
let channel_id = sender.channel_id();
let event_dispatcher = Box::new(PeriodicKeyedEventDispatcher::new(
event_key.clone(),
func,
arg,
sender,
let action = Action::new(KeyedPeriodicAction::new(
|ek| send_keyed_event(ek, func, arg, sender),
period,
event_key.clone(),
));
let mut scheduler_queue = scheduler_queue.lock().unwrap();
scheduler_queue.insert((time, channel_id), event_dispatcher);
scheduler_queue.insert((time, channel_id), action);
event_key
}
/// Trait for objects that can be converted to a future dispatching a scheduled
/// event.
pub(crate) trait ScheduledEvent: Send {
/// Reports whether the associated event was cancelled.
fn is_cancelled(&self) -> bool;
/// Returns a boxed clone of this event and the repetition period if this is
/// a periodic even, otherwise returns `None`.
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)>;
/// Returns a boxed future dispatching the associated event.
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;
/// Spawns the future that dispatches the associated event onto the provided
/// executor.
///
/// This method is typically more efficient that spawning the boxed future
/// from `into_future` since it can directly spawn the unboxed future.
fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
pin_project! {
/// Object that can be converted to a future dispatching a non-cancellable
/// event.
/// An object that can be converted to a future performing a single
/// non-cancellable action.
///
/// Note that this particular event dispatcher is in fact already a future:
/// since the future cannot be cancelled and the dispatcher does not need to
/// be cloned, there is no need to defer the construction of the future.
/// This makes `into_future` a trivial cast, which saves a boxing operation.
pub(crate) struct EventDispatcher<F> {
/// Note that this particular action is in fact already a future: since the
/// future cannot be cancelled and the action does not need to be cloned,
/// there is no need to defer the construction of the future. This makes
/// `into_future` a trivial cast, which saves a boxing operation.
pub(crate) struct OnceAction<F> {
#[pin]
fut: F,
}
}
/// Constructs a new `EventDispatcher`.
///
/// Due to some limitations of type inference or of my understanding of it, the
/// constructor for this event dispatchers is a freestanding function.
fn new_event_dispatcher<M, F, T, S>(
func: F,
arg: T,
sender: Sender<M>,
) -> EventDispatcher<impl Future<Output = ()>>
impl<F> OnceAction<F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
F: Future<Output = ()> + Send + 'static,
{
let fut = dispatch_event(func, arg, sender);
EventDispatcher { fut }
/// Constructs a new `OnceAction`.
pub(crate) fn new(fut: F) -> Self {
OnceAction { fut }
}
}
impl<F> Future for EventDispatcher<F>
impl<F> Future for OnceAction<F>
where
F: Future,
{
@ -658,14 +723,14 @@ where
}
}
impl<F> ScheduledEvent for EventDispatcher<F>
impl<F> ActionInner for OnceAction<F>
where
F: Future<Output = ()> + Send + 'static,
{
fn is_cancelled(&self) -> bool {
false
}
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
None
}
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
@ -677,230 +742,155 @@ where
}
}
/// Object that can be converted to a future dispatching a non-cancellable periodic
/// event.
pub(crate) struct PeriodicEventDispatcher<M, F, T, S>
/// An object that can be converted to a future performing a non-cancellable,
/// periodic action.
pub(crate) struct PeriodicAction<G, F>
where
M: Model,
G: (FnOnce() -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
func: F,
arg: T,
sender: Sender<M>,
/// A clonable generator for the associated future.
gen: G,
/// The action repetition period.
period: Duration,
_input_kind: PhantomData<S>,
}
impl<M, F, T, S> PeriodicEventDispatcher<M, F, T, S>
impl<G, F> PeriodicAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
G: (FnOnce() -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
/// Constructs a new `PeriodicEventDispatcher`.
fn new(func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
Self {
func,
arg,
sender,
period,
_input_kind: PhantomData,
}
/// Constructs a new `PeriodicAction`.
pub(crate) fn new(gen: G, period: Duration) -> Self {
Self { gen, period }
}
}
impl<M, F, T, S> ScheduledEvent for PeriodicEventDispatcher<M, F, T, S>
impl<G, F> ActionInner for PeriodicAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
S: Send + 'static,
G: (FnOnce() -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
fn is_cancelled(&self) -> bool {
false
}
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
let event = Box::new(Self::new(
self.func.clone(),
self.arg.clone(),
self.sender.clone(),
self.period,
));
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
let event = Box::new(Self::new(self.gen.clone(), self.period));
Some((event, self.period))
}
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
let Self {
func, arg, sender, ..
} = *self;
Box::pin(dispatch_event(func, arg, sender))
Box::pin((self.gen)())
}
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
let Self {
func, arg, sender, ..
} = *self;
let fut = dispatch_event(func, arg, sender);
executor.spawn_and_forget(fut);
executor.spawn_and_forget((self.gen)());
}
}
/// Object that can be converted to a future dispatching a cancellable event.
pub(crate) struct KeyedEventDispatcher<M, F, T, S>
/// An object that can be converted to a future performing a single, cancellable
/// action.
pub(crate) struct KeyedOnceAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
G: (FnOnce(ActionKey) -> F) + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
event_key: EventKey,
func: F,
arg: T,
sender: Sender<M>,
_input_kind: PhantomData<S>,
/// A generator for the associated future.
gen: G,
/// The event cancellation key.
event_key: ActionKey,
}
impl<M, F, T, S> KeyedEventDispatcher<M, F, T, S>
impl<G, F> KeyedOnceAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
G: (FnOnce(ActionKey) -> F) + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
/// Constructs a new `KeyedEventDispatcher`.
fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>) -> Self {
Self {
event_key,
func,
arg,
sender,
_input_kind: PhantomData,
}
/// Constructs a new `KeyedOnceAction`.
pub(crate) fn new(gen: G, event_key: ActionKey) -> Self {
Self { gen, event_key }
}
}
impl<M, F, T, S> ScheduledEvent for KeyedEventDispatcher<M, F, T, S>
impl<G, F> ActionInner for KeyedOnceAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
S: Send + 'static,
G: (FnOnce(ActionKey) -> F) + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
fn is_cancelled(&self) -> bool {
self.event_key.is_cancelled()
}
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
None
}
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
let Self {
event_key,
func,
arg,
sender,
..
} = *self;
Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
Box::pin((self.gen)(self.event_key))
}
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
let Self {
event_key,
func,
arg,
sender,
..
} = *self;
let fut = dispatch_keyed_event(event_key, func, arg, sender);
executor.spawn_and_forget(fut);
executor.spawn_and_forget((self.gen)(self.event_key));
}
}
/// Object that can be converted to a future dispatching a cancellable event.
pub(crate) struct PeriodicKeyedEventDispatcher<M, F, T, S>
/// An object that can be converted to a future performing a periodic,
/// cancellable action.
pub(crate) struct KeyedPeriodicAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
event_key: EventKey,
func: F,
arg: T,
sender: Sender<M>,
/// A clonable generator for associated future.
gen: G,
/// The repetition period.
period: Duration,
_input_kind: PhantomData<S>,
/// The event cancellation key.
event_key: ActionKey,
}
impl<M, F, T, S> PeriodicKeyedEventDispatcher<M, F, T, S>
impl<G, F> KeyedPeriodicAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
/// Constructs a new `KeyedEventDispatcher`.
fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
/// Constructs a new `KeyedPeriodicAction`.
pub(crate) fn new(gen: G, period: Duration, event_key: ActionKey) -> Self {
Self {
event_key,
func,
arg,
sender,
gen,
period,
_input_kind: PhantomData,
event_key,
}
}
}
impl<M, F, T, S> ScheduledEvent for PeriodicKeyedEventDispatcher<M, F, T, S>
impl<G, F> ActionInner for KeyedPeriodicAction<G, F>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
S: Send + 'static,
G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
F: Future<Output = ()> + Send + 'static,
{
fn is_cancelled(&self) -> bool {
self.event_key.is_cancelled()
}
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
let event = Box::new(Self::new(
self.event_key.clone(),
self.func.clone(),
self.arg.clone(),
self.sender.clone(),
self.gen.clone(),
self.period,
self.event_key.clone(),
));
Some((event, self.period))
}
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
let Self {
event_key,
func,
arg,
sender,
..
} = *self;
Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
Box::pin((self.gen)(self.event_key))
}
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
let Self {
event_key,
func,
arg,
sender,
..
} = *self;
let fut = dispatch_keyed_event(event_key, func, arg, sender);
executor.spawn_and_forget(fut);
executor.spawn_and_forget((self.gen)(self.event_key));
}
}
/// Asynchronously dispatch a regular, non-cancellable event.
async fn dispatch_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
/// Asynchronously sends a non-cancellable event to a model input.
pub(crate) async fn process_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
T: Send + 'static,
{
let _ = sender
.send(
@ -916,9 +906,13 @@ where
.await;
}
/// Asynchronously dispatch a cancellable event.
async fn dispatch_keyed_event<M, F, T, S>(event_key: EventKey, func: F, arg: T, sender: Sender<M>)
where
/// Asynchronously sends a cancellable event to a model input.
pub(crate) async fn send_keyed_event<M, F, T, S>(
event_key: ActionKey,
func: F,
arg: T,
sender: Sender<M>,
) where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,

View File

@ -1,7 +1,8 @@
pub(crate) mod bit;
pub(crate) mod futures;
pub(crate) mod indexed_priority_queue;
pub(crate) mod priority_queue;
pub(crate) mod rng;
pub(crate) mod seq_futures;
pub(crate) mod slot;
pub(crate) mod spsc_queue;
pub(crate) mod sync_cell;
pub(crate) mod task_set;

View File

@ -1,7 +1,5 @@
//! Bit manipulation and algorithms.
#![allow(unused)]
/// Find the position of the `Nᵗʰ` set bit starting the search from the least
/// significant bit.
///

View File

@ -0,0 +1,696 @@
//! Associative priority queue.
#![allow(unused)]
use std::mem;
/// An associative container optimized for extraction of the value with the
/// lowest key and deletion of arbitrary key-value pairs.
///
/// This implementation has the same theoretical complexity for insert and pull
/// operations as a conventional array-based binary heap but does differ from
/// the latter in some important aspects:
///
/// - elements can be deleted in *O*(log(*N*)) time rather than *O*(*N*) time
/// using a unique index returned at insertion time.
/// - same-key elements are guaranteed to be pulled in FIFO order,
///
/// Under the hood, the priority queue relies on a binary heap cross-indexed
/// with values stored in a slab allocator. Each item of the binary heap
/// contains an index pointing to the associated slab-allocated node, as well as
/// the user-provided key. Each slab node contains the value associated to the
/// key and a back-pointing index to the binary heap. The heap items also
/// contain a unique epoch which allows same-key nodes to be sorted by insertion
/// order. The epoch is used as well to build unique indices that enable
/// efficient deletion of arbitrary key-value pairs.
///
/// The slab-based design is what makes *O*(log(*N*)) deletion possible, but it
/// does come with some trade-offs:
///
/// - its memory footprint is higher because it needs 2 extra pointer-sized
/// indices for each element to cross-index the heap and the slab,
/// - its computational footprint is higher because of the extra cost associated
/// with random slab access; that being said, array-based binary heaps are not
/// extremely cache-friendly to start with so unless the slab becomes very
/// fragmented, this is not expected to introduce more than a reasonable
/// constant-factor penalty compared to a conventional binary heap.
///
/// The computational penalty is partially offset by the fact that the value
/// never needs to be moved from the moment it is inserted until it is pulled.
///
/// Note that the `Copy` bound on they keys could be lifted but this would make
/// the implementation slightly less efficient unless `unsafe` is used.
pub(crate) struct IndexedPriorityQueue<K, V>
where
    K: Copy + Clone + Ord,
{
    /// Binary min-heap of unique keys, cross-indexed with the slab.
    heap: Vec<Item<K>>,
    /// Slab storing the values together with back-pointers into the heap.
    slab: Vec<Node<V>>,
    /// Head of the intrusive linked list of free slab nodes, if any.
    first_free_node: Option<usize>,
    /// Monotonic counter stamping each insertion with a unique epoch.
    next_epoch: u64,
}

impl<K: Copy + Ord, V> IndexedPriorityQueue<K, V> {
    /// Creates an empty `PriorityQueue`.
    pub(crate) fn new() -> Self {
        Self {
            heap: Vec::new(),
            slab: Vec::new(),
            first_free_node: None,
            next_epoch: 0,
        }
    }

    /// Creates an empty `PriorityQueue` with at least the specified capacity.
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self {
            heap: Vec::with_capacity(capacity),
            slab: Vec::with_capacity(capacity),
            first_free_node: None,
            next_epoch: 0,
        }
    }

    /// Returns the number of key-value pairs in the priority queue.
    pub(crate) fn len(&self) -> usize {
        self.heap.len()
    }

    /// Returns `true` if the priority queue contains no key-value pair.
    pub(crate) fn is_empty(&self) -> bool {
        self.heap.is_empty()
    }

    /// Inserts a new key-value pair and returns a unique insertion key.
    ///
    /// This operation has *O*(log(*N*)) amortized worse-case theoretical
    /// complexity and *O*(1) amortized theoretical complexity for a
    /// sufficiently random heap.
    pub(crate) fn insert(&mut self, key: K, value: V) -> InsertKey {
        // Build a unique key from the user-provided key and a unique epoch.
        let epoch = self.next_epoch;
        // The epoch never wraps around: a stale `InsertKey` could otherwise
        // alias a newer node.
        assert_ne!(epoch, u64::MAX);
        self.next_epoch += 1;
        let unique_key = UniqueKey { key, epoch };

        // Add a new node to the slab, either by re-using a free node or by
        // appending a new one.
        let slab_idx = match self.first_free_node {
            Some(idx) => {
                self.first_free_node = self.slab[idx].unwrap_next_free_node();

                self.slab[idx] = Node::HeapNode(HeapNode {
                    value,
                    heap_idx: 0, // temporary value overridden in `sift_up`
                });

                idx
            }
            None => {
                let idx = self.slab.len();
                self.slab.push(Node::HeapNode(HeapNode {
                    value,
                    heap_idx: 0, // temporary value overridden in `sift_up`
                }));

                idx
            }
        };

        // Add a new node at the bottom of the heap.
        let heap_idx = self.heap.len();
        self.heap.push(Item {
            key: unique_key, // temporary value overridden in `sift_up`
            slab_idx: 0,     // temporary value overridden in `sift_up`
        });

        // Sift up the new node.
        self.sift_up(
            Item {
                key: unique_key,
                slab_idx,
            },
            heap_idx,
        );

        InsertKey { slab_idx, epoch }
    }

    /// Pulls the value with the lowest key.
    ///
    /// If there are several equal lowest keys, the value which was inserted
    /// first is returned.
    ///
    /// This operation has *O*(log(N)) non-amortized theoretical complexity.
    pub(crate) fn pull(&mut self) -> Option<(K, V)> {
        let item = self.heap.first()?;
        let top_slab_idx = item.slab_idx;
        let key = item.key.key;

        // Free the top node, extracting its value.
        let value = mem::replace(
            &mut self.slab[top_slab_idx],
            Node::FreeNode(FreeNode {
                next: self.first_free_node,
            }),
        )
        .unwrap_value();

        self.first_free_node = Some(top_slab_idx);

        // Sift the last node at the bottom of the heap from the top of the heap.
        let last_item = self.heap.pop().unwrap();
        if last_item.slab_idx != top_slab_idx {
            self.sift_down(last_item, 0);
        }

        Some((key, value))
    }

    /// Peeks a reference to the key-value pair with the lowest key, leaving it
    /// in the queue.
    ///
    /// If there are several equal lowest keys, a reference to the key-value
    /// pair which was inserted first is returned.
    ///
    /// This operation has *O*(1) non-amortized theoretical complexity.
    pub(crate) fn peek(&self) -> Option<(&K, &V)> {
        let item = self.heap.first()?;
        let top_slab_idx = item.slab_idx;
        let key = &item.key.key;
        let value = self.slab[top_slab_idx].unwrap_value_ref();

        Some((key, value))
    }

    /// Peeks a reference to the lowest key, leaving it in the queue.
    ///
    /// If there are several equal lowest keys, a reference to the key which was
    /// inserted first is returned.
    ///
    /// This operation has *O*(1) non-amortized theoretical complexity.
    pub(crate) fn peek_key(&self) -> Option<&K> {
        let item = self.heap.first()?;

        Some(&item.key.key)
    }

    /// Removes the key-value pair associated to the provided insertion key if
    /// it is still in the queue and returns it.
    ///
    /// Using an insertion key returned from another `PriorityQueue` is a logic
    /// error and could result in the deletion of an arbitrary key-value pair.
    ///
    /// This operation has guaranteed *O*(log(*N*)) theoretical complexity.
    pub(crate) fn extract(&mut self, insert_key: InsertKey) -> Option<(K, V)> {
        let slab_idx = insert_key.slab_idx;

        // Check that (i) there is a node at this index, (ii) this node is in
        // the heap and (iii) this node has the correct epoch.
        match self.slab.get(slab_idx) {
            None | Some(Node::FreeNode(_)) => return None,
            Some(Node::HeapNode(node)) => {
                if self.heap[node.heap_idx].key.epoch != insert_key.epoch {
                    return None;
                }
            }
        };

        // Free the node, extracting its content.
        let node = mem::replace(
            &mut self.slab[slab_idx],
            Node::FreeNode(FreeNode {
                next: self.first_free_node,
            }),
        )
        .unwrap_heap_node();

        self.first_free_node = Some(slab_idx);

        // Save the key before the node is removed from the heap.
        let key = self.heap[node.heap_idx].key.key;

        // If the last item of the heap is not the one to be deleted, sift it up
        // or down as appropriate starting from the vacant spot.
        let last_item = self.heap.pop().unwrap();
        if let Some(item) = self.heap.get(node.heap_idx) {
            if last_item.key < item.key {
                self.sift_up(last_item, node.heap_idx);
            } else {
                self.sift_down(last_item, node.heap_idx);
            }
        }

        Some((key, node.value))
    }

    /// Takes a heap item and, starting at `heap_idx`, moves it up the heap
    /// while a parent has a larger key.
    #[inline]
    fn sift_up(&mut self, item: Item<K>, heap_idx: usize) {
        let mut child_heap_idx = heap_idx;
        let key = &item.key;

        while child_heap_idx != 0 {
            let parent_heap_idx = (child_heap_idx - 1) / 2;

            // Stop when the key is larger or equal to the parent's.
            if key >= &self.heap[parent_heap_idx].key {
                break;
            }

            // Move the parent down one level.
            self.heap[child_heap_idx] = self.heap[parent_heap_idx];
            let parent_slab_idx = self.heap[parent_heap_idx].slab_idx;
            *self.slab[parent_slab_idx].unwrap_heap_index_mut() = child_heap_idx;

            // Note: a second parent-key check here would be dead code — the
            // loop already broke above whenever `key >= parent key`, and the
            // parent item just moved down was copied, not invalidated.

            // Make the former parent the new child.
            child_heap_idx = parent_heap_idx;
        }

        // Move the original item to the position left vacant.
        self.heap[child_heap_idx] = item;
        *self.slab[item.slab_idx].unwrap_heap_index_mut() = child_heap_idx;
    }

    /// Takes a heap item and, starting at `heap_idx`, moves it down the heap
    /// while a child has a smaller key.
    #[inline]
    fn sift_down(&mut self, item: Item<K>, heap_idx: usize) {
        let mut parent_heap_idx = heap_idx;
        let mut child_heap_idx = 2 * parent_heap_idx + 1;
        let key = &item.key;

        while child_heap_idx < self.heap.len() {
            // If the sibling exists and has a smaller key, make it the
            // candidate for swapping.
            if let Some(other_child) = self.heap.get(child_heap_idx + 1) {
                child_heap_idx += (self.heap[child_heap_idx].key > other_child.key) as usize;
            }

            // Stop when the key is smaller or equal to the child with the smallest key.
            if key <= &self.heap[child_heap_idx].key {
                break;
            }

            // Move the child up one level.
            self.heap[parent_heap_idx] = self.heap[child_heap_idx];
            let child_slab_idx = self.heap[child_heap_idx].slab_idx;
            *self.slab[child_slab_idx].unwrap_heap_index_mut() = parent_heap_idx;

            // Make the child the new parent.
            parent_heap_idx = child_heap_idx;
            child_heap_idx = 2 * parent_heap_idx + 1;
        }

        // Move the original item to the position left vacant.
        self.heap[parent_heap_idx] = item;
        *self.slab[item.slab_idx].unwrap_heap_index_mut() = parent_heap_idx;
    }
}

impl<K: Copy + Ord, V> Default for IndexedPriorityQueue<K, V> {
    fn default() -> Self {
        Self::new()
    }
}

/// Data related to a single key-value pair stored in the heap.
#[derive(Copy, Clone)]
struct Item<K: Copy> {
    /// A unique key by which the heap is sorted.
    key: UniqueKey<K>,
    /// An index pointing to the corresponding node in the slab.
    slab_idx: usize,
}

/// Data related to a single key-value pair stored in the slab.
enum Node<V> {
    FreeNode(FreeNode),
    HeapNode(HeapNode<V>),
}

impl<V> Node<V> {
    /// Unwraps the `FreeNode::next` field.
    fn unwrap_next_free_node(&self) -> Option<usize> {
        match self {
            Self::FreeNode(n) => n.next,
            _ => panic!("the node was expected to be a free node"),
        }
    }

    /// Unwraps a `HeapNode`.
    fn unwrap_heap_node(self) -> HeapNode<V> {
        match self {
            Self::HeapNode(n) => n,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps the `HeapNode::value` field.
    fn unwrap_value(self) -> V {
        match self {
            Self::HeapNode(n) => n.value,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps a reference to the `HeapNode::value` field.
    fn unwrap_value_ref(&self) -> &V {
        match self {
            Self::HeapNode(n) => &n.value,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps a mutable reference to the `HeapNode::heap_idx` field.
    fn unwrap_heap_index_mut(&mut self) -> &mut usize {
        match self {
            Self::HeapNode(n) => &mut n.heap_idx,
            _ => panic!("the node was expected to be a heap node"),
        }
    }
}

/// A node that is no longer in the binary heap.
struct FreeNode {
    /// An index pointing to the next free node, if any.
    next: Option<usize>,
}

/// A node currently in the binary heap.
struct HeapNode<V> {
    /// The value associated to this node.
    value: V,
    /// Index of the node in the heap.
    heap_idx: usize,
}

/// A unique insertion key that can be used for key-value pair deletion.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct InsertKey {
    /// An index pointing to a node in the slab.
    slab_idx: usize,
    /// The epoch when the node was inserted.
    epoch: u64,
}

impl InsertKey {
    // Creates an `InsertKey` directly from its raw components.
    //
    // This method is safe: the worse than can happen is for the key to be
    // invalid, in which case it will simply be rejected by
    // `IndexedPriorityQueue::extract`.
    pub(crate) fn from_raw_parts(slab_idx: usize, epoch: u64) -> Self {
        Self { slab_idx, epoch }
    }

    // Decomposes an `InsertKey` into its raw components.
    pub(crate) fn into_raw_parts(self) -> (usize, u64) {
        (self.slab_idx, self.epoch)
    }
}

/// A unique key made of the user-provided key complemented by a unique epoch.
///
/// Implementation note: `UniqueKey` automatically derives `PartialOrd`, which
/// implies that lexicographic order between `key` and `epoch` must be preserved
/// to make sure that `key` has a higher sorting priority than `epoch`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct UniqueKey<K: Copy + Clone> {
    /// The user-provided key.
    key: K,
    /// A unique epoch that indicates the insertion date.
    epoch: u64,
}
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::fmt::Debug;

    use super::*;

    /// A scripted operation applied to the queue under test.
    ///
    /// `Pull` and `ExtractMarked` carry the expected outcome of the operation.
    enum Op<K, V> {
        Insert(K, V),
        InsertAndMark(K, V),
        Pull(Option<(K, V)>),
        ExtractMarked(Option<(K, V)>),
    }

    /// Replays a sequence of operations against a fresh queue, asserting that
    /// each `Pull`/`ExtractMarked` outcome matches the expected value.
    fn check<K: Copy + Clone + Ord + Debug, V: Eq + Debug>(
        operations: impl Iterator<Item = Op<K, V>>,
    ) {
        let mut queue = IndexedPriorityQueue::new();
        // Insertion key of the most recent `InsertAndMark`, if any.
        let mut marked = None;

        for op in operations {
            match op {
                Op::Insert(key, value) => {
                    queue.insert(key, value);
                }
                Op::InsertAndMark(key, value) => {
                    marked = Some(queue.insert(key, value));
                }
                Op::Pull(kv) => {
                    assert_eq!(queue.pull(), kv);
                }
                Op::ExtractMarked(kv) => {
                    assert_eq!(
                        queue.extract(marked.take().expect("no item was marked for deletion")),
                        kv
                    )
                }
            }
        }
    }

    // Fill-then-drain: pulls must come out in ascending key order.
    #[test]
    fn indexed_priority_queue_smoke() {
        let operations = [
            Op::Insert(5, 'a'),
            Op::Insert(2, 'b'),
            Op::Insert(3, 'c'),
            Op::Insert(4, 'd'),
            Op::Insert(9, 'e'),
            Op::Insert(1, 'f'),
            Op::Insert(8, 'g'),
            Op::Insert(0, 'h'),
            Op::Insert(7, 'i'),
            Op::Insert(6, 'j'),
            Op::Pull(Some((0, 'h'))),
            Op::Pull(Some((1, 'f'))),
            Op::Pull(Some((2, 'b'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((5, 'a'))),
            Op::Pull(Some((6, 'j'))),
            Op::Pull(Some((7, 'i'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(Some((9, 'e'))),
        ];

        check(operations.into_iter());
    }

    // Alternating inserts and pulls.
    #[test]
    fn indexed_priority_queue_interleaved() {
        let operations = [
            Op::Insert(2, 'a'),
            Op::Insert(7, 'b'),
            Op::Insert(5, 'c'),
            Op::Pull(Some((2, 'a'))),
            Op::Insert(4, 'd'),
            Op::Pull(Some((4, 'd'))),
            Op::Insert(8, 'e'),
            Op::Insert(2, 'f'),
            Op::Pull(Some((2, 'f'))),
            Op::Pull(Some((5, 'c'))),
            Op::Pull(Some((7, 'b'))),
            Op::Insert(5, 'g'),
            Op::Insert(3, 'h'),
            Op::Pull(Some((3, 'h'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Same-key entries must come out in FIFO (insertion) order.
    #[test]
    fn indexed_priority_queue_equal_keys() {
        let operations = [
            Op::Insert(4, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'd'),
            Op::Insert(8, 'e'),
            Op::Insert(3, 'f'),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((3, 'f'))),
            Op::Pull(Some((4, 'a'))),
            Op::Insert(8, 'g'),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Extraction of an item that is still in the queue.
    #[test]
    fn indexed_priority_queue_extract_valid() {
        let operations = [
            Op::Insert(8, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::InsertAndMark(3, 'd'),
            Op::Insert(2, 'e'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'f'),
            Op::ExtractMarked(Some((3, 'd'))),
            Op::Insert(5, 'g'),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'a'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Extraction with a stale key: the marked item was already pulled, so the
    // extraction must report `None`.
    #[test]
    fn indexed_priority_queue_extract_invalid() {
        let operations = [
            Op::Insert(0, 'a'),
            Op::Insert(7, 'b'),
            Op::InsertAndMark(2, 'c'),
            Op::Insert(4, 'd'),
            Op::Pull(Some((0, 'a'))),
            Op::Insert(2, 'e'),
            Op::Pull(Some((2, 'c'))),
            Op::Insert(4, 'f'),
            Op::ExtractMarked(None),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((7, 'b'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Randomized differential test against a `BTreeMap`-backed shadow queue.
    #[test]
    fn indexed_priority_queue_fuzz() {
        use std::cell::Cell;
        use std::collections::BTreeMap;

        use crate::util::rng::Rng;

        // Number of fuzzing operations.
        const ITER: usize = if cfg!(miri) { 1000 } else { 10_000_000 };

        // Inclusive upper bound for randomly generated keys.
        const MAX_KEY: u64 = 99;

        // Probabilistic weight of each of the 4 operations.
        //
        // The weight for pull values should probably stay close to the sum of
        // the two insertion weights to prevent queue size runaway.
        const INSERT_WEIGHT: u64 = 5;
        const INSERT_AND_MARK_WEIGHT: u64 = 1;
        const PULL_WEIGHT: u64 = INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT;
        const DELETE_MARKED_WEIGHT: u64 = 1;

        // Defines 4 basic operations on the priority queue, each of them being
        // performed on both the tested implementation and on a shadow queue
        // implemented with a `BTreeMap`. Any mismatch between the outcomes of
        // pull and delete operations between the two queues triggers a panic.
        let epoch: Cell<usize> = Cell::new(0);
        let marked: Cell<Option<InsertKey>> = Cell::new(None);
        let shadow_marked: Cell<Option<(u64, usize)>> = Cell::new(None);

        let insert_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                         shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                         key,
                         value| {
            queue.insert(key, value);
            shadow_queue.insert((key, epoch.get()), value);
            epoch.set(epoch.get() + 1);
        };

        let insert_and_mark_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                                  shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                                  key,
                                  value| {
            marked.set(Some(queue.insert(key, value)));
            shadow_queue.insert((key, epoch.get()), value);
            shadow_marked.set(Some((key, epoch.get())));
            epoch.set(epoch.get() + 1);
        };

        let pull_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                       shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
            let value = queue.pull();
            // The shadow queue's first entry is its lowest (key, epoch) pair.
            let shadow_value = match shadow_queue.iter().next() {
                Some((&unique_key, &value)) => {
                    shadow_queue.remove(&unique_key);
                    Some((unique_key.0, value))
                }
                None => None,
            };
            assert_eq!(value, shadow_value);
        };

        let delete_marked_fn =
            |queue: &mut IndexedPriorityQueue<u64, u64>,
             shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
                let success = match marked.take() {
                    Some(delete_key) => Some(queue.extract(delete_key).is_some()),
                    None => None,
                };
                let shadow_success = match shadow_marked.take() {
                    Some(delete_key) => Some(shadow_queue.remove(&delete_key).is_some()),
                    None => None,
                };

                assert_eq!(success, shadow_success);
            };

        // Fuzz away.
        let mut queue = IndexedPriorityQueue::new();
        let mut shadow_queue = BTreeMap::new();

        let rng = Rng::new(12345);
        const TOTAL_WEIGHT: u64 =
            INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT + PULL_WEIGHT + DELETE_MARKED_WEIGHT;

        for _ in 0..ITER {
            // Randomly choose one of the 4 possible operations, respecting the
            // probability weights.
            let mut op = rng.gen_bounded(TOTAL_WEIGHT);
            if op < INSERT_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_fn(&mut queue, &mut shadow_queue, key, val);

                continue;
            }
            op -= INSERT_WEIGHT;
            if op < INSERT_AND_MARK_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_and_mark_fn(&mut queue, &mut shadow_queue, key, val);

                continue;
            }
            op -= INSERT_AND_MARK_WEIGHT;
            if op < PULL_WEIGHT {
                pull_fn(&mut queue, &mut shadow_queue);

                continue;
            }
            delete_marked_fn(&mut queue, &mut shadow_queue);
        }
    }
}

View File

@ -111,7 +111,7 @@ impl<K: Copy + Ord, V> PriorityQueue<K, V> {
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
use super::*;
use super::PriorityQueue;
#[test]
fn priority_smoke() {

View File

@ -1,7 +1,5 @@
//! Pseudo-random number generation.
#![allow(unused)]
use std::cell::Cell;
/// A pseudo-random generator for 64-bit integers based on Wang Yi's Wyrand.

View File

@ -1,11 +1,7 @@
//! Futures and future-related functions.
#![allow(unused)]
//! Sequential composition of futures into a single future.
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
use std::task::{Context, Poll};
/// An owned future which sequentially polls a collection of futures.
@ -53,39 +49,3 @@ impl<F: Future + Unpin> Future for SeqFuture<F> {
Poll::Pending
}
}
/// A future whose execution may be revoked (cancelled) by an external party.
trait RevocableFuture: Future {
    // NOTE(review): no `self` receiver — revocation is queried at the type
    // level rather than per instance; confirm this is intentional.
    fn is_revoked() -> bool;
}
/// A wrapper that adapts a plain future into a `RevocableFuture` that is
/// never revoked.
struct NeverRevokedFuture<F> {
    // The wrapped future.
    inner: F,
}
impl<F: Future> NeverRevokedFuture<F> {
    /// Wraps the provided future; the wrapper never reports revocation.
    fn new(inner: F) -> Self {
        Self { inner }
    }
}
impl<T: Future> Future for NeverRevokedFuture<T> {
type Output = T::Output;
#[inline(always)]
fn poll(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Self::Output> {
unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
}
}
impl<T: Future> RevocableFuture for NeverRevokedFuture<T> {
    /// A `NeverRevokedFuture` is, by construction, never revoked.
    fn is_revoked() -> bool {
        false
    }
}
/// A wrapper for a future whose execution can be revoked from another thread
/// via a shared atomic flag.
struct ConcurrentlyRevocableFuture<F> {
    // The wrapped future.
    inner: F,
    // Shared flag signalling revocation.
    is_revoked: Arc<AtomicBool>,
}

View File

@ -1,8 +1,6 @@
//! A primitive similar to a one-shot channel but without any signaling
//! capability.
#![allow(unused)]
use std::error::Error;
use std::fmt;
use std::marker::PhantomData;
@ -327,8 +325,6 @@ pub(crate) fn slot<T>() -> (SlotWriter<T>, SlotReader<T>) {
mod tests {
use super::*;
use std::io::Read;
use std::sync::Arc;
use std::thread;
#[test]
@ -358,9 +354,9 @@ mod tests {
#[test]
fn slot_multi_threaded_write() {
let (mut writer, mut reader) = slot();
let (writer, mut reader) = slot();
let th = thread::spawn(move || {
thread::spawn(move || {
assert!(writer.write(42).is_ok());
});
@ -370,15 +366,13 @@ mod tests {
return;
}
}
th.join().unwrap();
}
#[test]
fn slot_multi_threaded_drop_writer() {
let (mut writer, mut reader) = slot::<i32>();
let (writer, mut reader) = slot::<i32>();
let th = thread::spawn(move || {
thread::spawn(move || {
drop(writer);
});
@ -389,8 +383,6 @@ mod tests {
return;
}
}
th.join().unwrap();
}
}

View File

@ -1,393 +0,0 @@
//! Single-producer single-consumer unbounded FIFO queue that stores values in
//! fixed-size memory segments.
#![allow(unused)]
use std::cell::Cell;
use std::error::Error;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::ptr::{self, NonNull};
use std::sync::atomic::Ordering;
use crossbeam_utils::CachePadded;
use crate::loom_exports::cell::UnsafeCell;
use crate::loom_exports::sync::atomic::{AtomicBool, AtomicPtr};
use crate::loom_exports::sync::Arc;
/// The number of slots in a single segment.
const SEGMENT_LEN: usize = 32;
/// A slot containing a single value.
struct Slot<T> {
    // Set to `true` by the producer once `value` has been written.
    has_value: AtomicBool,
    // The stored value; only initialized once `has_value` reads `true`.
    value: UnsafeCell<MaybeUninit<T>>,
}
impl<T> Default for Slot<T> {
    /// Returns an empty slot whose value has not been written yet.
    fn default() -> Self {
        Self {
            value: UnsafeCell::new(MaybeUninit::uninit()),
            has_value: AtomicBool::new(false),
        }
    }
}
/// A memory segment containing `SEGMENT_LEN` slots.
struct Segment<T> {
    /// Address of the next segment.
    ///
    /// A null pointer means that the next segment is not allocated yet.
    next_segment: AtomicPtr<Segment<T>>,
    /// The `SEGMENT_LEN` storage slots of this segment.
    data: [Slot<T>; SEGMENT_LEN],
}
impl<T> Segment<T> {
    /// Allocates a new, empty segment on the heap and returns a non-null
    /// pointer to it.
    ///
    /// The allocation is intentionally leaked: ownership is tracked manually
    /// through raw pointers, and deallocation is the consumer's
    /// responsibility.
    fn allocate_new() -> NonNull<Self> {
        let segment = Self {
            next_segment: AtomicPtr::new(ptr::null_mut()),
            data: Default::default(),
        };

        // `Box::leak` returns a valid mutable reference, so the resulting
        // pointer is guaranteed non-null without resorting to `unsafe`.
        NonNull::from(Box::leak(Box::new(segment)))
    }
}
/// The head of the queue from which values are popped.
struct Head<T> {
    /// Pointer to the segment at the head of the queue.
    ///
    /// Accessed only by the consumer side (see the safety comment in `pop`),
    /// hence the absence of synchronization on these fields.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be read.
    ///
    /// If the index is equal to the segment length, it is necessary to move to
    /// the next segment before the next value can be read.
    next_read_idx: usize,
}
/// The tail of the queue to which values are pushed.
struct Tail<T> {
    /// Pointer to the segment at the tail of the queue.
    ///
    /// Accessed only by the producer side (see the safety comment in `push`),
    /// hence the absence of synchronization on these fields.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be written.
    ///
    /// If the index is equal to the segment length, a new segment must be
    /// allocated before a new value can be written.
    next_write_idx: usize,
}
/// A single-producer, single-consumer unbounded FIFO queue.
struct Queue<T> {
    // Consumer-side cursor; cache-padded to avoid false sharing with `tail`.
    head: CachePadded<UnsafeCell<Head<T>>>,
    // Producer-side cursor; cache-padded to avoid false sharing with `head`.
    tail: CachePadded<UnsafeCell<Tail<T>>>,
}
impl<T> Queue<T> {
/// Creates a new queue.
fn new() -> Self {
let segment = Segment::allocate_new();
let head = Head {
segment,
next_read_idx: 0,
};
let tail = Tail {
segment,
next_write_idx: 0,
};
Self {
head: CachePadded::new(UnsafeCell::new(head)),
tail: CachePadded::new(UnsafeCell::new(tail)),
}
}
    /// Pushes a new value.
    ///
    /// This is the producer-side operation: it writes the value into the
    /// current tail slot, allocating a fresh segment first when the current
    /// one is full.
    ///
    /// # Safety
    ///
    /// The method cannot be called from multiple threads concurrently.
    unsafe fn push(&self, value: T) {
        // Safety: this is the only thread accessing the tail.
        let tail = self.tail.with_mut(|p| &mut *p);

        // If the whole segment has been written, allocate a new segment.
        if tail.next_write_idx == SEGMENT_LEN {
            let old_segment = tail.segment;
            tail.segment = Segment::allocate_new();

            // Safety: the old segment is still allocated since the consumer
            // cannot deallocate it before `next_segment` is set to a non-null
            // value.
            old_segment
                .as_ref()
                .next_segment
                .store(tail.segment.as_ptr(), Ordering::Release);

            tail.next_write_idx = 0;
        }

        // Safety: the tail segment is allocated since the consumer cannot
        // deallocate it before `next_segment` is set to a non-null value.
        let data = &tail.segment.as_ref().data[tail.next_write_idx];

        // Safety: we have exclusive access to the slot value since the consumer
        // cannot access it before `has_value` is set to true.
        data.value.with_mut(|p| (*p).write(value));

        // Ordering: this Release store synchronizes with the Acquire load in
        // `pop` and ensures that the value is visible to the consumer once
        // `has_value` reads `true`.
        data.has_value.store(true, Ordering::Release);

        tail.next_write_idx += 1;
    }
/// Pops a new value.
///
/// # Safety
///
/// The method cannot be called from multiple threads concurrently.
unsafe fn pop(&self) -> Option<T> {
// Safety: this is the only thread accessing the head.
let head = self.head.with_mut(|p| &mut *p);
// If the whole segment has been read, try to move to the next segment.
if head.next_read_idx == SEGMENT_LEN {
// Read the next segment or return `None` if it is not ready yet.
//
// Safety: the head segment is still allocated since we are the only
// thread that can deallocate it.
let next_segment = head.segment.as_ref().next_segment.load(Ordering::Acquire);
let next_segment = NonNull::new(next_segment)?;
// Deallocate the old segment.
//
// Safety: the pointer was initialized from a box and the segment is
// still allocated since we are the only thread that can deallocate
// it.
let _ = Box::from_raw(head.segment.as_ptr());
// Update the segment and the next index.
head.segment = next_segment;
head.next_read_idx = 0;
}
let data = &head.segment.as_ref().data[head.next_read_idx];
// Ordering: this Acquire load synchronizes with the Release store in
// `push` and ensures that the value is visible once `has_value` reads
// `true`.
if !data.has_value.load(Ordering::Acquire) {
return None;
}
// Safety: since `has_value` is `true` then we have exclusive ownership
// of the value and we know that it was initialized.
let value = data.value.with(|p| (*p).assume_init_read());
head.next_read_idx += 1;
Some(value)
}
}
impl<T> Drop for Queue<T> {
    /// Drops all values still in the queue and frees the remaining segment.
    fn drop(&mut self) {
        unsafe {
            // Drop all values.
            //
            // Note: `pop` deallocates every fully-read segment as the head
            // crosses into the next one, so after draining only the head
            // segment remains allocated.
            while self.pop().is_some() {}
            // All values have been dropped: the last segment can be freed.
            // Safety: this is the only thread accessing the head since both the
            // consumer and producer have been dropped.
            let head = self.head.with_mut(|p| &mut *p);
            // Safety: the pointer was initialized from a box and the segment is
            // still allocated since we are the only thread that can deallocate
            // it.
            let _ = Box::from_raw(head.segment.as_ptr());
        }
    }
}
// Safety: the queue can be shared and moved across threads when `T: Send`
// because the only mutating entry points (`push` and `pop`) are `unsafe` and
// require their callers to guarantee single-threaded access to each endpoint;
// cross-thread visibility of values is handled by the Release/Acquire pair on
// `has_value` and `next_segment`.
unsafe impl<T: Send> Send for Queue<T> {}
unsafe impl<T: Send> Sync for Queue<T> {}
// NOTE(review): unwind safety is asserted unconditionally here; the queue's
// state is only reachable through `push`/`pop`, which do not expose partially
// mutated state on panic — confirm if `T`'s own unwind safety should matter.
impl<T> UnwindSafe for Queue<T> {}
impl<T> RefUnwindSafe for Queue<T> {}
/// A handle to a single-producer, single-consumer queue that can push values.
pub(crate) struct Producer<T> {
    /// The queue, shared with the matching `Consumer`.
    queue: Arc<Queue<T>>,
    /// Makes the handle `!Sync` so that the unsafe `Queue::push` can never be
    /// invoked from several threads concurrently through shared references.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
impl<T> Producer<T> {
    /// Pushes a value to the queue.
    ///
    /// Fails with `PushError` if the consumer handle was dropped, since the
    /// value could then never be popped.
    pub(crate) fn push(&self, value: T) -> Result<(), PushError> {
        // A strong count of 1 means this producer holds the only remaining
        // handle to the queue, i.e. the consumer was dropped.
        match Arc::strong_count(&self.queue) {
            1 => Err(PushError {}),
            _ => {
                // Safety: this handle is `!Sync`, so this is the only thread
                // pushing values.
                unsafe { self.queue.push(value) };

                Ok(())
            }
        }
    }
}
/// Error returned when a push failed due to the consumer being dropped.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(crate) struct PushError {}

impl fmt::Display for PushError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("sending message into a closed mailbox")
    }
}

impl Error for PushError {}
/// A handle to a single-producer, single-consumer queue that can pop values.
pub(crate) struct Consumer<T> {
    /// The queue, shared with the matching `Producer`.
    queue: Arc<Queue<T>>,
    /// Makes the handle `!Sync` so that the unsafe `Queue::pop` can never be
    /// invoked from several threads concurrently through shared references.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
impl<T> Consumer<T> {
    /// Pops a value from the queue.
    ///
    /// Returns `None` if the queue is currently empty.
    pub(crate) fn pop(&self) -> Option<T> {
        // Safety: this handle is `!Sync`, so this is the only thread popping
        // values.
        unsafe { self.queue.pop() }
    }
}
/// Creates the producer and consumer handles of a single-producer,
/// single-consumer queue.
pub(crate) fn spsc_queue<T>() -> (Producer<T>, Consumer<T>) {
    let queue = Arc::new(Queue::new());

    (
        Producer {
            queue: Arc::clone(&queue),
            _non_sync_phantom: PhantomData,
        },
        Consumer {
            queue,
            _non_sync_phantom: PhantomData,
        },
    )
}
/// Regular (non-loom) tests.
//
// NOTE(review): the original doc comment said "Loom tests." but the `cfg`
// below selects the *non*-loom build; fixed to match.
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;
    use std::thread;
    // Pushes a sequence of integers from one thread and pops them from
    // another, checking that values come out in FIFO order.
    #[test]
    fn spsc_queue_basic() {
        // Use a smaller workload under Miri, which executes much more slowly.
        const VALUE_COUNT: usize = if cfg!(miri) { 1000 } else { 100_000 };
        let (producer, consumer) = spsc_queue();
        let th = thread::spawn(move || {
            for i in 0..VALUE_COUNT {
                // Spin until the producer has made the next value available.
                let value = loop {
                    if let Some(v) = consumer.pop() {
                        break v;
                    }
                };
                assert_eq!(value, i);
            }
        });
        for i in 0..VALUE_COUNT {
            producer.push(i).unwrap();
        }
        th.join().unwrap();
    }
}
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;
    use loom::model::Builder;
    use loom::thread;
    // Exhaustively explores interleavings of concurrent push/pop within a
    // single segment. Pops that find the queue empty are simply skipped, so
    // the consumer may observe fewer than `VALUE_COUNT` values.
    #[test]
    fn loom_spsc_queue_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT: usize = 10;
        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }
        builder.check(move || {
            let (producer, consumer) = spsc_queue();
            let th = thread::spawn(move || {
                let mut value = 0;
                for _ in 0..VALUE_COUNT {
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });
            for i in 0..VALUE_COUNT {
                let _ = producer.push(i);
            }
            th.join().unwrap();
        });
    }
    // Same as above, but positioned so that the concurrent pushes and pops
    // straddle a segment boundary, exercising segment allocation, linking and
    // deallocation under all explored interleavings.
    #[test]
    fn loom_spsc_queue_new_segment() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT_BEFORE: usize = 5;
        const VALUE_COUNT_AFTER: usize = 5;
        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }
        builder.check(move || {
            let (producer, consumer) = spsc_queue();
            // Fill up the first segment except for the last `VALUE_COUNT_BEFORE` slots.
            for i in 0..(SEGMENT_LEN - VALUE_COUNT_BEFORE) {
                producer.push(i).unwrap();
                consumer.pop();
            }
            let th = thread::spawn(move || {
                let mut value = SEGMENT_LEN - VALUE_COUNT_BEFORE;
                for _ in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });
            for i in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                let _ = producer.push(i);
            }
            th.join().unwrap();
        });
    }
}

View File

@ -1,3 +1,5 @@
//! Primitive for the efficient management of concurrent tasks.
use std::sync::atomic::Ordering;
use std::sync::Arc;
@ -21,31 +23,36 @@ const COUNTDOWN_MASK: u64 = !INDEX_MASK;
/// scheduled tasks.
const COUNTDOWN_ONE: u64 = 1 << 32;
/// A set of tasks that may be scheduled cheaply and can be requested to wake a
/// parent task only when a given amount of tasks have been scheduled.
/// A primitive that simplifies the management of a set of tasks scheduled
/// concurrently.
///
/// This object maintains both a list of all active tasks and a list of the
/// subset of active tasks currently scheduled. The latter is stored in a
/// Treiber stack which links tasks through indices rather than pointers. Using
/// indices has two advantages: (i) it enables a fully safe implementation and
/// (ii) it makes it possible to use a single CAS to simultaneously move the
/// head and decrement the outstanding amount of tasks to be scheduled before
/// the parent task is notified.
pub(super) struct TaskSet {
/// Set of all active tasks, scheduled or not.
/// A `TaskSet` maintains both a vector-based list of tasks (or more accurately,
/// task waker handles) and a linked list of the subset of tasks that are
/// currently scheduled. The latter is stored in a vector-based Treiber stack
/// which links tasks through indices rather than pointers. Using indices has
/// two advantages: (i) it makes a fully safe implementation possible and (ii)
/// it can take advantage of a single CAS to simultaneously move the head and
/// decrement the outstanding amount of tasks to be scheduled before the parent
/// task is notified.
///
/// This can be used to implement primitives similar to `FuturesOrdered` or
/// `FuturesUnordered` in the `futures` crate.
///
/// The `notify_count` argument of `TaskSet::take_scheduled()` can be set to
/// more than 1 to wake the parent task less frequently. For instance, if
/// `notify_count` is set to the number of pending sub-tasks, the parent task
/// will only be woken once all subtasks have been woken.
pub(crate) struct TaskSet {
/// Set of all tasks, scheduled or not.
///
/// In some rare cases, the back of the vector can also contain inactive
/// (retired) tasks.
/// In some cases, the use of `resize()` to shrink the task set may leave
/// inactive tasks at the back of the vector, in which case the length of
/// the vector will exceed `task_count`.
tasks: Vec<Arc<Task>>,
/// Head of the Treiber stack for scheduled tasks.
///
/// The lower bits specify the index of the last scheduled task, if any,
/// whereas the upper bits specify the countdown of tasks still to be
/// scheduled before the parent task is notified.
head: Arc<AtomicU64>,
/// A notifier used to wake the parent task.
notifier: WakeSource,
/// Count of all active tasks, scheduled or not.
/// Shared Treiber stack head and parent task notifier.
shared: Arc<Shared>,
/// Count of all tasks, scheduled or not.
task_count: usize,
}
@ -53,35 +60,71 @@ impl TaskSet {
/// Creates an initially empty set of tasks associated to the parent task
/// which notifier is provided.
#[allow(clippy::assertions_on_constants)]
pub(super) fn new(notifier: WakeSource) -> Self {
pub(crate) fn new(notifier: WakeSource) -> Self {
// Only 32-bit targets and above are supported.
assert!(usize::BITS >= u32::BITS);
Self {
tasks: Vec::new(),
head: Arc::new(AtomicU64::new(EMPTY as u64)),
notifier,
shared: Arc::new(Shared {
head: AtomicU64::new(EMPTY as u64),
notifier,
}),
task_count: 0,
}
}
/// Steals scheduled tasks if any and returns an iterator over their
/// indices, otherwise returns `None` and requests a notification to be sent
/// after `notify_count` tasks have been scheduled.
/// Creates a set of `len` tasks associated to the parent task which
/// notifier is provided.
#[allow(clippy::assertions_on_constants)]
pub(crate) fn with_len(notifier: WakeSource, len: usize) -> Self {
// Only 32-bit targets and above are supported.
assert!(usize::BITS >= u32::BITS);
assert!(len <= EMPTY as usize && len <= SLEEPING as usize);
let len = len as u32;
let shared = Arc::new(Shared {
head: AtomicU64::new(EMPTY as u64),
notifier,
});
let tasks: Vec<_> = (0..len)
.map(|idx| {
Arc::new(Task {
idx,
shared: shared.clone(),
next: AtomicU32::new(SLEEPING),
})
})
.collect();
Self {
tasks,
shared,
task_count: len as usize,
}
}
/// Take all scheduled tasks and returns an iterator over their indices, or
/// if there are no currently scheduled tasks returns `None` and requests a
/// notification to be sent after `notify_count` tasks have been scheduled.
///
/// In all cases, the list of scheduled tasks is guaranteed to be empty
/// after this call.
/// In all cases, the list of scheduled tasks will be empty right after this
/// call.
///
/// If some tasks were stolen, no notification is requested.
/// If there were scheduled tasks, no notification is requested because this
/// method is expected to be called repeatedly until it returns `None`.
/// Failure to do so will result in missed notifications.
///
/// If no tasks were stolen, the notification is guaranteed to be triggered
/// no later than after `notify_count` tasks have been scheduled, though it
/// may in some cases be triggered earlier. If the specified `notify_count`
/// is zero then no notification is requested.
pub(super) fn steal_scheduled(&self, notify_count: usize) -> Option<TaskIterator<'_>> {
/// If no tasks were scheduled, the notification is guaranteed to be
/// triggered no later than after `notify_count` tasks have been scheduled,
/// though it may in some cases be triggered earlier. If the specified
/// `notify_count` is zero then no notification is requested.
pub(crate) fn take_scheduled(&self, notify_count: usize) -> Option<TaskIterator<'_>> {
let countdown = u32::try_from(notify_count).unwrap();
let mut head = self.head.load(Ordering::Relaxed);
let mut head = self.shared.head.load(Ordering::Relaxed);
loop {
let new_head = if head & INDEX_MASK == EMPTY as u64 {
(countdown as u64 * COUNTDOWN_ONE) | EMPTY as u64
@ -93,7 +136,7 @@ impl TaskSet {
// operations in `Task::wake_by_ref` and ensures that all memory
// operations performed during and before the tasks were scheduled
// become visible.
match self.head.compare_exchange_weak(
match self.shared.head.compare_exchange_weak(
head,
new_head,
Ordering::Acquire,
@ -122,22 +165,22 @@ impl TaskSet {
/// notification is currently requested.
///
/// All discarded tasks are put in the sleeping (unscheduled) state.
pub(super) fn discard_scheduled(&self) {
if self.head.load(Ordering::Relaxed) != EMPTY as u64 {
pub(crate) fn discard_scheduled(&self) {
if self.shared.head.load(Ordering::Relaxed) != EMPTY as u64 {
// Dropping the iterator ensures that all tasks are put in the
// sleeping state.
let _ = self.steal_scheduled(0);
let _ = self.take_scheduled(0);
}
}
/// Modify the number of active tasks.
/// Set the number of active tasks.
///
/// Note that this method may discard all scheduled tasks.
/// Note that this method may discard already scheduled tasks.
///
/// # Panic
///
/// This method will panic if `len` is greater than `u32::MAX - 1`.
pub(super) fn resize(&mut self, len: usize) {
pub(crate) fn resize(&mut self, len: usize) {
assert!(len <= EMPTY as usize && len <= SLEEPING as usize);
self.task_count = len;
@ -149,37 +192,46 @@ impl TaskSet {
self.tasks.push(Arc::new(Task {
idx,
notifier: self.notifier.clone(),
shared: self.shared.clone(),
next: AtomicU32::new(SLEEPING),
head: self.head.clone(),
}));
}
return;
}
// Try to remove inactive tasks.
// Try to shrink the vector of tasks.
//
// The main issue when shrinking the set of active tasks is that stale
// The main issue when shrinking the vector of tasks is that stale
// wakers may still be around and may at any moment be scheduled and
// insert their index in the list of scheduled tasks. If it cannot be
// guaranteed that this will not happen, then a reference to that task
// must be kept or the iterator for scheduled tasks will panic when
// indexing a stale task.
// insert their task index in the list of scheduled tasks. If it cannot
// be guaranteed that this will not happen, then the vector of tasks
// cannot be shrunk further, otherwise the iterator for scheduled tasks
// will later fail when reaching a task with an invalid index.
//
// To prevent an inactive task from being spuriously scheduled, it is
// enough to pretend that the task is already scheduled by setting its
// `next` field to anything else than `SLEEPING`. However, this could
// race if the task has just set its `next` field but has not yet
// updated the head of the list of scheduled tasks, so this can only be
// done reliably if the task is currently sleeping.
// We follow a 2-steps strategy:
//
// 1) remove all tasks currently in the list of scheduled task and set
// them to `SLEEPING` state in case some of them might have an index
// that will be invalidated when the vector of tasks is shrunk;
//
// 2) attempt to iteratively shrink the vector of tasks by removing
// tasks starting from the back of the vector:
// - If a task is in the `SLEEPING` state, then its `next` pointer is
// changed to an arbitrary value other than`SLEEPING`, but the task
// is not inserted in the list of scheduled tasks; this way, the
// task will be effectively rendered inactive. The task can now be
// removed from the vector.
// - If a task is found in a non-`SLEEPING` state (meaning that there
// was a race and the task was scheduled after step 1) then abandon
// further shrinking and leave this task in the vector; the iterator
// for scheduled tasks mitigates such situation by only yielding
// task indices that are within the expected range.
// All scheduled tasks are first unscheduled in case some of them are
// now inactive.
// Step 1: unscheduled tasks that may be scheduled.
self.discard_scheduled();
// The position of tasks in the set must stay consistent with their
// associated index so tasks are popped from the back.
// Step 2: attempt to remove tasks starting at the back of the vector.
while self.tasks.len() > len {
// There is at least one task since `len()` was non-zero.
let task = self.tasks.last().unwrap();
@ -200,11 +252,11 @@ impl TaskSet {
}
}
/// Returns `true` if one or more tasks are currently scheduled.
pub(super) fn has_scheduled(&self) -> bool {
/// Returns `true` if one or more sub-tasks are currently scheduled.
pub(crate) fn has_scheduled(&self) -> bool {
// Ordering: the content of the head is only used as an advisory flag so
// Relaxed ordering is sufficient.
self.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64
self.shared.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64
}
/// Returns a reference to the waker associated to the active task with the
@ -214,29 +266,36 @@ impl TaskSet {
///
/// This method will panic if there is no active task with the provided
/// index.
pub(super) fn waker_of(&self, idx: usize) -> WakerRef {
pub(crate) fn waker_of(&self, idx: usize) -> WakerRef {
assert!(idx < self.task_count);
waker_ref(&self.tasks[idx])
}
}
/// Internals shared between a `TaskSet` and its associated `Task`s.
struct Shared {
/// Head of the Treiber stack for scheduled tasks.
///
/// The lower 32 bits specify the index of the last scheduled task (the
/// actual head), if any, whereas the upper 32 bits specify the countdown of
/// tasks still to be scheduled before the parent task is notified.
head: AtomicU64,
/// A notifier used to wake the parent task.
notifier: WakeSource,
}
/// An asynchronous task associated with the future of a sender.
pub(super) struct Task {
struct Task {
/// Index of this task.
idx: u32,
/// A notifier triggered once a certain number of tasks have been scheduled.
notifier: WakeSource,
/// Index of the next task in the list of scheduled tasks.
next: AtomicU32,
/// Head of the list of scheduled tasks.
head: Arc<AtomicU64>,
shared: Arc<Shared>,
}
impl ArcWake for Task {
fn wake(self: Arc<Self>) {
Self::wake_by_ref(&self);
}
fn wake_by_ref(arc_self: &Arc<Self>) {
let mut next = arc_self.next.load(Ordering::Relaxed);
@ -251,7 +310,7 @@ impl ArcWake for Task {
// CAS on the head already ensure that all memory operations
// that precede this call to `wake_by_ref` become visible when
// the tasks are stolen.
let head = arc_self.head.load(Ordering::Relaxed);
let head = arc_self.shared.head.load(Ordering::Relaxed);
match arc_self.next.compare_exchange_weak(
SLEEPING,
(head & INDEX_MASK) as u32,
@ -297,7 +356,7 @@ impl ArcWake for Task {
// that the value of the `next` field as well as all memory
// operations that precede this call to `wake_by_ref` become visible
// when the tasks are stolen.
match arc_self.head.compare_exchange_weak(
match arc_self.shared.head.compare_exchange_weak(
head,
new_head,
Ordering::Release,
@ -307,7 +366,7 @@ impl ArcWake for Task {
// If the countdown has just been cleared, it is necessary
// to send a notification.
if countdown == COUNTDOWN_ONE {
arc_self.notifier.notify();
arc_self.shared.notifier.notify();
}
return;
@ -339,7 +398,7 @@ impl ArcWake for Task {
}
/// An iterator over scheduled tasks.
pub(super) struct TaskIterator<'a> {
pub(crate) struct TaskIterator<'a> {
task_list: &'a TaskSet,
next_index: u32,
}

View File

@ -2,9 +2,10 @@
use std::time::Duration;
use asynchronix::model::{Model, Output};
use asynchronix::model::Model;
use asynchronix::ports::{EventBuffer, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
use asynchronix::time::{ActionKey, MonotonicTime, Scheduler};
#[test]
fn model_schedule_event() {
@ -27,13 +28,14 @@ fn model_schedule_event() {
let mut model = TestModel::default();
let mbox = Mailbox::new();
let mut output = model.output.connect_stream().0;
let mut output = EventBuffer::new();
model.output.connect_sink(&output);
let addr = mbox.address();
let t0 = MonotonicTime::EPOCH;
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
simu.send_event(TestModel::trigger, (), addr);
simu.process_event(TestModel::trigger, (), addr);
simu.step();
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
assert!(output.next().is_some());
@ -46,7 +48,7 @@ fn model_cancel_future_keyed_event() {
#[derive(Default)]
struct TestModel {
output: Output<i32>,
key: Option<EventKey>,
key: Option<ActionKey>,
}
impl TestModel {
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
@ -71,13 +73,14 @@ fn model_cancel_future_keyed_event() {
let mut model = TestModel::default();
let mbox = Mailbox::new();
let mut output = model.output.connect_stream().0;
let mut output = EventBuffer::new();
model.output.connect_sink(&output);
let addr = mbox.address();
let t0 = MonotonicTime::EPOCH;
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
simu.send_event(TestModel::trigger, (), addr);
simu.process_event(TestModel::trigger, (), addr);
simu.step();
assert_eq!(simu.time(), t0 + Duration::from_secs(1));
assert_eq!(output.next(), Some(1));
@ -91,7 +94,7 @@ fn model_cancel_same_time_keyed_event() {
#[derive(Default)]
struct TestModel {
output: Output<i32>,
key: Option<EventKey>,
key: Option<ActionKey>,
}
impl TestModel {
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
@ -116,13 +119,14 @@ fn model_cancel_same_time_keyed_event() {
let mut model = TestModel::default();
let mbox = Mailbox::new();
let mut output = model.output.connect_stream().0;
let mut output = EventBuffer::new();
model.output.connect_sink(&output);
let addr = mbox.address();
let t0 = MonotonicTime::EPOCH;
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
simu.send_event(TestModel::trigger, (), addr);
simu.process_event(TestModel::trigger, (), addr);
simu.step();
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
assert_eq!(output.next(), Some(1));
@ -157,13 +161,14 @@ fn model_schedule_periodic_event() {
let mut model = TestModel::default();
let mbox = Mailbox::new();
let mut output = model.output.connect_stream().0;
let mut output = EventBuffer::new();
model.output.connect_sink(&output);
let addr = mbox.address();
let t0 = MonotonicTime::EPOCH;
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
simu.send_event(TestModel::trigger, (), addr);
simu.process_event(TestModel::trigger, (), addr);
// Move to the next events at t0 + 2s + k*3s.
for k in 0..10 {
@ -182,7 +187,7 @@ fn model_cancel_periodic_event() {
#[derive(Default)]
struct TestModel {
output: Output<()>,
key: Option<EventKey>,
key: Option<ActionKey>,
}
impl TestModel {
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
@ -206,13 +211,14 @@ fn model_cancel_periodic_event() {
let mut model = TestModel::default();
let mbox = Mailbox::new();
let mut output = model.output.connect_stream().0;
let mut output = EventBuffer::new();
model.output.connect_sink(&output);
let addr = mbox.address();
let t0 = MonotonicTime::EPOCH;
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
simu.send_event(TestModel::trigger, (), addr);
simu.process_event(TestModel::trigger, (), addr);
simu.step();
assert_eq!(simu.time(), t0 + Duration::from_secs(2));

View File

@ -2,8 +2,9 @@
use std::time::Duration;
use asynchronix::model::{Model, Output};
use asynchronix::simulation::{Address, EventStream, Mailbox, SimInit, Simulation};
use asynchronix::model::Model;
use asynchronix::ports::{EventBuffer, Output};
use asynchronix::simulation::{Address, Mailbox, SimInit, Simulation};
use asynchronix::time::MonotonicTime;
// Input-to-output pass-through model.
@ -26,12 +27,13 @@ impl<T: Clone + Send + 'static> Model for PassThroughModel<T> {}
/// output) running as fast as possible.
fn passthrough_bench<T: Clone + Send + 'static>(
t0: MonotonicTime,
) -> (Simulation, Address<PassThroughModel<T>>, EventStream<T>) {
) -> (Simulation, Address<PassThroughModel<T>>, EventBuffer<T>) {
// Bench assembly.
let mut model = PassThroughModel::new();
let mbox = Mailbox::new();
let out_stream = model.output.connect_stream().0;
let out_stream = EventBuffer::new();
model.output.connect_sink(&out_stream);
let addr = mbox.address();
let simu = SimInit::new().add_model(model, mbox).init(t0);
@ -243,18 +245,20 @@ fn timestamp_bench(
) -> (
Simulation,
Address<TimestampModel>,
EventStream<(Instant, SystemTime)>,
EventBuffer<(Instant, SystemTime)>,
) {
// Bench assembly.
let mut model = TimestampModel::default();
let mbox = Mailbox::new();
let stamp_stream = model.stamp.connect_stream().0;
let stamp_stream = EventBuffer::new();
model.stamp.connect_sink(&stamp_stream);
let addr = mbox.address();
let simu = SimInit::new()
.add_model(model, mbox)
.init_with_clock(t0, clock);
.set_clock(clock)
.init(t0);
(simu, addr, stamp_stream)
}