From e84e802f09f733d90eadf97b5aea76e02e58b581 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Thu, 25 Apr 2024 11:12:54 +0200 Subject: [PATCH 01/12] Initial (g)RPC implementation --- .github/workflows/ci.yml | 2 +- .github/workflows/loom.yml | 1 - .gitignore | 2 +- asynchronix/Cargo.toml | 34 +- asynchronix/build.rs | 17 + asynchronix/examples/espresso_machine.rs | 48 +- asynchronix/examples/power_supply.rs | 25 +- asynchronix/examples/stepper_motor.rs | 12 +- asynchronix/src/channel.rs | 9 +- asynchronix/src/channel/queue.rs | 2 +- asynchronix/src/executor.rs | 2 +- asynchronix/src/lib.rs | 62 +- asynchronix/src/model.rs | 23 +- asynchronix/src/ports.rs | 35 + asynchronix/src/ports/input.rs | 4 + .../src/{model => ports/input}/markers.rs | 0 .../src/{model => ports/input}/model_fn.rs | 4 +- .../src/{model/ports.rs => ports/output.rs} | 100 +- .../ports => ports/output}/broadcaster.rs | 272 +++-- .../{model/ports => ports/output}/sender.rs | 91 +- asynchronix/src/ports/sink.rs | 54 + asynchronix/src/ports/sink/event_buffer.rs | 138 +++ asynchronix/src/ports/sink/event_slot.rs | 120 ++ asynchronix/src/ports/source.rs | 295 +++++ asynchronix/src/ports/source/broadcaster.rs | 759 ++++++++++++ asynchronix/src/ports/source/sender.rs | 136 +++ asynchronix/src/rpc.rs | 10 + .../src/rpc/api/custom_transport.proto | 50 + asynchronix/src/rpc/api/simulation.proto | 161 +++ asynchronix/src/rpc/codegen.rs | 5 + asynchronix/src/rpc/codegen/.gitkeep | 0 .../src/rpc/codegen/custom_transport.rs | 111 ++ asynchronix/src/rpc/codegen/simulation.rs | 1071 +++++++++++++++++ asynchronix/src/rpc/endpoint_registry.rs | 307 +++++ asynchronix/src/rpc/generic_server.rs | 673 +++++++++++ asynchronix/src/rpc/grpc.rs | 146 +++ asynchronix/src/rpc/key_registry.rs | 47 + asynchronix/src/simulation.rs | 189 +-- asynchronix/src/simulation/endpoints.rs | 69 -- asynchronix/src/simulation/sim_init.rs | 38 +- asynchronix/src/time.rs | 7 +- asynchronix/src/time/clock.rs | 126 +- 
asynchronix/src/time/monotonic_time.rs | 648 +--------- asynchronix/src/time/scheduler.rs | 480 ++++---- asynchronix/src/util.rs | 5 +- asynchronix/src/util/bit.rs | 2 - .../src/util/indexed_priority_queue.rs | 696 +++++++++++ asynchronix/src/util/priority_queue.rs | 2 +- asynchronix/src/util/rng.rs | 2 - .../src/util/{futures.rs => seq_futures.rs} | 42 +- asynchronix/src/util/slot.rs | 16 +- asynchronix/src/util/spsc_queue.rs | 393 ------ .../ports/broadcaster => util}/task_set.rs | 213 ++-- asynchronix/tests/model_scheduling.rs | 36 +- asynchronix/tests/simulation_scheduling.rs | 18 +- 55 files changed, 5814 insertions(+), 1996 deletions(-) create mode 100644 asynchronix/build.rs create mode 100644 asynchronix/src/ports.rs create mode 100644 asynchronix/src/ports/input.rs rename asynchronix/src/{model => ports/input}/markers.rs (100%) rename asynchronix/src/{model => ports/input}/model_fn.rs (99%) rename asynchronix/src/{model/ports.rs => ports/output.rs} (59%) rename asynchronix/src/{model/ports => ports/output}/broadcaster.rs (80%) rename asynchronix/src/{model/ports => ports/output}/sender.rs (73%) create mode 100644 asynchronix/src/ports/sink.rs create mode 100644 asynchronix/src/ports/sink/event_buffer.rs create mode 100644 asynchronix/src/ports/sink/event_slot.rs create mode 100644 asynchronix/src/ports/source.rs create mode 100644 asynchronix/src/ports/source/broadcaster.rs create mode 100644 asynchronix/src/ports/source/sender.rs create mode 100644 asynchronix/src/rpc.rs create mode 100644 asynchronix/src/rpc/api/custom_transport.proto create mode 100644 asynchronix/src/rpc/api/simulation.proto create mode 100644 asynchronix/src/rpc/codegen.rs create mode 100644 asynchronix/src/rpc/codegen/.gitkeep create mode 100644 asynchronix/src/rpc/codegen/custom_transport.rs create mode 100644 asynchronix/src/rpc/codegen/simulation.rs create mode 100644 asynchronix/src/rpc/endpoint_registry.rs create mode 100644 asynchronix/src/rpc/generic_server.rs create mode 
100644 asynchronix/src/rpc/grpc.rs create mode 100644 asynchronix/src/rpc/key_registry.rs delete mode 100644 asynchronix/src/simulation/endpoints.rs create mode 100644 asynchronix/src/util/indexed_priority_queue.rs rename asynchronix/src/util/{futures.rs => seq_futures.rs} (62%) delete mode 100644 asynchronix/src/util/spsc_queue.rs rename asynchronix/src/{model/ports/broadcaster => util}/task_set.rs (62%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f156eac..e25d852 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,7 @@ jobs: toolchain: ${{ matrix.rust }} - name: Run cargo check - run: cargo check --all-features + run: cargo check test: name: Test suite diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index 7b2c805..e739386 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -13,7 +13,6 @@ on: - 'asynchronix/src/model/ports/broadcaster.rs' - 'asynchronix/src/model/ports/broadcaster/**' - 'asynchronix/src/util/slot.rs' - - 'asynchronix/src/util/spsc_queue.rs' - 'asynchronix/src/util/sync_cell.rs' jobs: diff --git a/.gitignore b/.gitignore index f2f9e58..a9d37c5 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,2 @@ target -Cargo.lock \ No newline at end of file +Cargo.lock diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index faab1bf..e9bef07 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -20,17 +20,26 @@ categories = ["simulation", "aerospace", "science"] keywords = ["simulation", "discrete-event", "systems", "cyberphysical", "real-time"] autotests = false + [features] -serde = ["dep:serde"] +# Remote procedure call API. +rpc = ["dep:rmp-serde", "dep:serde", "dep:tonic", "dep:prost", "dep:prost-types", "dep:bytes"] +# This feature forces protobuf/gRPC code (re-)generation. +rpc-codegen = ["dep:tonic-build"] +# gRPC server. 
+grpc-server = ["rpc", "dep:tokio"] # API-unstable public exports meant for external test/benchmarking; development only. dev-hooks = [] # Logging of performance-related statistics; development only. dev-logs = [] + [dependencies] +# Mandatory dependencies. async-event = "0.1" crossbeam-utils = "0.8" diatomic-waker = "0.1" +futures-channel = "0.3" futures-task = "0.3" multishot = "0.3.2" num_cpus = "1.13" @@ -39,21 +48,34 @@ recycle-box = "0.2" slab = "0.4" spin_sleep = "1" st3 = "0.4" +tai-time = "0.3" + +# Common RPC dependencies. +bytes = { version = "1", default-features = false, optional = true } +prost = { version = "0.12", optional = true } +prost-types = { version = "0.12", optional = true } +rmp-serde = { version = "1.1", optional = true } +serde = { version = "1", optional = true } + +# gRPC dependencies. +tokio = { version = "1.0", optional = true } +tonic = { version = "0.11", optional = true } -[dependencies.serde] -version = "1" -optional = true -features = ["derive"] [target.'cfg(asynchronix_loom)'.dependencies] loom = "0.5" waker-fn = "1.1" + [dev-dependencies] futures-util = "0.3" -futures-channel = "0.3" futures-executor = "0.3" + +[build-dependencies] +tonic-build = { version = "0.11", optional = true } + + [[test]] name = "integration" path = "tests/tests.rs" diff --git a/asynchronix/build.rs b/asynchronix/build.rs new file mode 100644 index 0000000..ce89ca8 --- /dev/null +++ b/asynchronix/build.rs @@ -0,0 +1,17 @@ +fn main() -> Result<(), Box> { + #[cfg(feature = "rpc-codegen")] + let builder = tonic_build::configure() + .build_client(false) + .out_dir("src/rpc/codegen/"); + + #[cfg(all(feature = "rpc-codegen", not(feature = "grpc-server")))] + let builder = builder.build_server(false); + + #[cfg(feature = "rpc-codegen")] + builder.compile( + &["simulation.proto", "custom_transport.proto"], + &["src/rpc/api/"], + )?; + + Ok(()) +} diff --git a/asynchronix/examples/espresso_machine.rs b/asynchronix/examples/espresso_machine.rs index 
8167717..a2c0826 100644 --- a/asynchronix/examples/espresso_machine.rs +++ b/asynchronix/examples/espresso_machine.rs @@ -35,9 +35,10 @@ use std::future::Future; use std::pin::Pin; use std::time::Duration; -use asynchronix::model::{InitializedModel, Model, Output}; +use asynchronix::model::{InitializedModel, Model}; +use asynchronix::ports::{EventSlot, Output}; use asynchronix::simulation::{Mailbox, SimInit}; -use asynchronix::time::{EventKey, MonotonicTime, Scheduler}; +use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; /// Water pump. pub struct Pump { @@ -81,7 +82,7 @@ pub struct Controller { water_sense: WaterSenseState, /// Event key, which if present indicates that the machine is currently /// brewing -- internal state. - stop_brew_key: Option, + stop_brew_key: Option, } impl Controller { @@ -323,7 +324,7 @@ impl Model for Tank { /// is non-zero. struct TankDynamicState { last_volume_update: MonotonicTime, - set_empty_key: EventKey, + set_empty_key: ActionKey, flow_rate: f64, } @@ -364,7 +365,8 @@ fn main() { pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox); // Model handles for simulation. - let mut flow_rate = pump.flow_rate.connect_slot().0; + let mut flow_rate = EventSlot::new(); + pump.flow_rate.connect_sink(&flow_rate); let controller_addr = controller_mbox.address(); let tank_addr = tank_mbox.address(); @@ -387,48 +389,48 @@ fn main() { assert_eq!(simu.time(), t); // Brew one espresso shot with the default brew time. - simu.send_event(Controller::brew_cmd, (), &controller_addr); - assert_eq!(flow_rate.take(), Some(pump_flow_rate)); + simu.process_event(Controller::brew_cmd, (), &controller_addr); + assert_eq!(flow_rate.next(), Some(pump_flow_rate)); simu.step(); t += Controller::DEFAULT_BREW_TIME; assert_eq!(simu.time(), t); - assert_eq!(flow_rate.take(), Some(0.0)); + assert_eq!(flow_rate.next(), Some(0.0)); // Drink too much coffee. 
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64(); let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who cares about floating-point rounding errors? for _ in 0..(shots_per_tank - 1) { - simu.send_event(Controller::brew_cmd, (), &controller_addr); - assert_eq!(flow_rate.take(), Some(pump_flow_rate)); + simu.process_event(Controller::brew_cmd, (), &controller_addr); + assert_eq!(flow_rate.next(), Some(pump_flow_rate)); simu.step(); t += Controller::DEFAULT_BREW_TIME; assert_eq!(simu.time(), t); - assert_eq!(flow_rate.take(), Some(0.0)); + assert_eq!(flow_rate.next(), Some(0.0)); } // Check that the tank becomes empty before the completion of the next shot. - simu.send_event(Controller::brew_cmd, (), &controller_addr); + simu.process_event(Controller::brew_cmd, (), &controller_addr); simu.step(); assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME); t = simu.time(); - assert_eq!(flow_rate.take(), Some(0.0)); + assert_eq!(flow_rate.next(), Some(0.0)); // Try to brew another shot while the tank is still empty. - simu.send_event(Controller::brew_cmd, (), &controller_addr); - assert!(flow_rate.take().is_none()); + simu.process_event(Controller::brew_cmd, (), &controller_addr); + assert!(flow_rate.next().is_none()); // Change the brew time and fill up the tank. 
let brew_time = Duration::new(30, 0); - simu.send_event(Controller::brew_time, brew_time, &controller_addr); - simu.send_event(Tank::fill, 1.0e-3, tank_addr); - simu.send_event(Controller::brew_cmd, (), &controller_addr); - assert_eq!(flow_rate.take(), Some(pump_flow_rate)); + simu.process_event(Controller::brew_time, brew_time, &controller_addr); + simu.process_event(Tank::fill, 1.0e-3, tank_addr); + simu.process_event(Controller::brew_cmd, (), &controller_addr); + assert_eq!(flow_rate.next(), Some(pump_flow_rate)); simu.step(); t += brew_time; assert_eq!(simu.time(), t); - assert_eq!(flow_rate.take(), Some(0.0)); + assert_eq!(flow_rate.next(), Some(0.0)); // Interrupt the brew after 15s by pressing again the brew button. simu.schedule_event( @@ -438,11 +440,11 @@ fn main() { &controller_addr, ) .unwrap(); - simu.send_event(Controller::brew_cmd, (), &controller_addr); - assert_eq!(flow_rate.take(), Some(pump_flow_rate)); + simu.process_event(Controller::brew_cmd, (), &controller_addr); + assert_eq!(flow_rate.next(), Some(pump_flow_rate)); simu.step(); t += Duration::from_secs(15); assert_eq!(simu.time(), t); - assert_eq!(flow_rate.take(), Some(0.0)); + assert_eq!(flow_rate.next(), Some(0.0)); } diff --git a/asynchronix/examples/power_supply.rs b/asynchronix/examples/power_supply.rs index a477912..4b4930a 100644 --- a/asynchronix/examples/power_supply.rs +++ b/asynchronix/examples/power_supply.rs @@ -26,7 +26,8 @@ //! │ ├───────────────────────────────▶ Total power //! └──────────┘ //! ``` -use asynchronix::model::{Model, Output, Requestor}; +use asynchronix::model::Model; +use asynchronix::ports::{EventSlot, Output, Requestor}; use asynchronix::simulation::{Mailbox, SimInit}; use asynchronix::time::MonotonicTime; @@ -124,10 +125,14 @@ fn main() { psu.pwr_out.connect(Load::pwr_in, &load3_mbox); // Model handles for simulation. 
- let mut psu_power = psu.power.connect_slot().0; - let mut load1_power = load1.power.connect_slot().0; - let mut load2_power = load2.power.connect_slot().0; - let mut load3_power = load3.power.connect_slot().0; + let mut psu_power = EventSlot::new(); + let mut load1_power = EventSlot::new(); + let mut load2_power = EventSlot::new(); + let mut load3_power = EventSlot::new(); + psu.power.connect_sink(&psu_power); + load1.power.connect_sink(&load1_power); + load2.power.connect_sink(&load2_power); + load3.power.connect_sink(&load3_power); let psu_addr = psu_mbox.address(); // Start time (arbitrary since models do not depend on absolute time). @@ -153,14 +158,14 @@ fn main() { // Vary the supply voltage, check the load and power supply consumptions. for voltage in [10.0, 15.0, 20.0] { - simu.send_event(PowerSupply::voltage_setting, voltage, &psu_addr); + simu.process_event(PowerSupply::voltage_setting, voltage, &psu_addr); let v_square = voltage * voltage; - assert!(same_power(load1_power.take().unwrap(), v_square / r1)); - assert!(same_power(load2_power.take().unwrap(), v_square / r2)); - assert!(same_power(load3_power.take().unwrap(), v_square / r3)); + assert!(same_power(load1_power.next().unwrap(), v_square / r1)); + assert!(same_power(load2_power.next().unwrap(), v_square / r2)); + assert!(same_power(load3_power.next().unwrap(), v_square / r3)); assert!(same_power( - psu_power.take().unwrap(), + psu_power.next().unwrap(), v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3) )); } diff --git a/asynchronix/examples/stepper_motor.rs b/asynchronix/examples/stepper_motor.rs index c9937db..9f5d764 100644 --- a/asynchronix/examples/stepper_motor.rs +++ b/asynchronix/examples/stepper_motor.rs @@ -18,7 +18,8 @@ use std::future::Future; use std::pin::Pin; use std::time::Duration; -use asynchronix::model::{InitializedModel, Model, Output}; +use asynchronix::model::{InitializedModel, Model}; +use asynchronix::ports::{EventBuffer, Output}; use asynchronix::simulation::{Mailbox, 
SimInit}; use asynchronix::time::{MonotonicTime, Scheduler}; @@ -200,7 +201,8 @@ fn main() { driver.current_out.connect(Motor::current_in, &motor_mbox); // Model handles for simulation. - let mut position = motor.position.connect_stream().0; + let mut position = EventBuffer::new(); + motor.position.connect_sink(&position); let motor_addr = motor_mbox.address(); let driver_addr = driver_mbox.address(); @@ -258,7 +260,7 @@ fn main() { assert!(position.next().is_none()); // Increase the load beyond the torque limit for a 1A driver current. - simu.send_event(Motor::load, 2.0, &motor_addr); + simu.process_event(Motor::load, 2.0, &motor_addr); // Advance simulation time and check that the motor is blocked. simu.step(); @@ -274,7 +276,7 @@ fn main() { // Decrease the load below the torque limit for a 1A driver current and // advance simulation time. - simu.send_event(Motor::load, 0.5, &motor_addr); + simu.process_event(Motor::load, 0.5, &motor_addr); simu.step(); t += Duration::new(0, 100_000_000); @@ -298,7 +300,7 @@ fn main() { // Now make the motor rotate in the opposite direction. Note that this // driver only accounts for a new PPS at the next pulse. - simu.send_event(Driver::pulse_rate, -10.0, &driver_addr); + simu.process_event(Driver::pulse_rate, -10.0, &driver_addr); simu.step(); t += Duration::new(0, 100_000_000); assert_eq!(simu.time(), t); diff --git a/asynchronix/src/channel.rs b/asynchronix/src/channel.rs index c732477..a1e8a43 100644 --- a/asynchronix/src/channel.rs +++ b/asynchronix/src/channel.rs @@ -8,7 +8,6 @@ use std::error; use std::fmt; use std::future::Future; use std::marker::PhantomData; -use std::num::NonZeroUsize; use std::sync::atomic::{self, AtomicUsize, Ordering}; use std::sync::Arc; @@ -154,7 +153,7 @@ impl Receiver { /// time, but an identifier may be reused after all handles to a channel /// have been dropped. 
pub(crate) fn channel_id(&self) -> ChannelId { - ChannelId(NonZeroUsize::new(&*self.inner as *const Inner as usize).unwrap()) + ChannelId(&*self.inner as *const Inner as usize) } } @@ -255,8 +254,8 @@ impl Sender { /// All channels are guaranteed to have different identifiers at any given /// time, but an identifier may be reused after all handles to a channel /// have been dropped. - pub(crate) fn channel_id(&self) -> ChannelId { - ChannelId(NonZeroUsize::new(&*self.inner as *const Inner as usize).unwrap()) + pub(crate) fn channel_id(&self) -> usize { + Arc::as_ptr(&self.inner) as usize } } @@ -369,7 +368,7 @@ where /// Unique identifier for a channel. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub(crate) struct ChannelId(NonZeroUsize); +pub(crate) struct ChannelId(usize); impl fmt::Display for ChannelId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { diff --git a/asynchronix/src/channel/queue.rs b/asynchronix/src/channel/queue.rs index 6b1e233..4c4f37b 100644 --- a/asynchronix/src/channel/queue.rs +++ b/asynchronix/src/channel/queue.rs @@ -85,7 +85,7 @@ struct Slot { message: UnsafeCell>, } -/// An fast MPSC queue that stores its items in recyclable boxes. +/// A fast MPSC queue that stores its items in recyclable boxes. /// /// The item may be unsized. /// diff --git a/asynchronix/src/executor.rs b/asynchronix/src/executor.rs index 5eb0ec5..b33a603 100644 --- a/asynchronix/src/executor.rs +++ b/asynchronix/src/executor.rs @@ -88,7 +88,7 @@ pub(crate) struct Executor { active_tasks: Arc>>, /// Parker for the main executor thread. parker: Parker, - /// Join handles of the worker threads. + /// Handles to the worker threads. worker_handles: Vec>, } diff --git a/asynchronix/src/lib.rs b/asynchronix/src/lib.rs index c27bd70..aeaefad 100644 --- a/asynchronix/src/lib.rs +++ b/asynchronix/src/lib.rs @@ -36,18 +36,18 @@ //! //! Models can contain four kinds of ports: //! -//! 
* _output ports_, which are instances of the [`Output`](model::Output) type +//! * _output ports_, which are instances of the [`Output`](ports::Output) type //! and can be used to broadcast a message, //! * _requestor ports_, which are instances of the -//! [`Requestor`](model::Requestor) type and can be used to broadcast a +//! [`Requestor`](ports::Requestor) type and can be used to broadcast a //! message and receive an iterator yielding the replies from all connected //! replier ports, //! * _input ports_, which are synchronous or asynchronous methods that -//! implement the [`InputFn`](model::InputFn) trait and take an `&mut self` +//! implement the [`InputFn`](ports::InputFn) trait and take an `&mut self` //! argument, a message argument, and an optional //! [`&Scheduler`](time::Scheduler) argument, //! * _replier ports_, which are similar to input ports but implement the -//! [`ReplierFn`](model::ReplierFn) trait and return a reply. +//! [`ReplierFn`](ports::ReplierFn) trait and return a reply. //! //! Messages that are broadcast by an output port to an input port are referred //! to as *events*, while messages exchanged between requestor and replier ports @@ -78,7 +78,8 @@ //! `Multiplier` could be implemented as follows: //! //! ``` -//! use asynchronix::model::{Model, Output}; +//! use asynchronix::model::Model; +//! use asynchronix::ports::Output; //! //! #[derive(Default)] //! pub struct Multiplier { @@ -104,7 +105,8 @@ //! //! ``` //! use std::time::Duration; -//! use asynchronix::model::{Model, Output}; +//! use asynchronix::model::Model; +//! use asynchronix::ports::Output; //! use asynchronix::time::Scheduler; //! //! #[derive(Default)] @@ -166,7 +168,8 @@ //! ``` //! # mod models { //! # use std::time::Duration; -//! # use asynchronix::model::{Model, Output}; +//! # use asynchronix::model::Model; +//! # use asynchronix::ports::Output; //! # use asynchronix::time::Scheduler; //! # #[derive(Default)] //! 
# pub struct Multiplier { @@ -193,6 +196,7 @@ //! # impl Model for Delay {} //! # } //! use std::time::Duration; +//! use asynchronix::ports::EventSlot; //! use asynchronix::simulation::{Mailbox, SimInit}; //! use asynchronix::time::MonotonicTime; //! @@ -217,7 +221,8 @@ //! delay1.output.connect(Delay::input, &delay2_mbox); //! //! // Keep handles to the system input and output for the simulation. -//! let mut output_slot = delay2.output.connect_slot().0; +//! let mut output_slot = EventSlot::new(); +//! delay2.output.connect_sink(&output_slot); //! let input_address = multiplier1_mbox.address(); //! //! // Pick an arbitrary simulation start time and build the simulation. @@ -239,23 +244,20 @@ //! deadline using for instance //! [`Simulation::step_by()`](simulation::Simulation::step_by). //! 2. by sending events or queries without advancing simulation time, using -//! [`Simulation::send_event()`](simulation::Simulation::send_event) or -//! [`Simulation::send_query()`](simulation::Simulation::send_query), +//! [`Simulation::process_event()`](simulation::Simulation::process_event) or +//! [`Simulation::send_query()`](simulation::Simulation::process_query), //! 3. by scheduling events, using for instance //! [`Simulation::schedule_event()`](simulation::Simulation::schedule_event). //! -//! When a simulation is initialized via -//! [`SimInit::init()`](simulation::SimInit::init) then the simulation will run -//! as fast as possible, without regard for the actual wall clock time. -//! Alternatively, it is possible to initialize a simulation via -//! [`SimInit::init_with_clock()`](simulation::SimInit::init_with_clock) to bind -//! the simulation time to the wall clock time using a custom -//! [`Clock`](time::Clock) type or a readily-available real-time clock such as -//! [`AutoSystemClock`](time::AutoSystemClock). +//! When initialized with the default clock, the simulation will run as fast as +//! possible, without regard for the actual wall clock time. 
Alternatively, the +//! simulation time can be synchronized to the wall clock time using +//! [`SimInit::set_clock()`](simulation::SimInit::set_clock) and providing a +//! custom [`Clock`](time::Clock) type or a readily-available real-time clock +//! such as [`AutoSystemClock`](time::AutoSystemClock). //! -//! Simulation outputs can be monitored using -//! [`EventSlot`](simulation::EventSlot)s and -//! [`EventStream`](simulation::EventStream)s, which can be connected to any +//! Simulation outputs can be monitored using [`EventSlot`](ports::EventSlot)s +//! and [`EventBuffer`](ports::EventBuffer)s, which can be connected to any //! model's output port. While an event slot only gives access to the last value //! sent from a port, an event stream is an iterator that yields all events that //! were sent in first-in-first-out order. @@ -266,7 +268,8 @@ //! ``` //! # mod models { //! # use std::time::Duration; -//! # use asynchronix::model::{Model, Output}; +//! # use asynchronix::model::Model; +//! # use asynchronix::ports::Output; //! # use asynchronix::time::Scheduler; //! # #[derive(Default)] //! # pub struct Multiplier { @@ -293,6 +296,7 @@ //! # impl Model for Delay {} //! # } //! # use std::time::Duration; +//! # use asynchronix::ports::EventSlot; //! # use asynchronix::simulation::{Mailbox, SimInit}; //! # use asynchronix::time::MonotonicTime; //! # use models::{Delay, Multiplier}; @@ -308,7 +312,8 @@ //! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox); //! # multiplier2.output.connect(Delay::input, &delay2_mbox); //! # delay1.output.connect(Delay::input, &delay2_mbox); -//! # let mut output_slot = delay2.output.connect_slot().0; +//! # let mut output_slot = EventSlot::new(); +//! # delay2.output.connect_sink(&output_slot); //! # let input_address = multiplier1_mbox.address(); //! # let t0 = MonotonicTime::EPOCH; //! # let mut simu = SimInit::new() @@ -318,21 +323,21 @@ //! # .add_model(delay2, delay2_mbox) //! # .init(t0); //! 
// Send a value to the first multiplier. -//! simu.send_event(Multiplier::input, 21.0, &input_address); +//! simu.process_event(Multiplier::input, 21.0, &input_address); //! //! // The simulation is still at t0 so nothing is expected at the output of the //! // second delay gate. -//! assert!(output_slot.take().is_none()); +//! assert!(output_slot.next().is_none()); //! //! // Advance simulation time until the next event and check the time and output. //! simu.step(); //! assert_eq!(simu.time(), t0 + Duration::from_secs(1)); -//! assert_eq!(output_slot.take(), Some(84.0)); +//! assert_eq!(output_slot.next(), Some(84.0)); //! //! // Get the answer to the ultimate question of life, the universe & everything. //! simu.step(); //! assert_eq!(simu.time(), t0 + Duration::from_secs(2)); -//! assert_eq!(output_slot.take(), Some(42.0)); +//! assert_eq!(output_slot.next(), Some(42.0)); //! ``` //! //! # Message ordering guarantees @@ -406,6 +411,9 @@ pub(crate) mod executor; mod loom_exports; pub(crate) mod macros; pub mod model; +pub mod ports; +#[cfg(feature = "rpc")] +pub mod rpc; pub mod simulation; pub mod time; pub(crate) mod util; diff --git a/asynchronix/src/model.rs b/asynchronix/src/model.rs index e5ea5d1..4fecf87 100644 --- a/asynchronix/src/model.rs +++ b/asynchronix/src/model.rs @@ -65,8 +65,9 @@ //! ### Output and requestor ports //! //! Output and requestor ports can be added to a model using composition, adding -//! [`Output`] and [`Requestor`] objects as members. They are parametrized by -//! the event, request and reply types. +//! [`Output`](crate::ports::Output) and [`Requestor`](crate::ports::Requestor) +//! objects as members. They are parametrized by the event, request and reply +//! types. //! //! Models are expected to expose their output and requestor ports as public //! members so they can be connected to input and replier ports when assembling @@ -75,7 +76,8 @@ //! #### Example //! //! ``` -//! 
use asynchronix::model::{Model, Output, Requestor}; +//! use asynchronix::model::Model; +//! use asynchronix::ports::{Output, Requestor}; //! //! pub struct MyModel { //! pub my_output: Output, @@ -90,9 +92,9 @@ //! //! ### Input and replier ports //! -//! Input ports and replier ports are methods that implement the [`InputFn`] or -//! [`ReplierFn`] traits with appropriate bounds on their argument and return -//! types. +//! Input ports and replier ports are methods that implement the +//! [`InputFn`](crate::ports::InputFn) or [`ReplierFn`](crate::ports::ReplierFn) +//! traits with appropriate bounds on their argument and return types. //! //! In practice, an input port method for an event of type `T` may have any of //! the following signatures, where the futures returned by the `async` variants @@ -132,7 +134,7 @@ //! can be connected to input and requestor ports when assembling the simulation //! bench. However, input ports may instead be defined as private methods if //! they are only used by the model itself to schedule future actions (see the -//! [`Scheduler`](crate::time::Scheduler) examples). +//! [`Scheduler`] examples). //! //! Changing the signature of an input or replier port is not considered to //! alter the public interface of a model provided that the event, request and @@ -165,13 +167,6 @@ use std::pin::Pin; use crate::time::Scheduler; -pub use model_fn::{InputFn, ReplierFn}; -pub use ports::{LineError, LineId, Output, Requestor}; - -pub mod markers; -mod model_fn; -mod ports; - /// Trait to be implemented by all models. /// /// This trait enables models to perform specific actions in the diff --git a/asynchronix/src/ports.rs b/asynchronix/src/ports.rs new file mode 100644 index 0000000..f7ae6d6 --- /dev/null +++ b/asynchronix/src/ports.rs @@ -0,0 +1,35 @@ +//! Model ports for event and query broadcasting. +//! +//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as +//! public member variables. 
Output ports broadcast events to all connected +//! input ports, while requestor ports broadcast queries to, and retrieve +//! replies from, all connected replier ports. +//! +//! On the surface, output and requestor ports only differ in that sending a +//! query from a requestor port also returns an iterator over the replies from +//! all connected ports. Sending a query is more costly, however, because of the +//! need to wait until all connected models have processed the query. In +//! contrast, since events are buffered in the mailbox of the target model, +//! sending an event is a fire-and-forget operation. For this reason, output +//! ports should generally be preferred over requestor ports when possible. + +mod input; +mod output; +mod sink; +mod source; + +pub use input::markers; +pub use input::{InputFn, ReplierFn}; +pub use output::{Output, Requestor}; +pub use sink::{ + event_buffer::EventBuffer, event_slot::EventSlot, EventSink, EventSinkStream, EventSinkWriter, +}; +pub use source::{EventSource, QuerySource, ReplyReceiver}; + +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +/// Unique identifier for a connection between two ports. +pub struct LineId(u64); + +/// Error raised when the specified line cannot be found. 
+#[derive(Copy, Clone, Debug)] +pub struct LineError {} diff --git a/asynchronix/src/ports/input.rs b/asynchronix/src/ports/input.rs new file mode 100644 index 0000000..224863c --- /dev/null +++ b/asynchronix/src/ports/input.rs @@ -0,0 +1,4 @@ +pub mod markers; +mod model_fn; + +pub use model_fn::{InputFn, ReplierFn}; diff --git a/asynchronix/src/model/markers.rs b/asynchronix/src/ports/input/markers.rs similarity index 100% rename from asynchronix/src/model/markers.rs rename to asynchronix/src/ports/input/markers.rs diff --git a/asynchronix/src/model/model_fn.rs b/asynchronix/src/ports/input/model_fn.rs similarity index 99% rename from asynchronix/src/model/model_fn.rs rename to asynchronix/src/ports/input/model_fn.rs index 2d0b618..5ace206 100644 --- a/asynchronix/src/model/model_fn.rs +++ b/asynchronix/src/ports/input/model_fn.rs @@ -2,9 +2,11 @@ use std::future::{ready, Future, Ready}; -use crate::model::{markers, Model}; +use crate::model::Model; use crate::time::Scheduler; +use super::markers; + /// A function, method or closures that can be used as an *input port*. /// /// This trait is in particular implemented for any function or method with the diff --git a/asynchronix/src/model/ports.rs b/asynchronix/src/ports/output.rs similarity index 59% rename from asynchronix/src/model/ports.rs rename to asynchronix/src/ports/output.rs index f296f16..5f60e8a 100644 --- a/asynchronix/src/model/ports.rs +++ b/asynchronix/src/ports/output.rs @@ -1,35 +1,16 @@ -//! Model ports for event and query broadcasting. -//! -//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as -//! public member variables. Output ports broadcast events to all connected -//! input ports, while requestor ports broadcast queries to, and retrieve -//! replies from, all connected replier ports. -//! -//! On the surface, output and requestor ports only differ in that sending a -//! query from a requestor port also returns an iterator over the replies from -//! 
all connected ports. Sending a query is more costly, however, because of the -//! need to wait until all connected models have processed the query. In -//! contrast, since events are buffered in the mailbox of the target model, -//! sending an event is a fire-and-forget operation. For this reason, output -//! ports should generally be preferred over requestor ports when possible. - -use std::fmt; -use std::sync::{Arc, Mutex}; - mod broadcaster; mod sender; -use crate::model::{InputFn, Model, ReplierFn}; -use crate::simulation::{Address, EventSlot, EventStream}; -use crate::util::spsc_queue; +use std::fmt; -use broadcaster::Broadcaster; +use crate::model::Model; +use crate::ports::{EventSink, LineError, LineId}; +use crate::ports::{InputFn, ReplierFn}; +use crate::simulation::Address; -use self::sender::{EventSender, EventSlotSender, EventStreamSender, QuerySender}; +use broadcaster::{EventBroadcaster, QueryBroadcaster}; -#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] -/// Unique identifier for a connection between two ports. -pub struct LineId(u64); +use self::sender::{EventSinkSender, InputSender, ReplierSender}; /// An output port. /// @@ -37,7 +18,7 @@ pub struct LineId(u64); /// methods that return no value. They broadcast events to all connected input /// ports. pub struct Output { - broadcaster: Broadcaster, + broadcaster: EventBroadcaster, next_line_id: u64, } @@ -56,53 +37,37 @@ impl Output { pub fn connect(&mut self, input: F, address: impl Into>) -> LineId where M: Model, - F: for<'a> InputFn<'a, M, T, S> + Copy, + F: for<'a> InputFn<'a, M, T, S> + Clone, S: Send + 'static, { assert!(self.next_line_id != u64::MAX); let line_id = LineId(self.next_line_id); self.next_line_id += 1; - let sender = Box::new(EventSender::new(input, address.into().0)); + let sender = Box::new(InputSender::new(input, address.into().0)); self.broadcaster.add(sender, line_id); line_id } - /// Adds a connection to an event stream iterator. 
- pub fn connect_stream(&mut self) -> (EventStream, LineId) { + /// Adds a connection to an event sink such as an + /// [`EventSlot`](crate::ports::EventSlot) or + /// [`EventBuffer`](crate::ports::EventBuffer). + pub fn connect_sink>(&mut self, sink: &S) -> LineId { assert!(self.next_line_id != u64::MAX); let line_id = LineId(self.next_line_id); self.next_line_id += 1; - - let (producer, consumer) = spsc_queue::spsc_queue(); - let sender = Box::new(EventStreamSender::new(producer)); - let event_stream = EventStream::new(consumer); - + let sender = Box::new(EventSinkSender::new(sink.writer())); self.broadcaster.add(sender, line_id); - (event_stream, line_id) - } - - /// Adds a connection to an event slot. - pub fn connect_slot(&mut self) -> (EventSlot, LineId) { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; - - let slot = Arc::new(Mutex::new(None)); - let sender = Box::new(EventSlotSender::new(slot.clone())); - let event_slot = EventSlot::new(slot); - - self.broadcaster.add(sender, line_id); - - (event_slot, line_id) + line_id } /// Removes the connection specified by the `LineId` parameter. /// - /// It is a logic error to specify a line identifier from another [`Output`] - /// or [`Requestor`] instance and may result in the disconnection of an - /// arbitrary endpoint. + /// It is a logic error to specify a line identifier from another + /// [`Output`], [`Requestor`], [`EventSource`](crate::ports::EventSource) or + /// [`QuerySource`](crate::ports::QuerySource) instance and may result in + /// the disconnection of an arbitrary endpoint. pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { if self.broadcaster.remove(line_id) { Ok(()) @@ -118,14 +83,14 @@ impl Output { /// Broadcasts an event to all connected input ports. 
pub async fn send(&mut self, arg: T) { - self.broadcaster.broadcast_event(arg).await.unwrap(); + self.broadcaster.broadcast(arg).await.unwrap(); } } impl Default for Output { fn default() -> Self { Self { - broadcaster: Broadcaster::default(), + broadcaster: EventBroadcaster::default(), next_line_id: 0, } } @@ -143,7 +108,7 @@ impl fmt::Debug for Output { /// model methods that return a value. They broadcast queries to all connected /// replier ports. pub struct Requestor { - broadcaster: Broadcaster, + broadcaster: QueryBroadcaster, next_line_id: u64, } @@ -162,13 +127,13 @@ impl Requestor { pub fn connect(&mut self, replier: F, address: impl Into>) -> LineId where M: Model, - F: for<'a> ReplierFn<'a, M, T, R, S> + Copy, + F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, S: Send + 'static, { assert!(self.next_line_id != u64::MAX); let line_id = LineId(self.next_line_id); self.next_line_id += 1; - let sender = Box::new(QuerySender::new(replier, address.into().0)); + let sender = Box::new(ReplierSender::new(replier, address.into().0)); self.broadcaster.add(sender, line_id); line_id @@ -176,9 +141,10 @@ impl Requestor { /// Removes the connection specified by the `LineId` parameter. /// - /// It is a logic error to specify a line identifier from another [`Output`] - /// or [`Requestor`] instance and may result in the disconnection of an - /// arbitrary endpoint. + /// It is a logic error to specify a line identifier from another + /// [`Requestor`], [`Output`], [`EventSource`](crate::ports::EventSource) or + /// [`QuerySource`](crate::ports::QuerySource) instance and may result in + /// the disconnection of an arbitrary endpoint. pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { if self.broadcaster.remove(line_id) { Ok(()) @@ -194,14 +160,14 @@ impl Requestor { /// Broadcasts a query to all connected replier ports. 
pub async fn send(&mut self, arg: T) -> impl Iterator + '_ { - self.broadcaster.broadcast_query(arg).await.unwrap() + self.broadcaster.broadcast(arg).await.unwrap() } } impl Default for Requestor { fn default() -> Self { Self { - broadcaster: Broadcaster::default(), + broadcaster: QueryBroadcaster::default(), next_line_id: 0, } } @@ -212,7 +178,3 @@ impl fmt::Debug for Requestor { +/// with an asynchronous iterator (a.k.a. async stream). +pub(super) struct BroadcasterInner { /// The list of senders with their associated line identifier. senders: Vec<(LineId, Box>)>, /// Fields explicitly borrowed by the `BroadcastFuture`. shared: Shared, } -impl Broadcaster { - /// Broadcasts an event to all addresses. - pub(super) async fn broadcast_event(&mut self, arg: T) -> Result<(), BroadcastError> { - match self.senders.as_mut_slice() { - // No sender. - [] => Ok(()), - // One sender. - [sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}), - // Multiple senders. - _ => self.broadcast(arg).await, - } - } -} - -impl Broadcaster { +impl BroadcasterInner { /// Adds a new sender associated to the specified identifier. /// /// # Panics @@ -93,55 +77,25 @@ impl Broadcaster { self.senders.len() } - /// Broadcasts a query to all addresses and collect all responses. - pub(super) async fn broadcast_query( - &mut self, - arg: T, - ) -> Result + '_, BroadcastError> { - match self.senders.as_mut_slice() { - // No sender. - [] => {} - // One sender. - [sender] => { - let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?; - self.shared.futures_env[0].output = Some(output); - } - // Multiple senders. - _ => self.broadcast(arg).await?, - }; - - // At this point all outputs should be available so `unwrap` can be - // called on the output of each future. - let outputs = self - .shared - .futures_env - .iter_mut() - .map(|t| t.output.take().unwrap()); - - Ok(outputs) - } - /// Efficiently broadcasts a message or a query to multiple addresses. 
/// /// This method does not collect the responses from queries. fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> { - let futures_count = self.senders.len(); let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default()); // Broadcast the message and collect all futures. - for (i, (sender, futures_env)) in self + let mut iter = self .senders .iter_mut() - .zip(self.shared.futures_env.iter_mut()) - .enumerate() - { + .zip(self.shared.futures_env.iter_mut()); + while let Some((sender, futures_env)) = iter.next() { let future_cache = futures_env .storage .take() .unwrap_or_else(|| RecycleBox::new(())); // Move the argument rather than clone it for the last future. - if i + 1 == futures_count { + if iter.len() == 0 { let future: RecycleBox> + Send + '_> = coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg))); @@ -161,7 +115,7 @@ impl Broadcaster { } } -impl Default for Broadcaster { +impl Default for BroadcasterInner { /// Creates an empty `Broadcaster` object. fn default() -> Self { let wake_sink = WakeSink::new(); @@ -179,6 +133,145 @@ impl Default for Broadcaster { } } +/// An object that can efficiently broadcast events to several input ports. +/// +/// This is very similar to `source::broadcaster::EventBroadcaster`, but +/// generates non-owned futures instead. +/// +/// See `BroadcasterInner` for implementation details. +pub(super) struct EventBroadcaster { + /// The broadcaster core object. + inner: BroadcasterInner, +} + +impl EventBroadcaster { + /// Adds a new sender associated to the specified identifier. + /// + /// # Panics + /// + /// This method will panic if the total count of senders would reach + /// `u32::MAX - 1`. + pub(super) fn add(&mut self, sender: Box>, id: LineId) { + self.inner.add(sender, id); + } + + /// Removes the first sender with the specified identifier, if any. + /// + /// Returns `true` if there was indeed a sender associated to the specified + /// identifier. 
+ pub(super) fn remove(&mut self, id: LineId) -> bool { + self.inner.remove(id) + } + + /// Removes all senders. + pub(super) fn clear(&mut self) { + self.inner.clear(); + } + + /// Returns the number of connected senders. + pub(super) fn len(&self) -> usize { + self.inner.len() + } + + /// Broadcasts an event to all addresses. + pub(super) async fn broadcast(&mut self, arg: T) -> Result<(), BroadcastError> { + match self.inner.senders.as_mut_slice() { + // No sender. + [] => Ok(()), + // One sender. + [sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}), + // Multiple senders. + _ => self.inner.broadcast(arg).await, + } + } +} + +impl Default for EventBroadcaster { + fn default() -> Self { + Self { + inner: BroadcasterInner::default(), + } + } +} + +/// An object that can efficiently broadcast queries to several replier ports. +/// +/// This is very similar to `source::broadcaster::QueryBroadcaster`, but +/// generates non-owned futures instead. +/// +/// See `BroadcasterInner` for implementation details. +pub(super) struct QueryBroadcaster { + /// The broadcaster core object. + inner: BroadcasterInner, +} + +impl QueryBroadcaster { + /// Adds a new sender associated to the specified identifier. + /// + /// # Panics + /// + /// This method will panic if the total count of senders would reach + /// `u32::MAX - 1`. + pub(super) fn add(&mut self, sender: Box>, id: LineId) { + self.inner.add(sender, id); + } + + /// Removes the first sender with the specified identifier, if any. + /// + /// Returns `true` if there was indeed a sender associated to the specified + /// identifier. + pub(super) fn remove(&mut self, id: LineId) -> bool { + self.inner.remove(id) + } + + /// Removes all senders. + pub(super) fn clear(&mut self) { + self.inner.clear(); + } + + /// Returns the number of connected senders. + pub(super) fn len(&self) -> usize { + self.inner.len() + } + + /// Broadcasts a query to all addresses and collect all responses. 
+ pub(super) async fn broadcast( + &mut self, + arg: T, + ) -> Result + '_, BroadcastError> { + match self.inner.senders.as_mut_slice() { + // No sender. + [] => {} + // One sender. + [sender] => { + let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?; + self.inner.shared.futures_env[0].output = Some(output); + } + // Multiple senders. + _ => self.inner.broadcast(arg).await?, + }; + + // At this point all outputs should be available so `unwrap` can be + // called on the output of each future. + let outputs = self + .inner + .shared + .futures_env + .iter_mut() + .map(|t| t.output.take().unwrap()); + + Ok(outputs) + } +} + +impl Default for QueryBroadcaster { + fn default() -> Self { + Self { + inner: BroadcasterInner::default(), + } + } +} + /// Data related to a sender future. struct FutureEnv { /// Cached storage for the future. @@ -212,8 +305,6 @@ struct Shared { /// /// - the sender futures are polled simultaneously rather than waiting for their /// completion in a sequential manner, -/// - this future is never woken if it can be proven that at least one of the -/// individual sender task will still be awaken, /// - the storage allocated for the sender futures is always returned to the /// `Broadcast` object so it can be reused by the next future, /// - the happy path (all futures immediately ready) is very fast. @@ -231,9 +322,9 @@ pub(super) struct BroadcastFuture<'a, R> { impl<'a, R> BroadcastFuture<'a, R> { /// Creates a new `BroadcastFuture`. fn new(shared: &'a mut Shared, futures: Vec>>) -> Self { - let futures_count = futures.len(); + let pending_futures_count = futures.len(); - assert!(shared.futures_env.len() == futures_count); + assert!(shared.futures_env.len() == pending_futures_count); for futures_env in shared.futures_env.iter_mut() { // Drop the previous output if necessary. 
@@ -244,7 +335,7 @@ impl<'a, R> BroadcastFuture<'a, R> { shared, futures: ManuallyDrop::new(futures), state: FutureState::Uninit, - pending_futures_count: futures_count, + pending_futures_count, } } } @@ -276,7 +367,10 @@ impl<'a, R> Future for BroadcastFuture<'a, R> { // Poll all sender futures once if this is the first time the broadcast // future is polled. if this.state == FutureState::Uninit { - // Prevent spurious wake-ups. + // The task set is re-used for each broadcast, so it may have some + // task scheduled due to e.g. spurious wake-ups that were triggered + // after the previous broadcast was completed. Discarding scheduled + // tasks can prevent unnecessary wake-ups. this.shared.task_set.discard_scheduled(); for task_idx in 0..this.futures.len() { @@ -311,20 +405,22 @@ impl<'a, R> Future for BroadcastFuture<'a, R> { // Repeatedly poll the futures of all scheduled tasks until there are no // more scheduled tasks. loop { - // Only register the waker if it is probable that we won't find any - // scheduled task. + // No need to register the waker if some tasks have been scheduled. if !this.shared.task_set.has_scheduled() { this.shared.wake_sink.register(cx.waker()); } // Retrieve the indices of the scheduled tasks if any. If there are // no scheduled tasks, `Poll::Pending` is returned and this future - // will be awaken again when enough tasks have been scheduled. - let scheduled_tasks = match this - .shared - .task_set - .steal_scheduled(this.pending_futures_count) - { + // will be awaken again when enough tasks have been awaken. + // + // NOTE: the current implementation requires a notification to be + // sent each time a sub-future has made progress. We may try at some + // point to benchmark an alternative strategy where a notification + // is requested only when all pending sub-futures have made progress, + // using `take_scheduled(this.pending_futures_count)`. This would + // reduce the cost of context switch but could hurt latency. 
+ let scheduled_tasks = match this.shared.task_set.take_scheduled(1) { Some(st) => st, None => return Poll::Pending, }; @@ -403,6 +499,7 @@ fn recycle_vec(mut v: Vec) -> Vec { #[cfg(all(test, not(asynchronix_loom)))] mod tests { use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::{Arc, Mutex}; use std::thread; use futures_executor::block_on; @@ -413,8 +510,9 @@ mod tests { use crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCell; - use super::super::*; + use super::super::sender::{InputSender, ReplierSender}; use super::*; + use crate::model::Model; struct Counter { inner: Arc, @@ -438,18 +536,18 @@ mod tests { const N_RECV: usize = 4; let mut mailboxes = Vec::new(); - let mut broadcaster = Broadcaster::default(); + let mut broadcaster = EventBroadcaster::default(); for id in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); - let sender = Box::new(EventSender::new(Counter::inc, address)); + let sender = Box::new(InputSender::new(Counter::inc, address)); broadcaster.add(sender, LineId(id as u64)); mailboxes.push(mailbox); } let th_broadcast = thread::spawn(move || { - block_on(broadcaster.broadcast_event(1)).unwrap(); + block_on(broadcaster.broadcast(1)).unwrap(); }); let counter = Arc::new(AtomicUsize::new(0)); @@ -486,18 +584,18 @@ mod tests { const N_RECV: usize = 4; let mut mailboxes = Vec::new(); - let mut broadcaster = Broadcaster::default(); + let mut broadcaster = QueryBroadcaster::default(); for id in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); - let sender = Box::new(QuerySender::new(Counter::fetch_inc, address)); + let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address)); broadcaster.add(sender, LineId(id as u64)); mailboxes.push(mailbox); } let th_broadcast = thread::spawn(move || { - let iter = block_on(broadcaster.broadcast_query(1)).unwrap(); + let iter = block_on(broadcaster.broadcast(1)).unwrap(); let sum = iter.fold(0, |acc, val| acc 
+ val); assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)} @@ -606,12 +704,12 @@ mod tests { let (test_event2, waker2) = test_event::(); let (test_event3, waker3) = test_event::(); - let mut broadcaster = Broadcaster::default(); + let mut broadcaster = QueryBroadcaster::default(); broadcaster.add(Box::new(test_event1), LineId(1)); broadcaster.add(Box::new(test_event2), LineId(2)); broadcaster.add(Box::new(test_event3), LineId(3)); - let mut fut = Box::pin(broadcaster.broadcast_query(())); + let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); let is_scheduled_waker = is_scheduled.clone(); @@ -626,7 +724,6 @@ mod tests { let th2 = thread::spawn(move || waker2.wake_final(7)); let th3 = thread::spawn(move || waker3.wake_final(42)); - let mut schedule_count = 0; loop { match fut.as_mut().poll(&mut cx) { Poll::Ready(Ok(mut res)) => { @@ -645,8 +742,6 @@ mod tests { if !is_scheduled.swap(false, Ordering::Acquire) { break; } - schedule_count += 1; - assert!(schedule_count <= 1); } th1.join().unwrap(); @@ -681,11 +776,11 @@ mod tests { let (test_event1, waker1) = test_event::(); let (test_event2, waker2) = test_event::(); - let mut broadcaster = Broadcaster::default(); + let mut broadcaster = QueryBroadcaster::default(); broadcaster.add(Box::new(test_event1), LineId(1)); broadcaster.add(Box::new(test_event2), LineId(2)); - let mut fut = Box::pin(broadcaster.broadcast_query(())); + let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); let is_scheduled_waker = is_scheduled.clone(); @@ -701,7 +796,6 @@ mod tests { let th2 = thread::spawn(move || waker2.wake_final(7)); let th_spurious = thread::spawn(move || spurious_waker.wake_spurious()); - let mut schedule_count = 0; loop { match fut.as_mut().poll(&mut cx) { Poll::Ready(Ok(mut res)) => { @@ -719,8 +813,6 @@ mod tests { if !is_scheduled.swap(false, Ordering::Acquire) { 
break; } - schedule_count += 1; - assert!(schedule_count <= 2); } th1.join().unwrap(); diff --git a/asynchronix/src/model/ports/sender.rs b/asynchronix/src/ports/output/sender.rs similarity index 73% rename from asynchronix/src/model/ports/sender.rs rename to asynchronix/src/ports/output/sender.rs index d5dc2a2..88cddfa 100644 --- a/asynchronix/src/model/ports/sender.rs +++ b/asynchronix/src/ports/output/sender.rs @@ -4,22 +4,28 @@ use std::future::Future; use std::marker::PhantomData; use std::mem::ManuallyDrop; use std::pin::Pin; -use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; use recycle_box::{coerce_box, RecycleBox}; use crate::channel; -use crate::model::{InputFn, Model, ReplierFn}; -use crate::util::spsc_queue; +use crate::model::Model; +use crate::ports::{EventSinkWriter, InputFn, ReplierFn}; -/// Abstraction over `EventSender` and `QuerySender`. +/// An event or query sender abstracting over the target model and input or +/// replier method. pub(super) trait Sender: Send { + /// Asynchronously send the event or request. fn send(&mut self, arg: T) -> RecycledFuture<'_, Result>; } -/// An object that can send a payload to a model. -pub(super) struct EventSender { +/// An object that can send events to an input port. 
+pub(super) struct InputSender +where + M: Model, + F: for<'a> InputFn<'a, M, T, S>, + T: Send + 'static, +{ func: F, sender: channel::Sender, fut_storage: Option>, @@ -27,7 +33,7 @@ pub(super) struct EventSender { _phantom_closure_marker: PhantomData, } -impl EventSender +impl InputSender where M: Model, F: for<'a> InputFn<'a, M, T, S>, @@ -44,15 +50,15 @@ where } } -impl Sender for EventSender +impl Sender for InputSender where M: Model, - F: for<'a> InputFn<'a, M, T, S> + Copy, + F: for<'a> InputFn<'a, M, T, S> + Clone, T: Send + 'static, - S: Send, + S: Send + 'static, { fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> { - let func = self.func; + let func = self.func.clone(); let fut = self.sender.send(move |model, scheduler, recycle_box| { let fut = func.call(model, arg, scheduler); @@ -66,8 +72,8 @@ where } } -/// An object that can send a payload to a model and retrieve a response. -pub(super) struct QuerySender { +/// An object that can send a request to a replier port and retrieve a response. +pub(super) struct ReplierSender { func: F, sender: channel::Sender, receiver: multishot::Receiver, @@ -76,7 +82,7 @@ pub(super) struct QuerySender { _phantom_closure_marker: PhantomData, } -impl QuerySender +impl ReplierSender where M: Model, F: for<'a> ReplierFn<'a, M, T, R, S>, @@ -95,16 +101,16 @@ where } } -impl Sender for QuerySender +impl Sender for ReplierSender where M: Model, - F: for<'a> ReplierFn<'a, M, T, R, S> + Copy, + F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, T: Send + 'static, R: Send + 'static, S: Send, { fn send(&mut self, arg: T) -> RecycledFuture<'_, Result> { - let func = self.func; + let func = self.func.clone(); let sender = &mut self.sender; let reply_receiver = &mut self.receiver; let fut_storage = &mut self.fut_storage; @@ -134,67 +140,40 @@ where } } -/// An object that can send a payload to an unbounded queue. 
-pub(super) struct EventStreamSender { - producer: spsc_queue::Producer, +/// An object that can send a payload to an event sink. +pub(super) struct EventSinkSender> { + writer: W, fut_storage: Option>, + _phantom_event: PhantomData, } -impl EventStreamSender { - pub(super) fn new(producer: spsc_queue::Producer) -> Self { +impl> EventSinkSender { + pub(super) fn new(writer: W) -> Self { Self { - producer, + writer, fut_storage: None, + _phantom_event: PhantomData, } } } -impl Sender for EventStreamSender +impl> Sender for EventSinkSender where T: Send + 'static, { fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> { - let producer = &mut self.producer; + let writer = &mut self.writer; RecycledFuture::new(&mut self.fut_storage, async move { - producer.push(arg).map_err(|_| SendError {}) - }) - } -} - -/// An object that can send a payload to a mutex-protected slot. -pub(super) struct EventSlotSender { - slot: Arc>>, - fut_storage: Option>, -} - -impl EventSlotSender { - pub(super) fn new(slot: Arc>>) -> Self { - Self { - slot, - fut_storage: None, - } - } -} - -impl Sender for EventSlotSender -where - T: Send + 'static, -{ - fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> { - let slot = &*self.slot; - - RecycledFuture::new(&mut self.fut_storage, async move { - let mut slot = slot.lock().unwrap(); - *slot = Some(arg); + writer.write(arg); Ok(()) }) } } -#[derive(Debug, PartialEq, Eq, Clone, Copy)] /// Error returned when the mailbox was closed or dropped. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub(super) struct SendError {} impl fmt::Display for SendError { diff --git a/asynchronix/src/ports/sink.rs b/asynchronix/src/ports/sink.rs new file mode 100644 index 0000000..639ed4c --- /dev/null +++ b/asynchronix/src/ports/sink.rs @@ -0,0 +1,54 @@ +pub(crate) mod event_buffer; +pub(crate) mod event_slot; + +/// A simulation endpoint that can receive events sent by model outputs. 
+/// +/// An `EventSink` can be thought of as a self-standing input meant to +/// externally monitor the simulated system. +pub trait EventSink { + /// Writer handle to an event sink. + type Writer: EventSinkWriter; + + /// Returns the writer handle associated to this sink. + fn writer(&self) -> Self::Writer; +} + +/// A writer handle to an event sink. +pub trait EventSinkWriter: Send + Sync + 'static { + /// Writes a value to the associated sink. + fn write(&self, event: T); +} + +/// An iterator over collected events with the ability to pause and resume event +/// collection. +/// +/// An `EventSinkStream` will typically be implemented on an `EventSink` for +/// which it will constitute a draining iterator. +pub trait EventSinkStream: Iterator { + /// Starts or resumes the collection of new events. + fn open(&mut self); + + /// Pauses the collection of new events. + /// + /// Events that were previously in the stream remain available. + fn close(&mut self); + + /// This is a stop-gap method that shadows `Iterator::try_fold` until the + /// latter can be implemented by user-defined types on stable Rust. + /// + /// It serves the exact same purpose as `Iterator::try_fold` but is + /// specialized for `Result` to avoid depending on the unstable `Try` trait. + /// + /// Implementors may elect to override the default implementation when the + /// event sink stream can be iterated over more rapidly than by repeatably + /// calling `Iterator::next`, for instance if the implementation of the + /// stream relies on a mutex that must be locked on each call. 
+ #[doc(hidden)] + fn try_fold(&mut self, init: B, f: F) -> Result + where + Self: Sized, + F: FnMut(B, Self::Item) -> Result, + { + Iterator::try_fold(self, init, f) + } +} diff --git a/asynchronix/src/ports/sink/event_buffer.rs b/asynchronix/src/ports/sink/event_buffer.rs new file mode 100644 index 0000000..15b89b0 --- /dev/null +++ b/asynchronix/src/ports/sink/event_buffer.rs @@ -0,0 +1,138 @@ +use std::collections::VecDeque; +use std::fmt; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex}; + +use super::{EventSink, EventSinkStream, EventSinkWriter}; + +/// The shared data of an `EventBuffer`. +struct Inner { + capacity: usize, + is_open: AtomicBool, + buffer: Mutex>, +} + +/// An [`EventSink`] and [`EventSinkStream`] with a bounded size. +/// +/// If the maximum capacity is exceeded, older events are overwritten. Events +/// are returned in first-in-first-out order. Note that even if the iterator +/// returns `None`, it may still produce more items in the future (in other +/// words, it is not a [`FusedIterator`](std::iter::FusedIterator)). +pub struct EventBuffer { + inner: Arc>, +} + +impl EventBuffer { + /// Default capacity when constructed with `new`. + pub const DEFAULT_CAPACITY: usize = 16; + + /// Creates an open `EventBuffer` with the default capacity. + pub fn new() -> Self { + Self::with_capacity(Self::DEFAULT_CAPACITY) + } + + /// Creates a closed `EventBuffer` with the default capacity. + pub fn new_closed() -> Self { + Self::with_capacity_closed(Self::DEFAULT_CAPACITY) + } + + /// Creates an open `EventBuffer` with the specified capacity. + pub fn with_capacity(capacity: usize) -> Self { + Self { + inner: Arc::new(Inner { + capacity, + is_open: AtomicBool::new(true), + buffer: Mutex::new(VecDeque::new()), + }), + } + } + + /// Creates a closed `EventBuffer` with the specified capacity. 
+ pub fn with_capacity_closed(capacity: usize) -> Self { + Self { + inner: Arc::new(Inner { + capacity, + is_open: AtomicBool::new(false), + buffer: Mutex::new(VecDeque::new()), + }), + } + } +} + +impl EventSink for EventBuffer { + type Writer = EventBufferWriter; + + fn writer(&self) -> Self::Writer { + EventBufferWriter { + inner: self.inner.clone(), + } + } +} + +impl Iterator for EventBuffer { + type Item = T; + + fn next(&mut self) -> Option { + self.inner.buffer.lock().unwrap().pop_front() + } +} + +impl EventSinkStream for EventBuffer { + fn open(&mut self) { + self.inner.is_open.store(true, Ordering::Relaxed); + } + + fn close(&mut self) { + self.inner.is_open.store(false, Ordering::Relaxed); + } + + fn try_fold(&mut self, init: B, f: F) -> Result + where + Self: Sized, + F: FnMut(B, Self::Item) -> Result, + { + let mut inner = self.inner.buffer.lock().unwrap(); + let mut drain = inner.drain(..); + + drain.try_fold(init, f) + } +} + +impl Default for EventBuffer { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Debug for EventBuffer { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EventBuffer").finish_non_exhaustive() + } +} + +/// A producer handle of an `EventStream`. +pub struct EventBufferWriter { + inner: Arc>, +} + +impl EventSinkWriter for EventBufferWriter { + /// Pushes an event onto the queue. 
+ fn write(&self, event: T) { + if !self.inner.is_open.load(Ordering::Relaxed) { + return; + } + + let mut buffer = self.inner.buffer.lock().unwrap(); + if buffer.len() == self.inner.capacity { + buffer.pop_front(); + } + + buffer.push_back(event); + } +} + +impl fmt::Debug for EventBufferWriter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EventBufferWriter").finish_non_exhaustive() + } +} diff --git a/asynchronix/src/ports/sink/event_slot.rs b/asynchronix/src/ports/sink/event_slot.rs new file mode 100644 index 0000000..62a6cef --- /dev/null +++ b/asynchronix/src/ports/sink/event_slot.rs @@ -0,0 +1,120 @@ +use std::fmt; +use std::sync::atomic::{AtomicBool, Ordering}; +use std::sync::{Arc, Mutex, TryLockError, TryLockResult}; + +use super::{EventSink, EventSinkStream, EventSinkWriter}; + +/// The shared data of an `EventBuffer`. +struct Inner { + is_open: AtomicBool, + slot: Mutex>, +} + +/// An `EventSink` and `EventSinkStream` that only keeps the last event. +/// +/// Once the value is read, the iterator will return `None` until a new value is +/// received. If the slot contains a value when a new value is received, the +/// previous value is overwritten. +pub struct EventSlot { + inner: Arc>, +} + +impl EventSlot { + /// Creates an open `EventSlot`. + pub fn new() -> Self { + Self { + inner: Arc::new(Inner { + is_open: AtomicBool::new(true), + slot: Mutex::new(None), + }), + } + } + + /// Creates a closed `EventSlot`. + pub fn new_closed() -> Self { + Self { + inner: Arc::new(Inner { + is_open: AtomicBool::new(false), + slot: Mutex::new(None), + }), + } + } +} + +impl EventSink for EventSlot { + type Writer = EventSlotWriter; + + /// Returns a writer handle. 
+ fn writer(&self) -> EventSlotWriter { + EventSlotWriter { + inner: self.inner.clone(), + } + } +} + +impl Iterator for EventSlot { + type Item = T; + + fn next(&mut self) -> Option { + match self.inner.slot.try_lock() { + TryLockResult::Ok(mut v) => v.take(), + TryLockResult::Err(TryLockError::WouldBlock) => None, + TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(), + } + } +} + +impl EventSinkStream for EventSlot { + fn open(&mut self) { + self.inner.is_open.store(true, Ordering::Relaxed); + } + fn close(&mut self) { + self.inner.is_open.store(false, Ordering::Relaxed); + } +} + +impl Default for EventSlot { + fn default() -> Self { + Self::new() + } +} + +impl fmt::Debug for EventSlot { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EventSlot").finish_non_exhaustive() + } +} + +/// A writer handle of an `EventSlot`. +pub struct EventSlotWriter { + inner: Arc>, +} + +impl EventSinkWriter for EventSlotWriter { + /// Write an event into the slot. + fn write(&self, event: T) { + // Ignore if the sink is closed. + if !self.inner.is_open.load(Ordering::Relaxed) { + return; + } + + // Why do we just use `try_lock` and abandon if the lock is taken? The + // reason is that (i) the reader is never supposed to access the slot + // when the simulation runs and (ii) as a rule the simulator does not + // warrant fairness when concurrently writing to an input. Therefore, if + // the mutex is already locked when this writer attempts to lock it, it + // means another writer is concurrently writing an event, and that event + // is just as legitimate as ours so there is not need to overwrite it. 
+ match self.inner.slot.try_lock() { + TryLockResult::Ok(mut v) => *v = Some(event), + TryLockResult::Err(TryLockError::WouldBlock) => {} + TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(), + } + } +} + +impl fmt::Debug for EventSlotWriter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("EventStreamWriter").finish_non_exhaustive() + } +} diff --git a/asynchronix/src/ports/source.rs b/asynchronix/src/ports/source.rs new file mode 100644 index 0000000..5e745ae --- /dev/null +++ b/asynchronix/src/ports/source.rs @@ -0,0 +1,295 @@ +mod broadcaster; +mod sender; + +use std::fmt; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use crate::model::Model; +use crate::ports::InputFn; +use crate::ports::{LineError, LineId}; +use crate::simulation::Address; +use crate::time::{ + Action, ActionKey, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, +}; +use crate::util::slot; + +use broadcaster::ReplyIterator; +use broadcaster::{EventBroadcaster, QueryBroadcaster}; +use sender::{InputSender, ReplierSender}; + +use super::ReplierFn; + +/// An event source port. +/// +/// The `EventSource` port is similar to an [`Output`](crate::ports::Output) +/// port in that it can send events to connected input ports. It is not meant, +/// however, to be instantiated as a member of a model, but rather as a +/// simulation monitoring endpoint instantiated during bench assembly. +pub struct EventSource { + broadcaster: Arc>>, + next_line_id: u64, +} + +impl EventSource { + /// Creates a new, disconnected `EventSource` port. + pub fn new() -> Self { + Self::default() + } + + /// Adds a connection to an input port of the model specified by the + /// address. + /// + /// The input port must be an asynchronous method of a model of type `M` + /// taking as argument a value of type `T` plus, optionally, a scheduler + /// reference. 
+ pub fn connect(&mut self, input: F, address: impl Into>) -> LineId + where + M: Model, + F: for<'a> InputFn<'a, M, T, S> + Clone, + S: Send + 'static, + { + assert!(self.next_line_id != u64::MAX); + let line_id = LineId(self.next_line_id); + self.next_line_id += 1; + let sender = Box::new(InputSender::new(input, address.into().0)); + self.broadcaster.lock().unwrap().add(sender, line_id); + + line_id + } + + /// Removes the connection specified by the `LineId` parameter. + /// + /// It is a logic error to specify a line identifier from another + /// [`EventSource`], [`QuerySource`], [`Output`](crate::ports::Output) or + /// [`Requestor`](crate::ports::Requestor) instance and may result in the + /// disconnection of an arbitrary endpoint. + pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { + if self.broadcaster.lock().unwrap().remove(line_id) { + Ok(()) + } else { + Err(LineError {}) + } + } + + /// Removes all connections. + pub fn disconnect_all(&mut self) { + self.broadcaster.lock().unwrap().clear(); + } + + /// Returns an action which, when processed, broadcasts an event to all + /// connected input ports. + /// + /// Note that the action broadcasts the event to those models that are + /// connected to the event source at the time the action is processed. + pub fn event(&mut self, arg: T) -> Action { + let fut = self.broadcaster.lock().unwrap().broadcast(arg); + let fut = async { + fut.await.unwrap(); + }; + + Action::new(OnceAction::new(fut)) + } + + /// Returns a cancellable action and a cancellation key; when processed, the + /// action broadcasts an event to all connected input ports. + /// + /// Note that the action broadcasts the event to those models that are + /// connected to the event source at the time the action is processed. 
+ pub fn keyed_event(&mut self, arg: T) -> (Action, ActionKey) { + let action_key = ActionKey::new(); + let fut = self.broadcaster.lock().unwrap().broadcast(arg); + + let action = Action::new(KeyedOnceAction::new( + // Cancellation is ignored once the action is already spawned on the + // executor. This means the action cannot be cancelled while the + // simulation is running, but since an event source is meant to be + // used outside the simulator, this shouldn't be an issue in + // practice. + |_| async { + fut.await.unwrap(); + }, + action_key.clone(), + )); + + (action, action_key) + } + + /// Returns a periodically recurring action which, when processed, + /// broadcasts an event to all connected input ports. + /// + /// Note that the action broadcasts the event to those models that are + /// connected to the event source at the time the action is processed. + pub fn periodic_event(&mut self, period: Duration, arg: T) -> Action { + let broadcaster = self.broadcaster.clone(); + + Action::new(PeriodicAction::new( + || async move { + let fut = broadcaster.lock().unwrap().broadcast(arg); + fut.await.unwrap(); + }, + period, + )) + } + + /// Returns a cancellable, periodically recurring action and a cancellation + /// key; when processed, the action broadcasts an event to all connected + /// input ports. + /// + /// Note that the action broadcasts the event to those models that are + /// connected to the event source at the time the action is processed. + pub fn keyed_periodic_event(&mut self, period: Duration, arg: T) -> (Action, ActionKey) { + let action_key = ActionKey::new(); + let broadcaster = self.broadcaster.clone(); + + let action = Action::new(KeyedPeriodicAction::new( + // Cancellation is ignored once the action is already spawned on the + // executor. This means the action cannot be cancelled while the + // simulation is running, but since an event source is meant to be + // used outside the simulator, this shouldn't be an issue in + // practice. 
+            |_| async move {
+                let fut = broadcaster.lock().unwrap().broadcast(arg);
+                fut.await.unwrap();
+            },
+            period,
+            action_key.clone(),
+        ));
+
+        (action, action_key)
+    }
+}
+
+impl Default for EventSource {
+    fn default() -> Self {
+        Self {
+            broadcaster: Arc::new(Mutex::new(EventBroadcaster::default())),
+            next_line_id: 0,
+        }
+    }
+}
+
+impl fmt::Debug for EventSource {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        write!(
+            f,
+            "Event source ({} connected ports)",
+            self.broadcaster.lock().unwrap().len()
+        )
+    }
+}
+
+/// A request source port.
+///
+/// The `QuerySource` port is similar to a
+/// [`Requestor`](crate::ports::Requestor) port in that it can send events to
+/// connected input ports. It is not meant, however, to be instantiated as a
+/// member of a model, but rather as a simulation monitoring endpoint
+/// instantiated during bench assembly.
+pub struct QuerySource {
+    broadcaster: Arc>>,
+    next_line_id: u64,
+}
+
+impl QuerySource {
+    /// Creates a new, disconnected `QuerySource` port.
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    /// Adds a connection to a replier port of the model specified by the
+    /// address.
+    ///
+    /// The replier port must be an asynchronous method of a model of type `M`
+    /// returning a value of type `R` and taking as argument a value of type `T`
+    /// plus, optionally, a scheduler reference.
+    pub fn connect(&mut self, replier: F, address: impl Into>) -> LineId
+    where
+        M: Model,
+        F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
+        S: Send + 'static,
+    {
+        assert!(self.next_line_id != u64::MAX);
+        let line_id = LineId(self.next_line_id);
+        self.next_line_id += 1;
+        let sender = Box::new(ReplierSender::new(replier, address.into().0));
+        self.broadcaster.lock().unwrap().add(sender, line_id);
+
+        line_id
+    }
+
+    /// Removes the connection specified by the `LineId` parameter.
+ /// + /// It is a logic error to specify a line identifier from another + /// [`QuerySource`], [`EventSource`], [`Output`](crate::ports::Output) or + /// [`Requestor`](crate::ports::Requestor) instance and may result in the + /// disconnection of an arbitrary endpoint. + pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { + if self.broadcaster.lock().unwrap().remove(line_id) { + Ok(()) + } else { + Err(LineError {}) + } + } + + /// Removes all connections. + pub fn disconnect_all(&mut self) { + self.broadcaster.lock().unwrap().clear(); + } + + /// Returns an action which, when processed, broadcasts a query to all + /// connected replier ports. + /// + /// Note that the action broadcasts the query to those models that are + /// connected to the query source at the time the action is processed. + pub fn query(&mut self, arg: T) -> (Action, ReplyReceiver) { + let (writer, reader) = slot::slot(); + let fut = self.broadcaster.lock().unwrap().broadcast(arg); + let fut = async move { + let replies = fut.await.unwrap(); + let _ = writer.write(replies); + }; + + let action = Action::new(OnceAction::new(fut)); + + (action, ReplyReceiver::(reader)) + } +} + +impl Default for QuerySource { + fn default() -> Self { + Self { + broadcaster: Arc::new(Mutex::new(QueryBroadcaster::default())), + next_line_id: 0, + } + } +} + +impl fmt::Debug for QuerySource { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "Query source ({} connected ports)", + self.broadcaster.lock().unwrap().len() + ) + } +} + +/// A receiver for all replies collected from a single query broadcast. +pub struct ReplyReceiver(slot::SlotReader>); + +impl ReplyReceiver { + /// Returns all replies to a query. + /// + /// Returns `None` if the replies are not yet available or if they were + /// already taken in a previous call to `take`. 
+ pub fn take(&mut self) -> Option> { + self.0.try_read().ok() + } +} + +impl fmt::Debug for ReplyReceiver { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Replies") + } +} diff --git a/asynchronix/src/ports/source/broadcaster.rs b/asynchronix/src/ports/source/broadcaster.rs new file mode 100644 index 0000000..c418b83 --- /dev/null +++ b/asynchronix/src/ports/source/broadcaster.rs @@ -0,0 +1,759 @@ +use std::future::Future; +use std::mem; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::vec; + +use pin_project_lite::pin_project; + +use diatomic_waker::WakeSink; + +use super::sender::{Sender, SenderFuture}; + +use crate::ports::LineId; +use crate::util::task_set::TaskSet; + +/// An object that can efficiently broadcast messages to several addresses. +/// +/// This is very similar to `output::broadcaster::BroadcasterInner`, but +/// generates owned futures instead. +/// +/// This object maintains a list of senders associated to each target address. +/// When a message is broadcast, the sender futures are awaited in parallel. +/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate +/// does, but the outputs of all sender futures are returned all at once rather +/// than with an asynchronous iterator (a.k.a. async stream). +pub(super) struct BroadcasterInner { + /// The list of senders with their associated line identifier. + senders: Vec<(LineId, Box>)>, +} + +impl BroadcasterInner { + /// Adds a new sender associated to the specified identifier. + /// + /// # Panics + /// + /// This method will panic if the total count of senders would reach + /// `u32::MAX - 1`. + pub(super) fn add(&mut self, sender: Box>, id: LineId) { + self.senders.push((id, sender)); + } + + /// Removes the first sender with the specified identifier, if any. + /// + /// Returns `true` if there was indeed a sender associated to the specified + /// identifier. 
+ pub(super) fn remove(&mut self, id: LineId) -> bool { + if let Some(pos) = self.senders.iter().position(|s| s.0 == id) { + self.senders.swap_remove(pos); + + return true; + } + + false + } + + /// Removes all senders. + pub(super) fn clear(&mut self) { + self.senders.clear(); + } + + /// Returns the number of connected senders. + pub(super) fn len(&self) -> usize { + self.senders.len() + } + + /// Efficiently broadcasts a message or a query to multiple addresses. + /// + /// This method does not collect the responses from queries. + fn broadcast(&mut self, arg: T) -> BroadcastFuture { + let mut future_states = Vec::with_capacity(self.senders.len()); + + // Broadcast the message and collect all futures. + let mut iter = self.senders.iter_mut(); + while let Some(sender) = iter.next() { + // Move the argument rather than clone it for the last future. + if iter.len() == 0 { + future_states.push(SenderFutureState::Pending(sender.1.send(arg))); + break; + } + + future_states.push(SenderFutureState::Pending(sender.1.send(arg.clone()))); + } + + // Generate the global future. + BroadcastFuture::new(future_states) + } +} + +impl Default for BroadcasterInner { + fn default() -> Self { + Self { + senders: Vec::new(), + } + } +} + +/// An object that can efficiently broadcast events to several input ports. +/// +/// This is very similar to `output::broadcaster::EventBroadcaster`, but +/// generates owned futures instead. +/// +/// See `BroadcasterInner` for implementation details. +pub(super) struct EventBroadcaster { + /// The broadcaster core object. + inner: BroadcasterInner, +} + +impl EventBroadcaster { + /// Adds a new sender associated to the specified identifier. + /// + /// # Panics + /// + /// This method will panic if the total count of senders would reach + /// `u32::MAX - 1`. + pub(super) fn add(&mut self, sender: Box>, id: LineId) { + self.inner.add(sender, id); + } + + /// Removes the first sender with the specified identifier, if any. 
+ /// + /// Returns `true` if there was indeed a sender associated to the specified + /// identifier. + pub(super) fn remove(&mut self, id: LineId) -> bool { + self.inner.remove(id) + } + + /// Removes all senders. + pub(super) fn clear(&mut self) { + self.inner.clear(); + } + + /// Returns the number of connected senders. + pub(super) fn len(&self) -> usize { + self.inner.len() + } + + /// Broadcasts an event to all addresses. + pub(super) fn broadcast( + &mut self, + arg: T, + ) -> impl Future> + Send { + enum Fut { + Empty, + Single(F1), + Multiple(F2), + } + + let fut = match self.inner.senders.as_mut_slice() { + // No sender. + [] => Fut::Empty, + // One sender. + [sender] => Fut::Single(sender.1.send(arg)), + // Multiple senders. + _ => Fut::Multiple(self.inner.broadcast(arg)), + }; + + async { + match fut { + Fut::Empty => Ok(()), + Fut::Single(fut) => fut.await.map_err(|_| BroadcastError {}), + Fut::Multiple(fut) => fut.await.map(|_| ()), + } + } + } +} + +impl Default for EventBroadcaster { + fn default() -> Self { + Self { + inner: BroadcasterInner::default(), + } + } +} + +/// An object that can efficiently broadcast queries to several replier ports. +/// +/// This is very similar to `output::broadcaster::QueryBroadcaster`, but +/// generates owned futures instead. +/// +/// See `BroadcasterInner` for implementation details. +pub(super) struct QueryBroadcaster { + /// The broadcaster core object. + inner: BroadcasterInner, +} + +impl QueryBroadcaster { + /// Adds a new sender associated to the specified identifier. + /// + /// # Panics + /// + /// This method will panic if the total count of senders would reach + /// `u32::MAX - 1`. + pub(super) fn add(&mut self, sender: Box>, id: LineId) { + self.inner.add(sender, id); + } + + /// Removes the first sender with the specified identifier, if any. + /// + /// Returns `true` if there was indeed a sender associated to the specified + /// identifier. 
+ pub(super) fn remove(&mut self, id: LineId) -> bool { + self.inner.remove(id) + } + + /// Removes all senders. + pub(super) fn clear(&mut self) { + self.inner.clear(); + } + + /// Returns the number of connected senders. + pub(super) fn len(&self) -> usize { + self.inner.len() + } + + /// Broadcasts an event to all addresses. + pub(super) fn broadcast( + &mut self, + arg: T, + ) -> impl Future, BroadcastError>> + Send { + enum Fut { + Empty, + Single(F1), + Multiple(F2), + } + + let fut = match self.inner.senders.as_mut_slice() { + // No sender. + [] => Fut::Empty, + // One sender. + [sender] => Fut::Single(sender.1.send(arg)), + // Multiple senders. + _ => Fut::Multiple(self.inner.broadcast(arg)), + }; + + async { + match fut { + Fut::Empty => Ok(ReplyIterator(Vec::new().into_iter())), + Fut::Single(fut) => fut + .await + .map(|reply| ReplyIterator(vec![SenderFutureState::Ready(reply)].into_iter())) + .map_err(|_| BroadcastError {}), + Fut::Multiple(fut) => fut.await.map_err(|_| BroadcastError {}), + } + } + } +} + +impl Default for QueryBroadcaster { + fn default() -> Self { + Self { + inner: BroadcasterInner::default(), + } + } +} + +pin_project! { + /// A future aggregating the outputs of a collection of sender futures. + /// + /// The idea is to join all sender futures as efficiently as possible, meaning: + /// + /// - the sender futures are polled simultaneously rather than waiting for their + /// completion in a sequential manner, + /// - the happy path (all futures immediately ready) is very fast. + pub(super) struct BroadcastFuture { + // Thread-safe waker handle. + wake_sink: WakeSink, + // Tasks associated to the sender futures. + task_set: TaskSet, + // List of all sender futures or their outputs. + future_states: Vec>, + // The total count of futures that have not yet been polled to completion. + pending_futures_count: usize, + // State of completion of the future. 
+ state: FutureState, + } +} + +impl BroadcastFuture { + /// Creates a new `BroadcastFuture`. + fn new(future_states: Vec>) -> Self { + let wake_sink = WakeSink::new(); + let wake_src = wake_sink.source(); + let pending_futures_count = future_states.len(); + + BroadcastFuture { + wake_sink, + task_set: TaskSet::with_len(wake_src, pending_futures_count), + future_states, + pending_futures_count, + state: FutureState::Uninit, + } + } +} + +impl Future for BroadcastFuture { + type Output = Result, BroadcastError>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = &mut *self; + + assert_ne!(this.state, FutureState::Completed); + + // Poll all sender futures once if this is the first time the broadcast + // future is polled. + if this.state == FutureState::Uninit { + for task_idx in 0..this.future_states.len() { + if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] { + let task_waker_ref = this.task_set.waker_of(task_idx); + let task_cx_ref = &mut Context::from_waker(&task_waker_ref); + + match future.as_mut().poll(task_cx_ref) { + Poll::Ready(Ok(output)) => { + this.future_states[task_idx] = SenderFutureState::Ready(output); + this.pending_futures_count -= 1; + } + Poll::Ready(Err(_)) => { + this.state = FutureState::Completed; + + return Poll::Ready(Err(BroadcastError {})); + } + Poll::Pending => {} + } + } + } + + if this.pending_futures_count == 0 { + this.state = FutureState::Completed; + let outputs = mem::take(&mut this.future_states).into_iter(); + + return Poll::Ready(Ok(ReplyIterator(outputs))); + } + + this.state = FutureState::Pending; + } + + // Repeatedly poll the futures of all scheduled tasks until there are no + // more scheduled tasks. + loop { + // No need to register the waker if some tasks have been scheduled. + if !this.task_set.has_scheduled() { + this.wake_sink.register(cx.waker()); + } + + // Retrieve the indices of the scheduled tasks if any. 
If there are
+            // no scheduled tasks, `Poll::Pending` is returned and this future
+            // will be awakened again when enough tasks have been scheduled.
+            //
+            // NOTE: the current implementation requires a notification to be
+            // sent each time a sub-future has made progress. We may try at some
+            // point to benchmark an alternative strategy where a notification
+            // is requested only when all pending sub-futures have made progress,
+            // using `take_scheduled(this.pending_futures_count)`. This would
+            // reduce the cost of context switch but could hurt latency.
+            let scheduled_tasks = match this.task_set.take_scheduled(1) {
+                Some(st) => st,
+                None => return Poll::Pending,
+            };
+
+            for task_idx in scheduled_tasks {
+                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
+                    let task_waker_ref = this.task_set.waker_of(task_idx);
+                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);
+
+                    match future.as_mut().poll(task_cx_ref) {
+                        Poll::Ready(Ok(output)) => {
+                            this.future_states[task_idx] = SenderFutureState::Ready(output);
+                            this.pending_futures_count -= 1;
+                        }
+                        Poll::Ready(Err(_)) => {
+                            this.state = FutureState::Completed;
+
+                            return Poll::Ready(Err(BroadcastError {}));
+                        }
+                        Poll::Pending => {}
+                    }
+                }
+            }
+
+            if this.pending_futures_count == 0 {
+                this.state = FutureState::Completed;
+                let outputs = mem::take(&mut this.future_states).into_iter();
+
+                return Poll::Ready(Ok(ReplyIterator(outputs)));
+            }
+        }
+    }
+}
+
+/// Error returned when a message could not be delivered.
+#[derive(Debug)]
+pub(super) struct BroadcastError {}
+
+#[derive(Debug, PartialEq)]
+enum FutureState {
+    Uninit,
+    Pending,
+    Completed,
+}
+
+/// The state of a `SenderFuture`.
+enum SenderFutureState {
+    Pending(SenderFuture),
+    Ready(R),
+}
+
+/// An iterator over the replies to a broadcast request.
+pub(crate) struct ReplyIterator(vec::IntoIter>); + +impl Iterator for ReplyIterator { + type Item = R; + + fn next(&mut self) -> Option { + self.0.next().map(|state| match state { + SenderFutureState::Ready(reply) => reply, + _ => panic!("reply missing in replies iterator"), + }) + } + + fn size_hint(&self) -> (usize, Option) { + self.0.size_hint() + } +} + +#[cfg(all(test, not(asynchronix_loom)))] +mod tests { + use std::sync::atomic::{AtomicUsize, Ordering}; + use std::sync::{Arc, Mutex}; + use std::thread; + + use futures_executor::block_on; + + use crate::channel::Receiver; + use crate::time::Scheduler; + use crate::time::{MonotonicTime, TearableAtomicTime}; + use crate::util::priority_queue::PriorityQueue; + use crate::util::sync_cell::SyncCell; + + use super::super::sender::{InputSender, ReplierSender}; + use super::*; + use crate::model::Model; + + struct Counter { + inner: Arc, + } + impl Counter { + fn new(counter: Arc) -> Self { + Self { inner: counter } + } + async fn inc(&mut self, by: usize) { + self.inner.fetch_add(by, Ordering::Relaxed); + } + async fn fetch_inc(&mut self, by: usize) -> usize { + let res = self.inner.fetch_add(by, Ordering::Relaxed); + res + } + } + impl Model for Counter {} + + #[test] + fn broadcast_event_smoke() { + const N_RECV: usize = 4; + + let mut mailboxes = Vec::new(); + let mut broadcaster = EventBroadcaster::default(); + for id in 0..N_RECV { + let mailbox = Receiver::new(10); + let address = mailbox.sender(); + let sender = Box::new(InputSender::new(Counter::inc, address)); + + broadcaster.add(sender, LineId(id as u64)); + mailboxes.push(mailbox); + } + + let th_broadcast = thread::spawn(move || { + block_on(broadcaster.broadcast(1)).unwrap(); + }); + + let counter = Arc::new(AtomicUsize::new(0)); + + let th_recv: Vec<_> = mailboxes + .into_iter() + .map(|mut mailbox| { + thread::spawn({ + let mut counter = Counter::new(counter.clone()); + + move || { + let dummy_address = Receiver::new(1).sender(); + let 
dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); + let dummy_time = + SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); + let dummy_scheduler = + Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + } + }) + }) + .collect(); + + th_broadcast.join().unwrap(); + for th in th_recv { + th.join().unwrap(); + } + + assert_eq!(counter.load(Ordering::Relaxed), N_RECV); + } + + #[test] + fn broadcast_query_smoke() { + const N_RECV: usize = 4; + + let mut mailboxes = Vec::new(); + let mut broadcaster = QueryBroadcaster::default(); + for id in 0..N_RECV { + let mailbox = Receiver::new(10); + let address = mailbox.sender(); + let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address)); + + broadcaster.add(sender, LineId(id as u64)); + mailboxes.push(mailbox); + } + + let th_broadcast = thread::spawn(move || { + let iter = block_on(broadcaster.broadcast(1)).unwrap(); + let sum = iter.fold(0, |acc, val| acc + val); + + assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)} + }); + + let counter = Arc::new(AtomicUsize::new(0)); + + let th_recv: Vec<_> = mailboxes + .into_iter() + .map(|mut mailbox| { + thread::spawn({ + let mut counter = Counter::new(counter.clone()); + + move || { + let dummy_address = Receiver::new(1).sender(); + let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); + let dummy_time = + SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); + let dummy_scheduler = + Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + thread::sleep(std::time::Duration::from_millis(100)); + } + }) + }) + .collect(); + + th_broadcast.join().unwrap(); + for th in th_recv { + th.join().unwrap(); + } + + assert_eq!(counter.load(Ordering::Relaxed), N_RECV); + } +} + +#[cfg(all(test, asynchronix_loom))] +mod tests { 
+ use futures_channel::mpsc; + use futures_util::StreamExt; + + use loom::model::Builder; + use loom::sync::atomic::{AtomicBool, Ordering}; + use loom::thread; + + use waker_fn::waker_fn; + + use super::super::sender::SendError; + use super::*; + + // An event that may be waken spuriously. + struct TestEvent { + // The receiver is actually used only once in tests, so it is moved out + // of the `Option` on first use. + receiver: Option>>, + } + impl Sender<(), R> for TestEvent { + fn send(&mut self, _arg: ()) -> Pin> + Send>> { + let receiver = self.receiver.take().unwrap(); + + Box::pin(async move { + let mut stream = Box::pin(receiver.filter_map(|item| async { item })); + + Ok(stream.next().await.unwrap()) + }) + } + } + + // An object that can wake a `TestEvent`. + #[derive(Clone)] + struct TestEventWaker { + sender: mpsc::UnboundedSender>, + } + impl TestEventWaker { + fn wake_spurious(&self) { + let _ = self.sender.unbounded_send(None); + } + fn wake_final(&self, value: R) { + let _ = self.sender.unbounded_send(Some(value)); + } + } + + fn test_event() -> (TestEvent, TestEventWaker) { + let (sender, receiver) = mpsc::unbounded(); + + ( + TestEvent { + receiver: Some(receiver), + }, + TestEventWaker { sender }, + ) + } + + #[test] + fn loom_broadcast_basic() { + const DEFAULT_PREEMPTION_BOUND: usize = 3; + + let mut builder = Builder::new(); + if builder.preemption_bound.is_none() { + builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND); + } + + builder.check(move || { + let (test_event1, waker1) = test_event::(); + let (test_event2, waker2) = test_event::(); + let (test_event3, waker3) = test_event::(); + + let mut broadcaster = QueryBroadcaster::default(); + broadcaster.add(Box::new(test_event1), LineId(1)); + broadcaster.add(Box::new(test_event2), LineId(2)); + broadcaster.add(Box::new(test_event3), LineId(3)); + + let mut fut = Box::pin(broadcaster.broadcast(())); + let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); + let 
is_scheduled_waker = is_scheduled.clone(); + + let waker = waker_fn(move || { + // We use swap rather than a plain store to work around this + // bug: + is_scheduled_waker.swap(true, Ordering::Release); + }); + let mut cx = Context::from_waker(&waker); + + let th1 = thread::spawn(move || waker1.wake_final(3)); + let th2 = thread::spawn(move || waker2.wake_final(7)); + let th3 = thread::spawn(move || waker3.wake_final(42)); + + loop { + match fut.as_mut().poll(&mut cx) { + Poll::Ready(Ok(mut res)) => { + assert_eq!(res.next(), Some(3)); + assert_eq!(res.next(), Some(7)); + assert_eq!(res.next(), Some(42)); + assert_eq!(res.next(), None); + + return; + } + Poll::Ready(Err(_)) => panic!("sender error"), + Poll::Pending => {} + } + + // If the task has not been scheduled, exit the polling loop. + if !is_scheduled.swap(false, Ordering::Acquire) { + break; + } + } + + th1.join().unwrap(); + th2.join().unwrap(); + th3.join().unwrap(); + + assert!(is_scheduled.load(Ordering::Acquire)); + + match fut.as_mut().poll(&mut cx) { + Poll::Ready(Ok(mut res)) => { + assert_eq!(res.next(), Some(3)); + assert_eq!(res.next(), Some(7)); + assert_eq!(res.next(), Some(42)); + assert_eq!(res.next(), None); + } + Poll::Ready(Err(_)) => panic!("sender error"), + Poll::Pending => panic!("the future has not completed"), + }; + }); + } + + #[test] + fn loom_broadcast_spurious() { + const DEFAULT_PREEMPTION_BOUND: usize = 3; + + let mut builder = Builder::new(); + if builder.preemption_bound.is_none() { + builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND); + } + + builder.check(move || { + let (test_event1, waker1) = test_event::(); + let (test_event2, waker2) = test_event::(); + + let mut broadcaster = QueryBroadcaster::default(); + broadcaster.add(Box::new(test_event1), LineId(1)); + broadcaster.add(Box::new(test_event2), LineId(2)); + + let mut fut = Box::pin(broadcaster.broadcast(())); + let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); + let is_scheduled_waker = 
is_scheduled.clone(); + + let waker = waker_fn(move || { + // We use swap rather than a plain store to work around this + // bug: + is_scheduled_waker.swap(true, Ordering::Release); + }); + let mut cx = Context::from_waker(&waker); + + let spurious_waker = waker1.clone(); + let th1 = thread::spawn(move || waker1.wake_final(3)); + let th2 = thread::spawn(move || waker2.wake_final(7)); + let th_spurious = thread::spawn(move || spurious_waker.wake_spurious()); + + loop { + match fut.as_mut().poll(&mut cx) { + Poll::Ready(Ok(mut res)) => { + assert_eq!(res.next(), Some(3)); + assert_eq!(res.next(), Some(7)); + assert_eq!(res.next(), None); + + return; + } + Poll::Ready(Err(_)) => panic!("sender error"), + Poll::Pending => {} + } + + // If the task has not been scheduled, exit the polling loop. + if !is_scheduled.swap(false, Ordering::Acquire) { + break; + } + } + + th1.join().unwrap(); + th2.join().unwrap(); + th_spurious.join().unwrap(); + + assert!(is_scheduled.load(Ordering::Acquire)); + + match fut.as_mut().poll(&mut cx) { + Poll::Ready(Ok(mut res)) => { + assert_eq!(res.next(), Some(3)); + assert_eq!(res.next(), Some(7)); + assert_eq!(res.next(), None); + } + Poll::Ready(Err(_)) => panic!("sender error"), + Poll::Pending => panic!("the future has not completed"), + }; + }); + } +} diff --git a/asynchronix/src/ports/source/sender.rs b/asynchronix/src/ports/source/sender.rs new file mode 100644 index 0000000..1e83141 --- /dev/null +++ b/asynchronix/src/ports/source/sender.rs @@ -0,0 +1,136 @@ +use std::error::Error; +use std::fmt; +use std::future::Future; +use std::marker::PhantomData; +use std::pin::Pin; + +use futures_channel::oneshot; +use recycle_box::{coerce_box, RecycleBox}; + +use crate::channel; +use crate::model::Model; +use crate::ports::{InputFn, ReplierFn}; + +pub(super) type SenderFuture = Pin> + Send>>; + +/// An event or query sender abstracting over the target model and input method. 
+pub(super) trait Sender: Send { + /// Asynchronously send the event or request. + fn send(&mut self, arg: T) -> SenderFuture; +} + +/// An object that can send events to an input port. +pub(super) struct InputSender { + func: F, + sender: channel::Sender, + _phantom_closure: PhantomData, + _phantom_closure_marker: PhantomData, +} + +impl InputSender +where + M: Model, + F: for<'a> InputFn<'a, M, T, S>, + T: Send + 'static, +{ + pub(super) fn new(func: F, sender: channel::Sender) -> Self { + Self { + func, + sender, + _phantom_closure: PhantomData, + _phantom_closure_marker: PhantomData, + } + } +} + +impl Sender for InputSender +where + M: Model, + F: for<'a> InputFn<'a, M, T, S> + Clone, + T: Send + 'static, + S: Send + 'static, +{ + fn send(&mut self, arg: T) -> SenderFuture<()> { + let func = self.func.clone(); + let sender = self.sender.clone(); + + Box::pin(async move { + sender + .send(move |model, scheduler, recycle_box| { + let fut = func.call(model, arg, scheduler); + + coerce_box!(RecycleBox::recycle(recycle_box, fut)) + }) + .await + .map_err(|_| SendError {}) + }) + } +} + +/// An object that can send a request to a replier port and retrieve a response. 
+pub(super) struct ReplierSender { + func: F, + sender: channel::Sender, + _phantom_closure: PhantomData R>, + _phantom_closure_marker: PhantomData, +} + +impl ReplierSender +where + M: Model, + F: for<'a> ReplierFn<'a, M, T, R, S>, + T: Send + 'static, + R: Send + 'static, +{ + pub(super) fn new(func: F, sender: channel::Sender) -> Self { + Self { + func, + sender, + _phantom_closure: PhantomData, + _phantom_closure_marker: PhantomData, + } + } +} + +impl Sender for ReplierSender +where + M: Model, + F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, + T: Send + 'static, + R: Send + 'static, + S: Send, +{ + fn send(&mut self, arg: T) -> SenderFuture { + let func = self.func.clone(); + let sender = self.sender.clone(); + let (reply_sender, reply_receiver) = oneshot::channel(); + + Box::pin(async move { + sender + .send(move |model, scheduler, recycle_box| { + let fut = async move { + let reply = func.call(model, arg, scheduler).await; + let _ = reply_sender.send(reply); + }; + + coerce_box!(RecycleBox::recycle(recycle_box, fut)) + }) + .await + .map_err(|_| SendError {})?; + + reply_receiver.await.map_err(|_| SendError {}) + }) + } +} + +/// Error returned when the mailbox was closed or dropped. +#[derive(Debug, PartialEq, Eq, Clone, Copy)] +pub(super) struct SendError {} + +impl fmt::Display for SendError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "sending message into a closed mailbox") + } +} + +impl Error for SendError {} diff --git a/asynchronix/src/rpc.rs b/asynchronix/src/rpc.rs new file mode 100644 index 0000000..d67e3d0 --- /dev/null +++ b/asynchronix/src/rpc.rs @@ -0,0 +1,10 @@ +//! Simulation management through remote procedure calls. 
+ +mod codegen; +mod endpoint_registry; +mod generic_server; +#[cfg(feature = "grpc-server")] +pub mod grpc; +mod key_registry; + +pub use endpoint_registry::EndpointRegistry; diff --git a/asynchronix/src/rpc/api/custom_transport.proto b/asynchronix/src/rpc/api/custom_transport.proto new file mode 100644 index 0000000..46aefb4 --- /dev/null +++ b/asynchronix/src/rpc/api/custom_transport.proto @@ -0,0 +1,50 @@ +// Additional types for transport implementations which, unlike gRPC, do not +// support auto-generation from the `Simulation` service description. + +syntax = "proto3"; +package custom_transport; + +import "simulation.proto"; + +enum ServerErrorCode { + UNKNOWN_REQUEST = 0; + EMPTY_REQUEST = 1; +} + +message ServerError { + ServerErrorCode code = 1; + string message = 2; +} + +message AnyRequest { + oneof request { // Expects exactly 1 variant. + simulation.InitRequest init_request = 1; + simulation.TimeRequest time_request = 2; + simulation.StepRequest step_request = 3; + simulation.StepUntilRequest step_until_request = 4; + simulation.ScheduleEventRequest schedule_event_request = 5; + simulation.CancelEventRequest cancel_event_request = 6; + simulation.ProcessEventRequest process_event_request = 7; + simulation.ProcessQueryRequest process_query_request = 8; + simulation.ReadEventsRequest read_events_request = 9; + simulation.OpenSinkRequest open_sink_request = 10; + simulation.CloseSinkRequest close_sink_request = 11; + } +} + +message AnyReply { + oneof reply { // Contains exactly 1 variant. 
+ simulation.InitReply init_reply = 1; + simulation.TimeReply time_reply = 2; + simulation.StepReply step_reply = 3; + simulation.StepUntilReply step_until_reply = 4; + simulation.ScheduleEventReply schedule_event_reply = 5; + simulation.CancelEventReply cancel_event_reply = 6; + simulation.ProcessEventReply process_event_reply = 7; + simulation.ProcessQueryReply process_query_reply = 8; + simulation.ReadEventsReply read_events_reply = 9; + simulation.OpenSinkReply open_sink_reply = 10; + simulation.CloseSinkReply close_sink_reply = 11; + ServerError error = 100; + } +} diff --git a/asynchronix/src/rpc/api/simulation.proto b/asynchronix/src/rpc/api/simulation.proto new file mode 100644 index 0000000..b8982a0 --- /dev/null +++ b/asynchronix/src/rpc/api/simulation.proto @@ -0,0 +1,161 @@ +// The main simulation protocol. + +syntax = "proto3"; +package simulation; + +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; +import "google/protobuf/empty.proto"; + +enum ErrorCode { + INTERNAL_ERROR = 0; + SIMULATION_NOT_STARTED = 1; + MISSING_ARGUMENT = 2; + INVALID_TIME = 3; + INVALID_DURATION = 4; + INVALID_MESSAGE = 5; + INVALID_KEY = 6; + SOURCE_NOT_FOUND = 10; + SINK_NOT_FOUND = 11; + KEY_NOT_FOUND = 12; + SIMULATION_TIME_OUT_OF_RANGE = 13; +} + +message Error { + ErrorCode code = 1; + string message = 2; +} + +message EventKey { + uint64 subkey1 = 1; + uint64 subkey2 = 2; +} + +message InitRequest { optional google.protobuf.Timestamp time = 1; } +message InitReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 1; + Error error = 100; + } +} + +message TimeRequest {} +message TimeReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Timestamp time = 1; + Error error = 100; + } +} + +message StepRequest {} +message StepReply { + oneof result { // Always returns exactly 1 variant. 
+ google.protobuf.Timestamp time = 1; + Error error = 100; + } +} + +message StepUntilRequest { + oneof deadline { // Always returns exactly 1 variant. + google.protobuf.Timestamp time = 1; + google.protobuf.Duration duration = 2; + } +} +message StepUntilReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Timestamp time = 1; + Error error = 100; + } +} + +message ScheduleEventRequest { + oneof deadline { // Expects exactly 1 variant. + google.protobuf.Timestamp time = 1; + google.protobuf.Duration duration = 2; + } + string source_name = 3; + bytes event = 4; + optional google.protobuf.Duration period = 5; + optional bool with_key = 6; +} +message ScheduleEventReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 1; + EventKey key = 2; + Error error = 100; + } +} + +message CancelEventRequest { EventKey key = 1; } +message CancelEventReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 1; + Error error = 100; + } +} + +message ProcessEventRequest { + string source_name = 1; + bytes event = 2; +} +message ProcessEventReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 1; + Error error = 100; + } +} + +message ProcessQueryRequest { + string source_name = 1; + bytes request = 2; +} +message ProcessQueryReply { + // This field is hoisted because protobuf3 does not support `repeated` within + // a `oneof`. It is Always empty if an error is returned + repeated bytes replies = 1; + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 10; + Error error = 100; + } +} + +message ReadEventsRequest { string sink_name = 1; } +message ReadEventsReply { + // This field is hoisted because protobuf3 does not support `repeated` within + // a `oneof`. It is Always empty if an error is returned + repeated bytes events = 1; + oneof result { // Always returns exactly 1 variant. 
+ google.protobuf.Empty empty = 10; + Error error = 100; + } +} + +message OpenSinkRequest { string sink_name = 1; } +message OpenSinkReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 10; + Error error = 100; + } +} + +message CloseSinkRequest { string sink_name = 1; } +message CloseSinkReply { + oneof result { // Always returns exactly 1 variant. + google.protobuf.Empty empty = 10; + Error error = 100; + } +} + +service Simulation { + rpc Init(InitRequest) returns (InitReply); + rpc Time(TimeRequest) returns (TimeReply); + rpc Step(StepRequest) returns (StepReply); + rpc StepUntil(StepUntilRequest) returns (StepUntilReply); + rpc ScheduleEvent(ScheduleEventRequest) returns (ScheduleEventReply); + rpc CancelEvent(CancelEventRequest) returns (CancelEventReply); + rpc ProcessEvent(ProcessEventRequest) returns (ProcessEventReply); + rpc ProcessQuery(ProcessQueryRequest) returns (ProcessQueryReply); + rpc ReadEvents(ReadEventsRequest) returns (ReadEventsReply); + rpc OpenSink(OpenSinkRequest) returns (OpenSinkReply); + rpc CloseSink(CloseSinkRequest) returns (CloseSinkReply); +} diff --git a/asynchronix/src/rpc/codegen.rs b/asynchronix/src/rpc/codegen.rs new file mode 100644 index 0000000..0dfd7c8 --- /dev/null +++ b/asynchronix/src/rpc/codegen.rs @@ -0,0 +1,5 @@ +#![allow(unreachable_pub)] +#![allow(clippy::enum_variant_names)] + +pub(crate) mod custom_transport; +pub(crate) mod simulation; diff --git a/asynchronix/src/rpc/codegen/.gitkeep b/asynchronix/src/rpc/codegen/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/asynchronix/src/rpc/codegen/custom_transport.rs b/asynchronix/src/rpc/codegen/custom_transport.rs new file mode 100644 index 0000000..61eac9d --- /dev/null +++ b/asynchronix/src/rpc/codegen/custom_transport.rs @@ -0,0 +1,111 @@ +// This file is @generated by prost-build. 
+#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ServerError { + #[prost(enumeration = "ServerErrorCode", tag = "1")] + pub code: i32, + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnyRequest { + /// Expects exactly 1 variant. + #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] + pub request: ::core::option::Option, +} +/// Nested message and enum types in `AnyRequest`. +pub mod any_request { + /// Expects exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Request { + #[prost(message, tag = "1")] + InitRequest(super::super::simulation::InitRequest), + #[prost(message, tag = "2")] + TimeRequest(super::super::simulation::TimeRequest), + #[prost(message, tag = "3")] + StepRequest(super::super::simulation::StepRequest), + #[prost(message, tag = "4")] + StepUntilRequest(super::super::simulation::StepUntilRequest), + #[prost(message, tag = "5")] + ScheduleEventRequest(super::super::simulation::ScheduleEventRequest), + #[prost(message, tag = "6")] + CancelEventRequest(super::super::simulation::CancelEventRequest), + #[prost(message, tag = "7")] + ProcessEventRequest(super::super::simulation::ProcessEventRequest), + #[prost(message, tag = "8")] + ProcessQueryRequest(super::super::simulation::ProcessQueryRequest), + #[prost(message, tag = "9")] + ReadEventsRequest(super::super::simulation::ReadEventsRequest), + #[prost(message, tag = "10")] + OpenSinkRequest(super::super::simulation::OpenSinkRequest), + #[prost(message, tag = "11")] + CloseSinkRequest(super::super::simulation::CloseSinkRequest), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnyReply { + /// Contains exactly 1 variant. 
+ #[prost(oneof = "any_reply::Reply", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100")] + pub reply: ::core::option::Option, +} +/// Nested message and enum types in `AnyReply`. +pub mod any_reply { + /// Contains exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Reply { + #[prost(message, tag = "1")] + InitReply(super::super::simulation::InitReply), + #[prost(message, tag = "2")] + TimeReply(super::super::simulation::TimeReply), + #[prost(message, tag = "3")] + StepReply(super::super::simulation::StepReply), + #[prost(message, tag = "4")] + StepUntilReply(super::super::simulation::StepUntilReply), + #[prost(message, tag = "5")] + ScheduleEventReply(super::super::simulation::ScheduleEventReply), + #[prost(message, tag = "6")] + CancelEventReply(super::super::simulation::CancelEventReply), + #[prost(message, tag = "7")] + ProcessEventReply(super::super::simulation::ProcessEventReply), + #[prost(message, tag = "8")] + ProcessQueryReply(super::super::simulation::ProcessQueryReply), + #[prost(message, tag = "9")] + ReadEventsReply(super::super::simulation::ReadEventsReply), + #[prost(message, tag = "10")] + OpenSinkReply(super::super::simulation::OpenSinkReply), + #[prost(message, tag = "11")] + CloseSinkReply(super::super::simulation::CloseSinkReply), + #[prost(message, tag = "100")] + Error(super::ServerError), + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ServerErrorCode { + UnknownRequest = 0, + EmptyRequest = 1, +} +impl ServerErrorCode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ServerErrorCode::UnknownRequest => "UNKNOWN_REQUEST", + ServerErrorCode::EmptyRequest => "EMPTY_REQUEST", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "UNKNOWN_REQUEST" => Some(Self::UnknownRequest), + "EMPTY_REQUEST" => Some(Self::EmptyRequest), + _ => None, + } + } +} diff --git a/asynchronix/src/rpc/codegen/simulation.rs b/asynchronix/src/rpc/codegen/simulation.rs new file mode 100644 index 0000000..26f7518 --- /dev/null +++ b/asynchronix/src/rpc/codegen/simulation.rs @@ -0,0 +1,1071 @@ +// This file is @generated by prost-build. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Error { + #[prost(enumeration = "ErrorCode", tag = "1")] + pub code: i32, + #[prost(string, tag = "2")] + pub message: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventKey { + #[prost(uint64, tag = "1")] + pub subkey1: u64, + #[prost(uint64, tag = "2")] + pub subkey2: u64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InitRequest { + #[prost(message, optional, tag = "1")] + pub time: ::core::option::Option<::prost_types::Timestamp>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct InitReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "init_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `InitReply`. +pub mod init_reply { + /// Always returns exactly 1 variant. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TimeRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TimeReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "time_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `TimeReply`. +pub mod time_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Time(::prost_types::Timestamp), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StepRequest {} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StepReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "step_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `StepReply`. +pub mod step_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Time(::prost_types::Timestamp), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StepUntilRequest { + /// Always returns exactly 1 variant. 
+ #[prost(oneof = "step_until_request::Deadline", tags = "1, 2")] + pub deadline: ::core::option::Option, +} +/// Nested message and enum types in `StepUntilRequest`. +pub mod step_until_request { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Deadline { + #[prost(message, tag = "1")] + Time(::prost_types::Timestamp), + #[prost(message, tag = "2")] + Duration(::prost_types::Duration), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct StepUntilReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "step_until_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `StepUntilReply`. +pub mod step_until_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Time(::prost_types::Timestamp), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ScheduleEventRequest { + #[prost(string, tag = "3")] + pub source_name: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "4")] + pub event: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "5")] + pub period: ::core::option::Option<::prost_types::Duration>, + #[prost(bool, optional, tag = "6")] + pub with_key: ::core::option::Option, + /// Expects exactly 1 variant. + #[prost(oneof = "schedule_event_request::Deadline", tags = "1, 2")] + pub deadline: ::core::option::Option, +} +/// Nested message and enum types in `ScheduleEventRequest`. +pub mod schedule_event_request { + /// Expects exactly 1 variant. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Deadline { + #[prost(message, tag = "1")] + Time(::prost_types::Timestamp), + #[prost(message, tag = "2")] + Duration(::prost_types::Duration), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ScheduleEventReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "schedule_event_reply::Result", tags = "1, 2, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `ScheduleEventReply`. +pub mod schedule_event_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Empty(()), + #[prost(message, tag = "2")] + Key(super::EventKey), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CancelEventRequest { + #[prost(message, optional, tag = "1")] + pub key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CancelEventReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "cancel_event_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `CancelEventReply`. +pub mod cancel_event_reply { + /// Always returns exactly 1 variant. 
+ #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ProcessEventRequest { + #[prost(string, tag = "1")] + pub source_name: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "2")] + pub event: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ProcessEventReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "process_event_reply::Result", tags = "1, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `ProcessEventReply`. +pub mod process_event_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "1")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ProcessQueryRequest { + #[prost(string, tag = "1")] + pub source_name: ::prost::alloc::string::String, + #[prost(bytes = "vec", tag = "2")] + pub request: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ProcessQueryReply { + /// This field is hoisted because protobuf3 does not support `repeated` within + /// a `oneof`. It is Always empty if an error is returned + #[prost(bytes = "vec", repeated, tag = "1")] + pub replies: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Always returns exactly 1 variant. 
+ #[prost(oneof = "process_query_reply::Result", tags = "10, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `ProcessQueryReply`. +pub mod process_query_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "10")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadEventsRequest { + #[prost(string, tag = "1")] + pub sink_name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReadEventsReply { + /// This field is hoisted because protobuf3 does not support `repeated` within + /// a `oneof`. It is Always empty if an error is returned + #[prost(bytes = "vec", repeated, tag = "1")] + pub events: ::prost::alloc::vec::Vec<::prost::alloc::vec::Vec>, + /// Always returns exactly 1 variant. + #[prost(oneof = "read_events_reply::Result", tags = "10, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `ReadEventsReply`. +pub mod read_events_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "10")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OpenSinkRequest { + #[prost(string, tag = "1")] + pub sink_name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OpenSinkReply { + /// Always returns exactly 1 variant. 
+ #[prost(oneof = "open_sink_reply::Result", tags = "10, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `OpenSinkReply`. +pub mod open_sink_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "10")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloseSinkRequest { + #[prost(string, tag = "1")] + pub sink_name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloseSinkReply { + /// Always returns exactly 1 variant. + #[prost(oneof = "close_sink_reply::Result", tags = "10, 100")] + pub result: ::core::option::Option, +} +/// Nested message and enum types in `CloseSinkReply`. +pub mod close_sink_reply { + /// Always returns exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Result { + #[prost(message, tag = "10")] + Empty(()), + #[prost(message, tag = "100")] + Error(super::Error), + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ErrorCode { + InternalError = 0, + SimulationNotStarted = 1, + MissingArgument = 2, + InvalidTime = 3, + InvalidDuration = 4, + InvalidMessage = 5, + InvalidKey = 6, + SourceNotFound = 10, + SinkNotFound = 11, + KeyNotFound = 12, + SimulationTimeOutOfRange = 13, +} +impl ErrorCode { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + ErrorCode::InternalError => "INTERNAL_ERROR", + ErrorCode::SimulationNotStarted => "SIMULATION_NOT_STARTED", + ErrorCode::MissingArgument => "MISSING_ARGUMENT", + ErrorCode::InvalidTime => "INVALID_TIME", + ErrorCode::InvalidDuration => "INVALID_DURATION", + ErrorCode::InvalidMessage => "INVALID_MESSAGE", + ErrorCode::InvalidKey => "INVALID_KEY", + ErrorCode::SourceNotFound => "SOURCE_NOT_FOUND", + ErrorCode::SinkNotFound => "SINK_NOT_FOUND", + ErrorCode::KeyNotFound => "KEY_NOT_FOUND", + ErrorCode::SimulationTimeOutOfRange => "SIMULATION_TIME_OUT_OF_RANGE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "INTERNAL_ERROR" => Some(Self::InternalError), + "SIMULATION_NOT_STARTED" => Some(Self::SimulationNotStarted), + "MISSING_ARGUMENT" => Some(Self::MissingArgument), + "INVALID_TIME" => Some(Self::InvalidTime), + "INVALID_DURATION" => Some(Self::InvalidDuration), + "INVALID_MESSAGE" => Some(Self::InvalidMessage), + "INVALID_KEY" => Some(Self::InvalidKey), + "SOURCE_NOT_FOUND" => Some(Self::SourceNotFound), + "SINK_NOT_FOUND" => Some(Self::SinkNotFound), + "KEY_NOT_FOUND" => Some(Self::KeyNotFound), + "SIMULATION_TIME_OUT_OF_RANGE" => Some(Self::SimulationTimeOutOfRange), + _ => None, + } + } +} +/// Generated server implementations. +pub mod simulation_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with SimulationServer. 
+ #[async_trait] + pub trait Simulation: Send + Sync + 'static { + async fn init( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn time( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn step( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn step_until( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn schedule_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn cancel_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn process_event( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn process_query( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn read_events( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn open_sink( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn close_sink( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + } + #[derive(Debug)] + pub struct SimulationServer { + inner: _Inner, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + struct _Inner(Arc); + impl SimulationServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + let inner = _Inner(inner); + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + 
interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for SimulationServer + where + T: Simulation, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + let inner = self.inner.clone(); + match req.uri().path() { + "/simulation.Simulation/Init" => { + #[allow(non_camel_case_types)] + struct InitSvc(pub Arc); + impl tonic::server::UnaryService + for InitSvc { + type Response = super::InitReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::init(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = 
self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = InitSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/Time" => { + #[allow(non_camel_case_types)] + struct TimeSvc(pub Arc); + impl tonic::server::UnaryService + for TimeSvc { + type Response = super::TimeReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::time(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = TimeSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/Step" => { + #[allow(non_camel_case_types)] + struct 
StepSvc(pub Arc); + impl tonic::server::UnaryService + for StepSvc { + type Response = super::StepReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::step(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = StepSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/StepUntil" => { + #[allow(non_camel_case_types)] + struct StepUntilSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for StepUntilSvc { + type Response = super::StepUntilReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::step_until(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = 
StepUntilSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/ScheduleEvent" => { + #[allow(non_camel_case_types)] + struct ScheduleEventSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for ScheduleEventSvc { + type Response = super::ScheduleEventReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::schedule_event(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ScheduleEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/CancelEvent" => { + #[allow(non_camel_case_types)] + struct CancelEventSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for CancelEventSvc { + type Response = super::CancelEventReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + 
request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::cancel_event(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CancelEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/ProcessEvent" => { + #[allow(non_camel_case_types)] + struct ProcessEventSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for ProcessEventSvc { + type Response = super::ProcessEventReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::process_event(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ProcessEventSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/ProcessQuery" => { + #[allow(non_camel_case_types)] + struct ProcessQuerySvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for ProcessQuerySvc { + type Response = super::ProcessQueryReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::process_query(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ProcessQuerySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/ReadEvents" => { + #[allow(non_camel_case_types)] + struct ReadEventsSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for ReadEventsSvc { + type Response = super::ReadEventsReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::read_events(&inner, request).await + }; + 
Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = ReadEventsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/OpenSink" => { + #[allow(non_camel_case_types)] + struct OpenSinkSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for OpenSinkSvc { + type Response = super::OpenSinkReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::open_sink(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = OpenSinkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + 
Ok(res) + }; + Box::pin(fut) + } + "/simulation.Simulation/CloseSink" => { + #[allow(non_camel_case_types)] + struct CloseSinkSvc(pub Arc); + impl< + T: Simulation, + > tonic::server::UnaryService + for CloseSinkSvc { + type Response = super::CloseSinkReply; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::close_sink(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CloseSinkSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for SimulationServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl Clone for _Inner { + fn clone(&self) -> Self { + Self(Arc::clone(&self.0)) + } + } + impl std::fmt::Debug 
for _Inner { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", self.0) + } + } + impl tonic::server::NamedService for SimulationServer { + const NAME: &'static str = "simulation.Simulation"; + } +} diff --git a/asynchronix/src/rpc/endpoint_registry.rs b/asynchronix/src/rpc/endpoint_registry.rs new file mode 100644 index 0000000..8a71ffb --- /dev/null +++ b/asynchronix/src/rpc/endpoint_registry.rs @@ -0,0 +1,307 @@ +use std::collections::hash_map::Entry; +use std::collections::HashMap; +use std::fmt; +use std::time::Duration; + +use rmp_serde::decode::Error as RmpDecodeError; +use rmp_serde::encode::Error as RmpEncodeError; +use serde::de::DeserializeOwned; +use serde::Serialize; + +use crate::ports::{EventSinkStream, EventSource, QuerySource, ReplyReceiver}; +use crate::time::{Action, ActionKey}; + +/// A registry that holds all sources and sinks meant to be accessed through +/// remote procedure calls. +#[derive(Default)] +pub struct EndpointRegistry { + event_sources: HashMap>, + query_sources: HashMap>, + sinks: HashMap>, +} + +impl EndpointRegistry { + /// Creates an empty `EndpointRegistry`. + pub fn new() -> Self { + Self::default() + } + + /// Adds an event source to the registry. + /// + /// If the specified name is already in use for another event source, the source + /// provided as argument is returned in the error. + pub fn add_event_source( + &mut self, + source: EventSource, + name: impl Into, + ) -> Result<(), EventSource> + where + T: DeserializeOwned + Clone + Send + 'static, + { + match self.event_sources.entry(name.into()) { + Entry::Vacant(s) => { + s.insert(Box::new(source)); + + Ok(()) + } + Entry::Occupied(_) => Err(source), + } + } + + /// Returns a mutable reference to the specified event source if it is in + /// the registry. 
+ pub(crate) fn get_event_source_mut(&mut self, name: &str) -> Option<&mut dyn EventSourceAny> { + self.event_sources.get_mut(name).map(|s| s.as_mut()) + } + + /// Adds an query source to the registry. + /// + /// If the specified name is already in use for another query source, the source + /// provided as argument is returned in the error. + pub fn add_query_source( + &mut self, + source: QuerySource, + name: impl Into, + ) -> Result<(), QuerySource> + where + T: DeserializeOwned + Clone + Send + 'static, + R: Serialize + Send + 'static, + { + match self.query_sources.entry(name.into()) { + Entry::Vacant(s) => { + s.insert(Box::new(source)); + + Ok(()) + } + Entry::Occupied(_) => Err(source), + } + } + + /// Returns a mutable reference to the specified query source if it is in + /// the registry. + pub(crate) fn get_query_source_mut(&mut self, name: &str) -> Option<&mut dyn QuerySourceAny> { + self.query_sources.get_mut(name).map(|s| s.as_mut()) + } + + /// Adds a sink to the registry. + /// + /// If the specified name is already in use for another sink, the sink + /// provided as argument is returned in the error. + pub fn add_sink(&mut self, sink: S, name: impl Into) -> Result<(), S> + where + S: EventSinkStream + Send + 'static, + S::Item: Serialize, + { + match self.sinks.entry(name.into()) { + Entry::Vacant(s) => { + s.insert(Box::new(sink)); + + Ok(()) + } + Entry::Occupied(_) => Err(sink), + } + } + + /// Returns a mutable reference to the specified sink if it is in the + /// registry. + pub(crate) fn get_sink_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> { + self.sinks.get_mut(name).map(|s| s.as_mut()) + } +} + +impl fmt::Debug for EndpointRegistry { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "EndpointRegistry ({} sources, {} sinks)", + self.event_sources.len(), + self.sinks.len() + ) + } +} + +/// A type-erased `EventSource` that operates on MessagePack-encoded serialized +/// events. 
+pub(crate) trait EventSourceAny: Send + 'static { + /// Returns an action which, when processed, broadcasts an event to all + /// connected input ports. + /// + /// The argument is expected to conform to the serde MessagePack encoding. + fn event(&mut self, msgpack_arg: &[u8]) -> Result; + + /// Returns a cancellable action and a cancellation key; when processed, the + /// action broadcasts an event to all connected input ports. + /// + /// The argument is expected to conform to the serde MessagePack encoding. + fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError>; + + /// Returns a periodically recurring action which, when processed, + /// broadcasts an event to all connected input ports. + /// + /// The argument is expected to conform to the serde MessagePack encoding. + fn periodic_event( + &mut self, + period: Duration, + msgpack_arg: &[u8], + ) -> Result; + + /// Returns a cancellable, periodically recurring action and a cancellation + /// key; when processed, the action broadcasts an event to all connected + /// input ports. + /// + /// The argument is expected to conform to the serde MessagePack encoding. + fn keyed_periodic_event( + &mut self, + period: Duration, + msgpack_arg: &[u8], + ) -> Result<(Action, ActionKey), RmpDecodeError>; + + /// Human-readable name of the event type, as returned by + /// `any::type_name()`. 
+ fn event_type_name(&self) -> &'static str; +} + +impl EventSourceAny for EventSource +where + T: DeserializeOwned + Clone + Send + 'static, +{ + fn event(&mut self, msgpack_arg: &[u8]) -> Result { + rmp_serde::from_read(msgpack_arg).map(|arg| self.event(arg)) + } + fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError> { + rmp_serde::from_read(msgpack_arg).map(|arg| self.keyed_event(arg)) + } + fn periodic_event( + &mut self, + period: Duration, + msgpack_arg: &[u8], + ) -> Result { + rmp_serde::from_read(msgpack_arg).map(|arg| self.periodic_event(period, arg)) + } + fn keyed_periodic_event( + &mut self, + period: Duration, + msgpack_arg: &[u8], + ) -> Result<(Action, ActionKey), RmpDecodeError> { + rmp_serde::from_read(msgpack_arg).map(|arg| self.keyed_periodic_event(period, arg)) + } + fn event_type_name(&self) -> &'static str { + std::any::type_name::() + } +} + +/// A type-erased `QuerySource` that operates on MessagePack-encoded serialized +/// queries and returns MessagePack-encoded replies. +pub(crate) trait QuerySourceAny: Send + 'static { + /// Returns an action which, when processed, broadcasts a query to all + /// connected replier ports. + /// + /// + /// The argument is expected to conform to the serde MessagePack encoding. + fn query( + &mut self, + msgpack_arg: &[u8], + ) -> Result<(Action, Box), RmpDecodeError>; + + /// Human-readable name of the request type, as returned by + /// `any::type_name()`. + fn request_type_name(&self) -> &'static str; + + /// Human-readable name of the reply type, as returned by + /// `any::type_name()`. 
+ fn reply_type_name(&self) -> &'static str; +} + +impl QuerySourceAny for QuerySource +where + T: DeserializeOwned + Clone + Send + 'static, + R: Serialize + Send + 'static, +{ + fn query( + &mut self, + msgpack_arg: &[u8], + ) -> Result<(Action, Box), RmpDecodeError> { + rmp_serde::from_read(msgpack_arg).map(|arg| { + let (action, reply_recv) = self.query(arg); + let reply_recv: Box = Box::new(reply_recv); + + (action, reply_recv) + }) + } + + fn request_type_name(&self) -> &'static str { + std::any::type_name::() + } + + fn reply_type_name(&self) -> &'static str { + std::any::type_name::() + } +} + +/// A type-erased `EventSinkStream`. +pub(crate) trait EventSinkStreamAny: Send + 'static { + /// Human-readable name of the event type, as returned by + /// `any::type_name()`. + fn event_type_name(&self) -> &'static str; + + /// Starts or resumes the collection of new events. + fn open(&mut self); + + /// Pauses the collection of new events. + fn close(&mut self); + + /// Encode and collect all events in a vector. + fn collect(&mut self) -> Result>, RmpEncodeError>; +} + +impl EventSinkStreamAny for E +where + E: EventSinkStream + Send + 'static, + E::Item: Serialize, +{ + fn event_type_name(&self) -> &'static str { + std::any::type_name::() + } + + fn open(&mut self) { + self.open(); + } + + fn close(&mut self) { + self.close(); + } + + fn collect(&mut self) -> Result>, RmpEncodeError> { + EventSinkStream::try_fold(self, Vec::new(), |mut encoded_events, event| { + rmp_serde::to_vec_named(&event).map(|encoded_event| { + encoded_events.push(encoded_event); + + encoded_events + }) + }) + } +} + +/// A type-erased `ReplyReceiver` that returns MessagePack-encoded replies.. +pub(crate) trait ReplyReceiverAny { + /// Take the replies, if any, encode them and collect them in a vector. 
+ fn take_collect(&mut self) -> Option>, RmpEncodeError>>; +} + +impl ReplyReceiverAny for ReplyReceiver { + fn take_collect(&mut self) -> Option>, RmpEncodeError>> { + let replies = self.take()?; + + let encoded_replies = (move || { + let mut encoded_replies = Vec::new(); + for reply in replies { + let encoded_reply = rmp_serde::to_vec_named(&reply)?; + encoded_replies.push(encoded_reply); + } + + Ok(encoded_replies) + })(); + + Some(encoded_replies) + } +} diff --git a/asynchronix/src/rpc/generic_server.rs b/asynchronix/src/rpc/generic_server.rs new file mode 100644 index 0000000..edeb06d --- /dev/null +++ b/asynchronix/src/rpc/generic_server.rs @@ -0,0 +1,673 @@ +use std::time::Duration; + +use bytes::Buf; +use prost::Message; +use prost_types::Timestamp; +use tai_time::MonotonicTime; + +use crate::rpc::key_registry::{KeyRegistry, KeyRegistryId}; +use crate::rpc::EndpointRegistry; +use crate::simulation::{SimInit, Simulation}; + +use super::codegen::custom_transport::*; +use super::codegen::simulation::*; + +/// Transport-independent server implementation. +/// +/// This implementation implements the protobuf services without any +/// transport-specific management. +pub(crate) struct GenericServer { + sim_gen: F, + sim_context: Option<(Simulation, EndpointRegistry, KeyRegistry)>, +} + +impl GenericServer +where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, +{ + /// Creates a new `GenericServer` without any active simulation. + pub(crate) fn new(sim_gen: F) -> Self { + Self { + sim_gen, + sim_context: None, + } + } + + /// Processes an encoded `AnyRequest` message and returns an encoded + /// `AnyReply`. 
+ #[allow(dead_code)] + pub(crate) fn service_request(&mut self, request_buf: B) -> Vec + where + B: Buf, + { + let reply = match AnyRequest::decode(request_buf) { + Ok(AnyRequest { request: Some(req) }) => match req { + any_request::Request::InitRequest(request) => { + any_reply::Reply::InitReply(self.init(request)) + } + any_request::Request::TimeRequest(request) => { + any_reply::Reply::TimeReply(self.time(request)) + } + any_request::Request::StepRequest(request) => { + any_reply::Reply::StepReply(self.step(request)) + } + any_request::Request::StepUntilRequest(request) => { + any_reply::Reply::StepUntilReply(self.step_until(request)) + } + any_request::Request::ScheduleEventRequest(request) => { + any_reply::Reply::ScheduleEventReply(self.schedule_event(request)) + } + any_request::Request::CancelEventRequest(request) => { + any_reply::Reply::CancelEventReply(self.cancel_event(request)) + } + any_request::Request::ProcessEventRequest(request) => { + any_reply::Reply::ProcessEventReply(self.process_event(request)) + } + any_request::Request::ProcessQueryRequest(request) => { + any_reply::Reply::ProcessQueryReply(self.process_query(request)) + } + any_request::Request::ReadEventsRequest(request) => { + any_reply::Reply::ReadEventsReply(self.read_events(request)) + } + any_request::Request::OpenSinkRequest(request) => { + any_reply::Reply::OpenSinkReply(self.open_sink(request)) + } + any_request::Request::CloseSinkRequest(request) => { + any_reply::Reply::CloseSinkReply(self.close_sink(request)) + } + }, + Ok(AnyRequest { request: None }) => any_reply::Reply::Error(ServerError { + code: ServerErrorCode::EmptyRequest as i32, + message: "the message did not contain any request".to_string(), + }), + Err(err) => any_reply::Reply::Error(ServerError { + code: ServerErrorCode::UnknownRequest as i32, + message: format!("bad request: {}", err), + }), + }; + + let reply = AnyReply { reply: Some(reply) }; + + reply.encode_to_vec() + } + + /// Initialize a simulation with 
the provided time. + /// + /// If a simulation is already active, it is destructed and replaced with a + /// new simulation. + /// + /// If the initialization time is not provided, it is initialized with the + /// epoch of `MonotonicTime` (1970-01-01 00:00:00 TAI). + pub(crate) fn init(&mut self, request: InitRequest) -> InitReply { + let start_time = request.time.unwrap_or_default(); + let reply = if let Some(start_time) = timestamp_to_monotonic(start_time) { + let (sim_init, endpoint_registry) = (self.sim_gen)(); + let simulation = sim_init.init(start_time); + self.sim_context = Some((simulation, endpoint_registry, KeyRegistry::default())); + + init_reply::Result::Empty(()) + } else { + init_reply::Result::Error(Error { + code: ErrorCode::InvalidTime as i32, + message: "out-of-range nanosecond field".to_string(), + }) + }; + + InitReply { + result: Some(reply), + } + } + + /// Returns the current simulation time. + pub(crate) fn time(&mut self, _request: TimeRequest) -> TimeReply { + let reply = match &self.sim_context { + Some((simulation, ..)) => { + if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) { + time_reply::Result::Time(timestamp) + } else { + time_reply::Result::Error(Error { + code: ErrorCode::SimulationTimeOutOfRange as i32, + message: "the final simulation time is out of range".to_string(), + }) + } + } + None => time_reply::Result::Error(Error { + code: ErrorCode::SimulationNotStarted as i32, + message: "the simulation was not started".to_string(), + }), + }; + + TimeReply { + result: Some(reply), + } + } + + /// Advances simulation time to that of the next scheduled event, processing + /// that event as well as all other event scheduled for the same time. + /// + /// Processing is gated by a (possibly blocking) call to + /// [`Clock::synchronize()`](crate::time::Clock::synchronize) on the + /// configured simulation clock. This method blocks until all newly + /// processed events have completed. 
+ pub(crate) fn step(&mut self, _request: StepRequest) -> StepReply { + let reply = match &mut self.sim_context { + Some((simulation, ..)) => { + simulation.step(); + if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) { + step_reply::Result::Time(timestamp) + } else { + step_reply::Result::Error(Error { + code: ErrorCode::SimulationTimeOutOfRange as i32, + message: "the final simulation time is out of range".to_string(), + }) + } + } + None => step_reply::Result::Error(Error { + code: ErrorCode::SimulationNotStarted as i32, + message: "the simulation was not started".to_string(), + }), + }; + + StepReply { + result: Some(reply), + } + } + + /// Iteratively advances the simulation time until the specified deadline, + /// as if by calling + /// [`Simulation::step()`](crate::simulation::Simulation::step) repeatedly. + /// + /// This method blocks until all events scheduled up to the specified target + /// time have completed. The simulation time upon completion is equal to the + /// specified target time, whether or not an event was scheduled for that + /// time. + pub(crate) fn step_until(&mut self, request: StepUntilRequest) -> StepUntilReply { + let reply = move || -> Result { + let deadline = request + .deadline + .ok_or((ErrorCode::MissingArgument, "missing deadline argument"))?; + + let simulation = match deadline { + step_until_request::Deadline::Time(time) => { + let time = timestamp_to_monotonic(time) + .ok_or((ErrorCode::InvalidTime, "out-of-range nanosecond field"))?; + + let (simulation, ..) 
= self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started", + ))?; + + simulation.step_until(time).map_err(|_| { + ( + ErrorCode::InvalidTime, + "the specified deadline lies in the past", + ) + })?; + + simulation + } + + step_until_request::Deadline::Duration(duration) => { + let duration = to_positive_duration(duration).ok_or(( + ErrorCode::InvalidDuration, + "the specified deadline lies in the past", + ))?; + + let (simulation, ..) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started", + ))?; + + simulation.step_by(duration); + + simulation + } + }; + + let timestamp = monotonic_to_timestamp(simulation.time()).ok_or(( + ErrorCode::SimulationTimeOutOfRange, + "the final simulation time is out of range", + ))?; + + Ok(timestamp) + }(); + + StepUntilReply { + result: Some(match reply { + Ok(timestamp) => step_until_reply::Result::Time(timestamp), + Err((code, message)) => step_until_reply::Result::Error(Error { + code: code as i32, + message: message.to_string(), + }), + }), + } + } + + /// Schedules an event at a future time. 
+ pub(crate) fn schedule_event(&mut self, request: ScheduleEventRequest) -> ScheduleEventReply { + let reply = move || -> Result, (ErrorCode, String)> { + let source_name = &request.source_name; + let msgpack_event = &request.event; + let with_key = request.with_key.unwrap_or_default(); + let period = request + .period + .map(|period| { + to_strictly_positive_duration(period).ok_or(( + ErrorCode::InvalidDuration, + "the specified event period is not strictly positive".to_string(), + )) + }) + .transpose()?; + + let (simulation, endpoint_registry, key_registry) = + self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let deadline = request.deadline.ok_or(( + ErrorCode::MissingArgument, + "missing deadline argument".to_string(), + ))?; + + let deadline = match deadline { + schedule_event_request::Deadline::Time(time) => timestamp_to_monotonic(time) + .ok_or(( + ErrorCode::InvalidTime, + "out-of-range nanosecond field".to_string(), + ))?, + schedule_event_request::Deadline::Duration(duration) => { + let duration = to_strictly_positive_duration(duration).ok_or(( + ErrorCode::InvalidDuration, + "the specified scheduling deadline is not in the future".to_string(), + ))?; + + simulation.time() + duration + } + }; + + let source = endpoint_registry.get_event_source_mut(source_name).ok_or(( + ErrorCode::SourceNotFound, + "no event source is registered with the name '{}'".to_string(), + ))?; + + let (action, action_key) = match (with_key, period) { + (false, None) => source.event(msgpack_event).map(|action| (action, None)), + (false, Some(period)) => source + .periodic_event(period, msgpack_event) + .map(|action| (action, None)), + (true, None) => source + .keyed_event(msgpack_event) + .map(|(action, key)| (action, Some(key))), + (true, Some(period)) => source + .keyed_periodic_event(period, msgpack_event) + .map(|(action, key)| (action, Some(key))), + } + .map_err(|_| { + ( + 
ErrorCode::InvalidMessage, + format!( + "the event could not be deserialized as type '{}'", + source.event_type_name() + ), + ) + })?; + + let key_id = action_key.map(|action_key| { + // Free stale keys from the registry. + key_registry.remove_expired_keys(simulation.time()); + + if period.is_some() { + key_registry.insert_eternal_key(action_key) + } else { + key_registry.insert_key(action_key, deadline) + } + }); + + simulation.process(action); + + Ok(key_id) + }(); + + ScheduleEventReply { + result: Some(match reply { + Ok(Some(key_id)) => { + let (subkey1, subkey2) = key_id.into_raw_parts(); + schedule_event_reply::Result::Key(EventKey { + subkey1: subkey1 + .try_into() + .expect("action key index is too large to be serialized"), + subkey2, + }) + } + Ok(None) => schedule_event_reply::Result::Empty(()), + Err((code, message)) => schedule_event_reply::Result::Error(Error { + code: code as i32, + message, + }), + }), + } + } + + /// Cancels a keyed event. + pub(crate) fn cancel_event(&mut self, request: CancelEventRequest) -> CancelEventReply { + let reply = move || -> Result<(), (ErrorCode, String)> { + let key = request.key.ok_or(( + ErrorCode::MissingArgument, + "missing key argument".to_string(), + ))?; + let subkey1: usize = key + .subkey1 + .try_into() + .map_err(|_| (ErrorCode::InvalidKey, "invalid event key".to_string()))?; + let subkey2 = key.subkey2; + + let (simulation, _, key_registry) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let key_id = KeyRegistryId::from_raw_parts(subkey1, subkey2); + + key_registry.remove_expired_keys(simulation.time()); + let key = key_registry.extract_key(key_id).ok_or(( + ErrorCode::InvalidKey, + "invalid or expired event key".to_string(), + ))?; + + key.cancel(); + + Ok(()) + }(); + + CancelEventReply { + result: Some(match reply { + Ok(()) => cancel_event_reply::Result::Empty(()), + Err((code, message)) => 
cancel_event_reply::Result::Error(Error { + code: code as i32, + message, + }), + }), + } + } + + /// Broadcasts an event from an event source immediately, blocking until + /// completion. + /// + /// Simulation time remains unchanged. + pub(crate) fn process_event(&mut self, request: ProcessEventRequest) -> ProcessEventReply { + let reply = move || -> Result<(), (ErrorCode, String)> { + let source_name = &request.source_name; + let msgpack_event = &request.event; + + let (simulation, registry, _) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let source = registry.get_event_source_mut(source_name).ok_or(( + ErrorCode::SourceNotFound, + "no source is registered with the name '{}'".to_string(), + ))?; + + let event = source.event(msgpack_event).map_err(|_| { + ( + ErrorCode::InvalidMessage, + format!( + "the event could not be deserialized as type '{}'", + source.event_type_name() + ), + ) + })?; + + simulation.process(event); + + Ok(()) + }(); + + ProcessEventReply { + result: Some(match reply { + Ok(()) => process_event_reply::Result::Empty(()), + Err((code, message)) => process_event_reply::Result::Error(Error { + code: code as i32, + message, + }), + }), + } + } + + /// Broadcasts an event from an event source immediately, blocking until + /// completion. + /// + /// Simulation time remains unchanged. 
+ pub(crate) fn process_query(&mut self, request: ProcessQueryRequest) -> ProcessQueryReply { + let reply = move || -> Result>, (ErrorCode, String)> { + let source_name = &request.source_name; + let msgpack_request = &request.request; + + let (simulation, registry, _) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let source = registry.get_query_source_mut(source_name).ok_or(( + ErrorCode::SourceNotFound, + "no source is registered with the name '{}'".to_string(), + ))?; + + let (query, mut promise) = source.query(msgpack_request).map_err(|_| { + ( + ErrorCode::InvalidMessage, + format!( + "the request could not be deserialized as type '{}'", + source.request_type_name() + ), + ) + })?; + + simulation.process(query); + + let replies = promise.take_collect().ok_or(( + ErrorCode::InternalError, + "a reply to the query was expected but none was available".to_string(), + ))?; + + replies.map_err(|_| { + ( + ErrorCode::InvalidMessage, + format!( + "the reply could not be serialized as type '{}'", + source.reply_type_name() + ), + ) + }) + }(); + + match reply { + Ok(replies) => ProcessQueryReply { + replies, + result: Some(process_query_reply::Result::Empty(())), + }, + Err((code, message)) => ProcessQueryReply { + replies: Vec::new(), + result: Some(process_query_reply::Result::Error(Error { + code: code as i32, + message, + })), + }, + } + } + + /// Read all events from an event sink. 
+ pub(crate) fn read_events(&mut self, request: ReadEventsRequest) -> ReadEventsReply { + let reply = move || -> Result>, (ErrorCode, String)> { + let sink_name = &request.sink_name; + + let (_, registry, _) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let sink = registry.get_sink_mut(sink_name).ok_or(( + ErrorCode::SinkNotFound, + "no sink is registered with the name '{}'".to_string(), + ))?; + + sink.collect().map_err(|_| { + ( + ErrorCode::InvalidMessage, + format!( + "the event could not be serialized from type '{}'", + sink.event_type_name() + ), + ) + }) + }(); + + match reply { + Ok(events) => ReadEventsReply { + events, + result: Some(read_events_reply::Result::Empty(())), + }, + Err((code, message)) => ReadEventsReply { + events: Vec::new(), + result: Some(read_events_reply::Result::Error(Error { + code: code as i32, + message, + })), + }, + } + } + + /// Opens an event sink. + pub(crate) fn open_sink(&mut self, request: OpenSinkRequest) -> OpenSinkReply { + let reply = move || -> Result<(), (ErrorCode, String)> { + let sink_name = &request.sink_name; + + let (_, registry, _) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let sink = registry.get_sink_mut(sink_name).ok_or(( + ErrorCode::SinkNotFound, + "no sink is registered with the name '{}'".to_string(), + ))?; + + sink.open(); + + Ok(()) + }(); + + match reply { + Ok(()) => OpenSinkReply { + result: Some(open_sink_reply::Result::Empty(())), + }, + Err((code, message)) => OpenSinkReply { + result: Some(open_sink_reply::Result::Error(Error { + code: code as i32, + message, + })), + }, + } + } + + /// Closes an event sink. 
+ pub(crate) fn close_sink(&mut self, request: CloseSinkRequest) -> CloseSinkReply { + let reply = move || -> Result<(), (ErrorCode, String)> { + let sink_name = &request.sink_name; + + let (_, registry, _) = self.sim_context.as_mut().ok_or(( + ErrorCode::SimulationNotStarted, + "the simulation was not started".to_string(), + ))?; + + let sink = registry.get_sink_mut(sink_name).ok_or(( + ErrorCode::SinkNotFound, + "no sink is registered with the name '{}'".to_string(), + ))?; + + sink.close(); + + Ok(()) + }(); + + match reply { + Ok(()) => CloseSinkReply { + result: Some(close_sink_reply::Result::Empty(())), + }, + Err((code, message)) => CloseSinkReply { + result: Some(close_sink_reply::Result::Error(Error { + code: code as i32, + message, + })), + }, + } + } +} + +/// Attempts a cast from a `MonotonicTime` to a protobuf `Timestamp`. +/// +/// This will fail if the time is outside the protobuf-specified range for +/// timestamps (0001-01-01 00:00:00 to 9999-12-31 23:59:59). +fn monotonic_to_timestamp(monotonic_time: MonotonicTime) -> Option { + // Unix timestamp for 0001-01-01 00:00:00, the minimum accepted by + // protobuf's specification for the `Timestamp` type. + const MIN_SECS: i64 = -62135596800; + // Unix timestamp for 9999-12-31 23:59:59, the maximum accepted by + // protobuf's specification for the `Timestamp` type. + const MAX_SECS: i64 = 253402300799; + + let secs = monotonic_time.as_secs(); + if !(MIN_SECS..=MAX_SECS).contains(&secs) { + return None; + } + + Some(Timestamp { + seconds: secs, + nanos: monotonic_time.subsec_nanos() as i32, + }) +} + +/// Attempts a cast from a protobuf `Timestamp` to a `MonotonicTime`. +/// +/// This should never fail provided that the `Timestamp` complies with the +/// protobuf specification. It can only fail if the nanosecond part is negative +/// or greater than 999'999'999. 
+fn timestamp_to_monotonic(timestamp: Timestamp) -> Option { + let nanos: u32 = timestamp.nanos.try_into().ok()?; + + MonotonicTime::new(timestamp.seconds, nanos) +} + +/// Attempts a cast from a protobuf `Duration` to a `std::time::Duration`. +/// +/// If the `Duration` complies with the protobuf specification, this can only +/// fail if the duration is negative. +fn to_positive_duration(duration: prost_types::Duration) -> Option { + if duration.seconds < 0 || duration.nanos < 0 { + return None; + } + + Some(Duration::new( + duration.seconds as u64, + duration.nanos as u32, + )) +} + +/// Attempts a cast from a protobuf `Duration` to a strictly positive +/// `std::time::Duration`. +/// +/// If the `Duration` complies with the protobuf specification, this can only +/// fail if the duration is negative or null. +fn to_strictly_positive_duration(duration: prost_types::Duration) -> Option { + if duration.seconds < 0 || duration.nanos < 0 || (duration.seconds == 0 && duration.nanos == 0) + { + return None; + } + + Some(Duration::new( + duration.seconds as u64, + duration.nanos as u32, + )) +} diff --git a/asynchronix/src/rpc/grpc.rs b/asynchronix/src/rpc/grpc.rs new file mode 100644 index 0000000..1cb2960 --- /dev/null +++ b/asynchronix/src/rpc/grpc.rs @@ -0,0 +1,146 @@ +//! GRPC simulation server. + +use std::net::SocketAddr; +use std::sync::Mutex; +use std::sync::MutexGuard; + +use tonic::{transport::Server, Request, Response, Status}; + +use crate::rpc::EndpointRegistry; +use crate::simulation::SimInit; + +use super::codegen::simulation::*; +use super::generic_server::GenericServer; + +/// Runs a GRPC simulation server. +/// +/// The first argument is a closure that is called every time the simulation is +/// started by the remote client. It must create a new `SimInit` object +/// complemented by a registry that exposes the public event and query +/// interface. 
+pub fn run(sim_gen: F, addr: SocketAddr) -> Result<(), Box> +where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, +{ + // Use a single-threaded server. + let rt = tokio::runtime::Builder::new_current_thread().build()?; + + let sim_manager = GrpcServer::new(sim_gen); + + rt.block_on(async move { + Server::builder() + .add_service(simulation_server::SimulationServer::new(sim_manager)) + .serve(addr) + .await?; + + Ok(()) + }) +} + +struct GrpcServer +where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, +{ + inner: Mutex>, +} + +impl GrpcServer +where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, +{ + fn new(sim_gen: F) -> Self { + Self { + inner: Mutex::new(GenericServer::new(sim_gen)), + } + } + + fn inner(&self) -> MutexGuard<'_, GenericServer> { + self.inner.lock().unwrap() + } +} + +#[tonic::async_trait] +impl simulation_server::Simulation for GrpcServer +where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, +{ + async fn init(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().init(request))) + } + async fn time(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().time(request))) + } + async fn step(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().step(request))) + } + async fn step_until( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().step_until(request))) + } + async fn schedule_event( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().schedule_event(request))) + } + async fn cancel_event( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().cancel_event(request))) + } + async fn 
process_event( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().process_event(request))) + } + async fn process_query( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().process_query(request))) + } + async fn read_events( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().read_events(request))) + } + async fn open_sink( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().open_sink(request))) + } + async fn close_sink( + &self, + request: Request, + ) -> Result, Status> { + let request = request.into_inner(); + + Ok(Response::new(self.inner().close_sink(request))) + } +} diff --git a/asynchronix/src/rpc/key_registry.rs b/asynchronix/src/rpc/key_registry.rs new file mode 100644 index 0000000..fa76e47 --- /dev/null +++ b/asynchronix/src/rpc/key_registry.rs @@ -0,0 +1,47 @@ +use crate::time::{ActionKey, MonotonicTime}; +use crate::util::indexed_priority_queue::{IndexedPriorityQueue, InsertKey}; + +pub(crate) type KeyRegistryId = InsertKey; + +/// A collection of `ActionKey`s indexed by a unique identifier. +#[derive(Default)] +pub(crate) struct KeyRegistry { + keys: IndexedPriorityQueue, +} + +impl KeyRegistry { + /// Inserts an `ActionKey` into the registry. + /// + /// The provided expiration deadline is the latest time at which the key may + /// still be active. + pub(crate) fn insert_key( + &mut self, + action_key: ActionKey, + expiration: MonotonicTime, + ) -> KeyRegistryId { + self.keys.insert(expiration, action_key) + } + + /// Inserts a non-expiring `ActionKey` into the registry. 
+ pub(crate) fn insert_eternal_key(&mut self, action_key: ActionKey) -> KeyRegistryId { + self.keys.insert(MonotonicTime::MAX, action_key) + } + + /// Removes an `ActionKey` from the registry and returns it. + /// + /// Returns `None` if the key was not found in the registry. + pub(crate) fn extract_key(&mut self, key_id: KeyRegistryId) -> Option { + self.keys.extract(key_id).map(|(_, key)| key) + } + + /// Remove keys with an expiration deadline strictly predating the argument. + pub(crate) fn remove_expired_keys(&mut self, now: MonotonicTime) { + while let Some(expiration) = self.keys.peek_key() { + if *expiration >= now { + return; + } + + self.keys.pull(); + } + } +} diff --git a/asynchronix/src/simulation.rs b/asynchronix/src/simulation.rs index 1fba49f..af3dfe9 100644 --- a/asynchronix/src/simulation.rs +++ b/asynchronix/src/simulation.rs @@ -14,8 +14,9 @@ //! using the [`Address`]es of the target models, //! 3. instantiation of a [`SimInit`] simulation builder and migration of all //! models and mailboxes to the builder with [`SimInit::add_model()`], -//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`] or -//! [`SimInit::init_with_clock()`], +//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`], +//! possibly preceded by the setup of a custom clock with +//! [`SimInit::set_clock()`], //! 5. discrete-time simulation, which typically involves scheduling events and //! incrementing simulation time while observing the models outputs. //! @@ -76,7 +77,7 @@ //! such pathological deadlocks and the "expected" deadlock that occurs when all //! events in a given time slice have completed and all models are starved on an //! empty mailbox. Consequently, blocking method such as [`SimInit::init()`], -//! [`Simulation::step()`], [`Simulation::send_event()`], etc., will return +//! [`Simulation::step()`], [`Simulation::process_event()`], etc., will return //! 
without error after a pathological deadlock, leaving the user responsible //! for inferring the deadlock from the behavior of the simulation in the next //! steps. This is obviously not ideal, but is hopefully only a temporary state @@ -86,17 +87,19 @@ //! //! Although uncommon, there is sometimes a need for connecting and/or //! disconnecting models after they have been migrated to the simulation. -//! Likewise, one may want to connect or disconnect an [`EventSlot`] or -//! [`EventStream`] after the simulation has been instantiated. +//! Likewise, one may want to connect or disconnect an +//! [`EventSlot`](crate::ports::EventSlot) or +//! [`EventBuffer`](crate::ports::EventBuffer) after the simulation has been +//! instantiated. //! //! There is actually a very simple solution to this problem: since the -//! [`InputFn`](crate::model::InputFn) trait also matches closures of type -//! `FnOnce(&mut impl Model)`, it is enough to invoke -//! [`Simulation::send_event()`] with a closure that connects or disconnects a -//! port, such as: +//! [`InputFn`] trait also matches closures of type `FnOnce(&mut impl Model)`, +//! it is enough to invoke [`Simulation::process_event()`] with a closure that +//! connects or disconnects a port, such as: //! //! ``` -//! # use asynchronix::model::{Model, Output}; +//! # use asynchronix::model::Model; +//! # use asynchronix::ports::Output; //! # use asynchronix::time::{MonotonicTime, Scheduler}; //! # use asynchronix::simulation::{Mailbox, SimInit}; //! # pub struct ModelA { @@ -111,7 +114,7 @@ //! # let modelA_addr = Mailbox::::new().address(); //! # let modelB_addr = Mailbox::::new().address(); //! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH); -//! simu.send_event( +//! simu.process_event( //! |m: &mut ModelA| { //! m.output.connect(ModelB::input, modelB_addr); //! }, @@ -119,11 +122,9 @@ //! &modelA_addr //! ); //! 
``` -mod endpoints; mod mailbox; mod sim_init; -pub use endpoints::{EventSlot, EventStream}; pub use mailbox::{Address, Mailbox}; pub use sim_init::SimInit; @@ -136,23 +137,22 @@ use std::time::Duration; use recycle_box::{coerce_box, RecycleBox}; use crate::executor::Executor; -use crate::model::{InputFn, Model, ReplierFn}; +use crate::model::Model; +use crate::ports::{InputFn, ReplierFn}; use crate::time::{ - self, Clock, Deadline, EventKey, MonotonicTime, NoClock, ScheduledEvent, SchedulerQueue, - SchedulingError, TearableAtomicTime, + self, Action, ActionKey, Clock, Deadline, MonotonicTime, SchedulerQueue, SchedulingError, + TearableAtomicTime, }; -use crate::util::futures::SeqFuture; +use crate::util::seq_futures::SeqFuture; use crate::util::slot; use crate::util::sync_cell::SyncCell; /// Simulation environment. /// /// A `Simulation` is created by calling -/// [`SimInit::init()`](crate::simulation::SimInit::init) or -/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock) -/// method on a simulation initializer. It contains an asynchronous executor -/// that runs all simulation models added beforehand to -/// [`SimInit`](crate::simulation::SimInit). +/// [`SimInit::init()`](crate::simulation::SimInit::init) on a simulation +/// initializer. It contains an asynchronous executor that runs all simulation +/// models added beforehand to [`SimInit`]. /// /// A [`Simulation`] object also manages an event scheduling queue and /// simulation time. The scheduling queue can be accessed from the simulation @@ -163,10 +163,10 @@ use crate::util::sync_cell::SyncCell; /// method. /// /// Events and queries can be scheduled immediately, *i.e.* for the current -/// simulation time, using [`send_event()`](Simulation::send_event) and -/// [`send_query()`](Simulation::send_query). Calling these methods will block -/// until all computations triggered by such event or query have completed. In -/// the case of queries, the response is returned. 
+/// simulation time, using [`process_event()`](Simulation::process_event) and +/// [`send_query()`](Simulation::process_query). Calling these methods will +/// block until all computations triggered by such event or query have +/// completed. In the case of queries, the response is returned. /// /// Events can also be scheduled at a future simulation time using one of the /// [`schedule_*()`](Simulation::schedule_event) method. These methods queue an @@ -193,32 +193,18 @@ pub struct Simulation { } impl Simulation { - /// Creates a new `Simulation`. + /// Creates a new `Simulation` with the specified clock. pub(crate) fn new( executor: Executor, scheduler_queue: Arc>, time: SyncCell, + clock: Box, ) -> Self { Self { executor, scheduler_queue, time, - clock: Box::new(NoClock::new()), - } - } - - /// Creates a new `Simulation` with the specified clock. - pub(crate) fn with_clock( - executor: Executor, - scheduler_queue: Arc>, - time: SyncCell, - clock: impl Clock + 'static, - ) -> Self { - Self { - executor, - scheduler_queue, - time, - clock: Box::new(clock), + clock, } } @@ -267,6 +253,37 @@ impl Simulation { Ok(()) } + /// Schedules an action at a future time. + /// + /// An error is returned if the specified time is not in the future of the + /// current simulation time. + /// + /// If multiple actions send events at the same simulation time to the same + /// model, these events are guaranteed to be processed according to the + /// scheduling order of the actions. + pub fn schedule( + &mut self, + deadline: impl Deadline, + action: Action, + ) -> Result<(), SchedulingError> { + let now = self.time(); + let time = deadline.into_time(now); + if now >= time { + return Err(SchedulingError::InvalidScheduledTime); + } + + let mut scheduler_queue = self.scheduler_queue.lock().unwrap(); + + // The channel ID is set to the same value for all actions. 
This + // ensures that the relative scheduling order of all source events is + // preserved, which is important if some of them target the same models. + // The value 0 was chosen as it prevents collisions with channel IDs as + // the latter are always non-zero. + scheduler_queue.insert((time, 0), action); + + Ok(()) + } + /// Schedules an event at a future time. /// /// An error is returned if the specified time is not in the future of the @@ -294,6 +311,7 @@ impl Simulation { if now >= time { return Err(SchedulingError::InvalidScheduledTime); } + time::schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue); Ok(()) @@ -314,7 +332,7 @@ impl Simulation { func: F, arg: T, address: impl Into>, - ) -> Result + ) -> Result where M: Model, F: for<'a> InputFn<'a, M, T, S>, @@ -397,7 +415,7 @@ impl Simulation { func: F, arg: T, address: impl Into>, - ) -> Result + ) -> Result where M: Model, F: for<'a> InputFn<'a, M, T, S> + Clone, @@ -424,10 +442,19 @@ impl Simulation { Ok(event_key) } - /// Sends and processes an event, blocking until completion. + /// Processes an action immediately, blocking until completion. + /// + /// Simulation time remains unchanged. The periodicity of the action, if + /// any, is ignored. + pub fn process(&mut self, action: Action) { + action.spawn_and_forget(&self.executor); + self.executor.run(); + } + + /// Processes an event immediately, blocking until completion. /// /// Simulation time remains unchanged. - pub fn send_event(&mut self, func: F, arg: T, address: impl Into>) + pub fn process_event(&mut self, func: F, arg: T, address: impl Into>) where M: Model, F: for<'a> InputFn<'a, M, T, S>, @@ -454,10 +481,10 @@ impl Simulation { self.executor.run(); } - /// Sends and processes a query, blocking until completion. + /// Processes a query immediately, blocking until completion. /// /// Simulation time remains unchanged. 
- pub fn send_query( + pub fn process_query( &mut self, func: F, arg: T, @@ -497,36 +524,34 @@ impl Simulation { reply_reader.try_read().map_err(|_| QueryError {}) } - /// Advances simulation time to that of the next scheduled event if its + /// Advances simulation time to that of the next scheduled action if its /// scheduling time does not exceed the specified bound, processing that - /// event as well as all other events scheduled for the same time. + /// action as well as all other actions scheduled for the same time. /// - /// If at least one event was found that satisfied the time bound, the + /// If at least one action was found that satisfied the time bound, the /// corresponding new simulation time is returned. fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option { - // Function pulling the next event. If the event is periodic, it is + // Function pulling the next action. If the action is periodic, it is // immediately re-scheduled. - fn pull_next_event( - scheduler_queue: &mut MutexGuard, - ) -> Box { - let ((time, channel_id), event) = scheduler_queue.pull().unwrap(); - if let Some((event_clone, period)) = event.next() { - scheduler_queue.insert((time + period, channel_id), event_clone); + fn pull_next_action(scheduler_queue: &mut MutexGuard) -> Action { + let ((time, channel_id), action) = scheduler_queue.pull().unwrap(); + if let Some((action_clone, period)) = action.next() { + scheduler_queue.insert((time + period, channel_id), action_clone); } - event + action } // Closure returning the next key which time stamp is no older than the - // upper bound, if any. Cancelled events are pulled and discarded. + // upper bound, if any. Cancelled actions are pulled and discarded. 
let peek_next_key = |scheduler_queue: &mut MutexGuard| { loop { match scheduler_queue.peek() { - Some((&k, t)) if k.0 <= upper_time_bound => { - if !t.is_cancelled() { - break Some(k); + Some((&key, action)) if key.0 <= upper_time_bound => { + if !action.is_cancelled() { + break Some(key); } - // Discard cancelled events. + // Discard cancelled actions. scheduler_queue.pull(); } _ => break None, @@ -540,37 +565,37 @@ impl Simulation { self.time.write(current_key.0); loop { - let event = pull_next_event(&mut scheduler_queue); + let action = pull_next_action(&mut scheduler_queue); let mut next_key = peek_next_key(&mut scheduler_queue); if next_key != Some(current_key) { - // Since there are no other events targeting the same mailbox - // and the same time, the event is spawned immediately. - event.spawn_and_forget(&self.executor); + // Since there are no other actions targeting the same mailbox + // and the same time, the action is spawned immediately. + action.spawn_and_forget(&self.executor); } else { // To ensure that their relative order of execution is - // preserved, all event targeting the same mailbox are executed - // sequentially within a single compound future. - let mut event_sequence = SeqFuture::new(); - event_sequence.push(event.into_future()); + // preserved, all actions targeting the same mailbox are + // executed sequentially within a single compound future. + let mut action_sequence = SeqFuture::new(); + action_sequence.push(action.into_future()); loop { - let event = pull_next_event(&mut scheduler_queue); - event_sequence.push(event.into_future()); + let action = pull_next_action(&mut scheduler_queue); + action_sequence.push(action.into_future()); next_key = peek_next_key(&mut scheduler_queue); if next_key != Some(current_key) { break; } } - // Spawn a compound future that sequentially polls all events + // Spawn a compound future that sequentially polls all actions // targeting the same mailbox. 
- self.executor.spawn_and_forget(event_sequence); + self.executor.spawn_and_forget(action_sequence); } current_key = match next_key { - // If the next event is scheduled at the same time, update the + // If the next action is scheduled at the same time, update the // key and continue. Some(k) if k.0 == current_key.0 => k, - // Otherwise wait until all events have completed and return. + // Otherwise wait until all actions have completed and return. _ => { drop(scheduler_queue); // make sure the queue's mutex is released. let current_time = current_key.0; @@ -584,10 +609,10 @@ impl Simulation { } } - /// Iteratively advances simulation time and processes all events scheduled + /// Iteratively advances simulation time and processes all actions scheduled /// up to the specified target time. /// - /// Once the method returns it is guaranteed that (i) all events scheduled + /// Once the method returns it is guaranteed that (i) all actions scheduled /// up to the specified target time have completed and (ii) the final /// simulation time matches the target time. /// @@ -598,7 +623,7 @@ impl Simulation { match self.step_to_next_bounded(target_time) { // The target time was reached exactly. Some(t) if t == target_time => return, - // No events are scheduled before or at the target time. + // No actions are scheduled before or at the target time. None => { // Update the simulation time. self.time.write(target_time); diff --git a/asynchronix/src/simulation/endpoints.rs b/asynchronix/src/simulation/endpoints.rs deleted file mode 100644 index eed6c10..0000000 --- a/asynchronix/src/simulation/endpoints.rs +++ /dev/null @@ -1,69 +0,0 @@ -use std::fmt; -use std::sync::{Arc, Mutex, TryLockError, TryLockResult}; - -use crate::util::spsc_queue; - -/// An iterator that returns all events that were broadcast by an output port. -/// -/// Events are returned in first-in-first-out order. 
Note that even if the -/// iterator returns `None`, it may still produce more items after simulation -/// time is incremented. -pub struct EventStream { - consumer: spsc_queue::Consumer, -} - -impl EventStream { - /// Creates a new `EventStream`. - pub(crate) fn new(consumer: spsc_queue::Consumer) -> Self { - Self { consumer } - } -} - -impl Iterator for EventStream { - type Item = T; - - fn next(&mut self) -> Option { - self.consumer.pop() - } -} - -impl fmt::Debug for EventStream { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("EventStream").finish_non_exhaustive() - } -} - -/// A single-value slot that holds the last event that was broadcast by an -/// output port. -pub struct EventSlot { - slot: Arc>>, -} - -impl EventSlot { - /// Creates a new `EventSlot`. - pub(crate) fn new(slot: Arc>>) -> Self { - Self { slot } - } - - /// Take the last event, if any, leaving the slot empty. - /// - /// Note that even after the event is taken, it may become populated anew - /// after simulation time is incremented. - pub fn take(&mut self) -> Option { - // We don't actually need to take self by mutable reference, but this - // signature is probably less surprising for the user and more - // consistent with `EventStream`. It also prevents multi-threaded - // access, which would be likely to be misused. 
- match self.slot.try_lock() { - TryLockResult::Ok(mut v) => v.take(), - TryLockResult::Err(TryLockError::WouldBlock) => None, - TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(), - } - } -} - -impl fmt::Debug for EventSlot { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("EventSlot").finish_non_exhaustive() - } -} diff --git a/asynchronix/src/simulation/sim_init.rs b/asynchronix/src/simulation/sim_init.rs index dbfa7ff..11c774f 100644 --- a/asynchronix/src/simulation/sim_init.rs +++ b/asynchronix/src/simulation/sim_init.rs @@ -3,7 +3,7 @@ use std::sync::{Arc, Mutex}; use crate::executor::Executor; use crate::model::Model; -use crate::time::{Clock, Scheduler}; +use crate::time::{Clock, NoClock, Scheduler}; use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime}; use crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCell; @@ -15,6 +15,7 @@ pub struct SimInit { executor: Executor, scheduler_queue: Arc>, time: SyncCell, + clock: Box, } impl SimInit { @@ -35,6 +36,7 @@ impl SimInit { executor: Executor::new(num_threads), scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())), time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)), + clock: Box::new(NoClock::new()), } } @@ -55,35 +57,25 @@ impl SimInit { self } + /// Synchronize the simulation with the provided [`Clock`]. + /// + /// If the clock isn't explicitly set then the default [`NoClock`] is used, + /// resulting in the simulation running as fast as possible. + pub fn set_clock(mut self, clock: impl Clock + 'static) -> Self { + self.clock = Box::new(clock); + + self + } + /// Builds a simulation initialized at the specified simulation time, /// executing the [`Model::init()`](crate::model::Model::init) method on all /// model initializers. 
- /// - /// This is equivalent to calling [`SimInit::init_with_clock()`] with a - /// [`NoClock`](crate::time::NoClock) argument and effectively makes the - /// simulation run as fast as possible. pub fn init(mut self, start_time: MonotonicTime) -> Simulation { self.time.write(start_time); + self.clock.synchronize(start_time); self.executor.run(); - Simulation::new(self.executor, self.scheduler_queue, self.time) - } - - /// Builds a simulation synchronized with the provided - /// [`Clock`](crate::time::Clock) and initialized at the specified - /// simulation time, executing the - /// [`Model::init()`](crate::model::Model::init) method on all model - /// initializers. - pub fn init_with_clock( - mut self, - start_time: MonotonicTime, - mut clock: impl Clock + 'static, - ) -> Simulation { - self.time.write(start_time); - clock.synchronize(start_time); - self.executor.run(); - - Simulation::with_clock(self.executor, self.scheduler_queue, self.time, clock) + Simulation::new(self.executor, self.scheduler_queue, self.time, self.clock) } } diff --git a/asynchronix/src/time.rs b/asynchronix/src/time.rs index fc8232e..acc2f50 100644 --- a/asynchronix/src/time.rs +++ b/asynchronix/src/time.rs @@ -51,12 +51,13 @@ mod clock; mod monotonic_time; mod scheduler; +pub use tai_time::MonotonicTime; + pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock}; pub(crate) use monotonic_time::TearableAtomicTime; -pub use monotonic_time::{MonotonicTime, SystemTimeError}; pub(crate) use scheduler::{ schedule_event_at_unchecked, schedule_keyed_event_at_unchecked, schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked, - ScheduledEvent, SchedulerQueue, + KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, SchedulerQueue, }; -pub use scheduler::{Deadline, EventKey, Scheduler, SchedulingError}; +pub use scheduler::{Action, ActionKey, Deadline, Scheduler, SchedulingError}; diff --git a/asynchronix/src/time/clock.rs 
b/asynchronix/src/time/clock.rs index 54a7f95..c9027e1 100644 --- a/asynchronix/src/time/clock.rs +++ b/asynchronix/src/time/clock.rs @@ -1,14 +1,16 @@ use std::time::{Duration, Instant, SystemTime}; +use tai_time::MonotonicClock; + use crate::time::MonotonicTime; /// A type that can be used to synchronize a simulation. /// -/// This trait abstract over the different types of clocks, such as +/// This trait abstracts over different types of clocks, such as /// as-fast-as-possible and real-time clocks. /// -/// A clock can be associated to a simulation at initialization time by calling -/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock). +/// A clock can be associated to a simulation prior to initialization by calling +/// [`SimInit::set_clock()`](crate::simulation::SimInit::set_clock). pub trait Clock: Send { /// Blocks until the deadline. fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus; @@ -49,10 +51,7 @@ impl Clock for NoClock { /// This clock accepts an arbitrary reference time and remains synchronized with /// the system's monotonic clock. #[derive(Copy, Clone, Debug)] -pub struct SystemClock { - wall_clock_ref: Instant, - simulation_ref: MonotonicTime, -} +pub struct SystemClock(MonotonicClock); impl SystemClock { /// Constructs a `SystemClock` with an offset between simulation clock and @@ -69,7 +68,7 @@ impl SystemClock { /// use asynchronix::simulation::SimInit; /// use asynchronix::time::{MonotonicTime, SystemClock}; /// - /// let t0 = MonotonicTime::new(1_234_567_890, 0); + /// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap(); /// /// // Make the simulation start in 1s. /// let clock = SystemClock::from_instant(t0, Instant::now() + Duration::from_secs(1)); @@ -77,13 +76,14 @@ impl SystemClock { /// let simu = SimInit::new() /// // .add_model(...) /// // .add_model(...) 
- /// .init_with_clock(t0, clock); + /// .set_clock(clock) + /// .init(t0); /// ``` pub fn from_instant(simulation_ref: MonotonicTime, wall_clock_ref: Instant) -> Self { - Self { - wall_clock_ref, + Self(MonotonicClock::init_from_instant( simulation_ref, - } + wall_clock_ref, + )) } /// Constructs a `SystemClock` with an offset between simulation clock and @@ -109,7 +109,7 @@ impl SystemClock { /// use asynchronix::simulation::SimInit; /// use asynchronix::time::{MonotonicTime, SystemClock}; /// - /// let t0 = MonotonicTime::new(1_234_567_890, 0); + /// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap(); /// /// // Make the simulation start at the next full second boundary. /// let now_secs = UNIX_EPOCH.elapsed().unwrap().as_secs(); @@ -120,58 +120,14 @@ impl SystemClock { /// let simu = SimInit::new() /// // .add_model(...) /// // .add_model(...) - /// .init_with_clock(t0, clock); + /// .set_clock(clock) + /// .init(t0); /// ``` pub fn from_system_time(simulation_ref: MonotonicTime, wall_clock_ref: SystemTime) -> Self { - // Select the best-correlated `Instant`/`SystemTime` pair from several - // samples to improve robustness towards possible thread suspension - // between the calls to `SystemTime::now()` and `Instant::now()`. - const SAMPLES: usize = 3; - - let mut last_instant = Instant::now(); - let mut min_delta = Duration::MAX; - let mut ref_time = None; - - // Select the best-correlated instant/date pair. - for _ in 0..SAMPLES { - // The inner loop is to work around monotonic clock platform bugs - // that may cause `checked_duration_since` to fail. - let (date, instant, delta) = loop { - let date = SystemTime::now(); - let instant = Instant::now(); - let delta = instant.checked_duration_since(last_instant); - last_instant = instant; - - if let Some(delta) = delta { - break (date, instant, delta); - } - }; - - // Store the current instant/date if the time elapsed since the last - // measurement is shorter than the previous candidate. 
- if min_delta > delta { - min_delta = delta; - ref_time = Some((instant, date)); - } - } - - // Set the selected instant/date as the wall clock reference and adjust - // the simulation reference accordingly. - let (instant_ref, date_ref) = ref_time.unwrap(); - let simulation_ref = if date_ref > wall_clock_ref { - let correction = date_ref.duration_since(wall_clock_ref).unwrap(); - - simulation_ref + correction - } else { - let correction = wall_clock_ref.duration_since(date_ref).unwrap(); - - simulation_ref - correction - }; - - Self { - wall_clock_ref: instant_ref, + Self(MonotonicClock::init_from_system_time( simulation_ref, - } + wall_clock_ref, + )) } } @@ -179,22 +135,14 @@ impl Clock for SystemClock { /// Blocks until the system time corresponds to the specified simulation /// time. fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus { - let target_time = if deadline >= self.simulation_ref { - self.wall_clock_ref + deadline.duration_since(self.simulation_ref) - } else { - self.wall_clock_ref - self.simulation_ref.duration_since(deadline) - }; + let now = self.0.now(); + if now <= deadline { + spin_sleep::sleep(deadline.duration_since(now)); - let now = Instant::now(); - - match target_time.checked_duration_since(now) { - Some(sleep_duration) => { - spin_sleep::sleep(sleep_duration); - - SyncStatus::Synchronized - } - None => SyncStatus::OutOfSync(now.duration_since(target_time)), + return SyncStatus::Synchronized; } + + SyncStatus::OutOfSync(now.duration_since(deadline)) } } @@ -233,3 +181,29 @@ impl Clock for AutoSystemClock { } } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn smoke_system_clock() { + let t0 = MonotonicTime::EPOCH; + const TOLERANCE: f64 = 0.0005; // [s] + + let now = Instant::now(); + let mut clock = SystemClock::from_instant(t0, now); + let t1 = t0 + Duration::from_millis(200); + clock.synchronize(t1); + let elapsed = now.elapsed().as_secs_f64(); + let dt = t1.duration_since(t0).as_secs_f64(); + + assert!( + 
(dt - elapsed) <= TOLERANCE, + "Expected t = {:.6}s +/- {:.6}s, measured t = {:.6}s", + dt, + TOLERANCE, + elapsed, + ); + } +} diff --git a/asynchronix/src/time/monotonic_time.rs b/asynchronix/src/time/monotonic_time.rs index e27ff56..de8568e 100644 --- a/asynchronix/src/time/monotonic_time.rs +++ b/asynchronix/src/time/monotonic_time.rs @@ -1,483 +1,10 @@ //! Monotonic simulation time. - -use std::error::Error; -use std::fmt; -use std::ops::{Add, AddAssign, Sub, SubAssign}; use std::sync::atomic::{AtomicI64, AtomicU32, Ordering}; -use std::time::{Duration, SystemTime}; + +use super::MonotonicTime; use crate::util::sync_cell::TearableAtomic; -const NANOS_PER_SEC: u32 = 1_000_000_000; - -/// A nanosecond-precision monotonic clock timestamp. -/// -/// A timestamp specifies a [TAI] point in time. It is represented as a 64-bit -/// signed number of seconds and a positive number of nanoseconds, counted with -/// reference to 1970-01-01 00:00:00 TAI. This timestamp format has a number of -/// desirable properties: -/// -/// - it enables cheap inter-operation with the standard [`Duration`] type which -/// uses a very similar internal representation, -/// - it constitutes a strict 96-bit superset of 80-bit PTP IEEE-1588 -/// timestamps, with the same epoch, -/// - if required, exact conversion to a Unix timestamp is trivial and only -/// requires subtracting from this timestamp the number of leap seconds -/// between TAI and UTC time (see also the -/// [`as_unix_secs()`](MonotonicTime::as_unix_secs) method). -/// -/// Although no date-time conversion methods are provided, conversion from -/// timestamp to TAI date-time representations and back can be easily performed -/// using `NaiveDateTime` from the [chrono] crate or `OffsetDateTime` from the -/// [time] crate, treating the timestamp as a regular (UTC) Unix timestamp. 
-/// -/// [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time -/// [chrono]: https://crates.io/crates/chrono -/// [time]: https://crates.io/crates/time -/// -/// # Examples -/// -/// ``` -/// use std::time::Duration; -/// use asynchronix::time::MonotonicTime; -/// -/// // Set the timestamp to 2009-02-13 23:31:30.987654321 TAI. -/// let mut timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); -/// -/// // Increment the timestamp by 123.456s. -/// timestamp += Duration::new(123, 456_000_000); -/// -/// assert_eq!(timestamp, MonotonicTime::new(1_234_568_014, 443_654_321)); -/// assert_eq!(timestamp.as_secs(), 1_234_568_014); -/// assert_eq!(timestamp.subsec_nanos(), 443_654_321); -/// ``` -#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)] -#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] -pub struct MonotonicTime { - /// The number of whole seconds in the future (if positive) or in the past - /// (if negative) of 1970-01-01 00:00:00 TAI. - /// - /// Note that the automatic derivation of `PartialOrd` relies on - /// lexicographical comparison so the `secs` field must appear before - /// `nanos` in declaration order to be given higher priority. - secs: i64, - /// The sub-second number of nanoseconds in the future of the point in time - /// defined by `secs`. - nanos: u32, -} - -impl MonotonicTime { - /// The epoch used by `MonotonicTime`, equal to 1970-01-01 00:00:00 TAI. - /// - /// This epoch coincides with the PTP epoch defined in the IEEE-1588 - /// standard. - pub const EPOCH: Self = Self { secs: 0, nanos: 0 }; - - /// The minimum possible `MonotonicTime` timestamp. - pub const MIN: Self = Self { - secs: i64::MIN, - nanos: 0, - }; - - /// The maximum possible `MonotonicTime` timestamp. - pub const MAX: Self = Self { - secs: i64::MAX, - nanos: NANOS_PER_SEC - 1, - }; - - /// Creates a timestamp directly from timestamp parts. 
- /// - /// The number of seconds is relative to the [`EPOCH`](MonotonicTime::EPOCH) - /// (1970-01-01 00:00:00 TAI). It is negative for dates in the past of the - /// epoch. - /// - /// The number of nanoseconds is always positive and always points towards - /// the future. - /// - /// # Panics - /// - /// This constructor will panic if the number of nanoseconds is greater than - /// or equal to 1 second. - /// - /// # Example - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// // A timestamp set to 2009-02-13 23:31:30.987654321 TAI. - /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); - /// - /// // A timestamp set 3.5s before the epoch. - /// let timestamp = MonotonicTime::new(-4, 500_000_000); - /// assert_eq!(timestamp, MonotonicTime::EPOCH - Duration::new(3, 500_000_000)); - /// ``` - pub const fn new(secs: i64, subsec_nanos: u32) -> Self { - assert!( - subsec_nanos < NANOS_PER_SEC, - "invalid number of nanoseconds" - ); - - Self { - secs, - nanos: subsec_nanos, - } - } - - /// Creates a timestamp from the current system time. - /// - /// The argument is the current difference between TAI and UTC time in - /// seconds (a.k.a. leap seconds). For reference, this offset has been +37s - /// since 2017-01-01, a value which is to remain valid until at least - /// 2024-06-29. See the [official IERS bulletin - /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for - /// leap second announcements or the [IETF - /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for - /// current and historical values. - /// - /// # Errors - /// - /// This method will return an error if the reported system time is in the - /// past of the Unix epoch or if the offset-adjusted timestamp is outside - /// the representable range. 
- /// - /// # Examples - /// - /// ``` - /// use asynchronix::time::MonotonicTime; - /// - /// // Compute the current TAI time assuming that the current difference - /// // between TAI and UTC time is 37s. - /// let timestamp = MonotonicTime::from_system(37).unwrap(); - /// ``` - pub fn from_system(leap_secs: i64) -> Result { - let utc_timestamp = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .map_err(|_| SystemTimeError::InvalidSystemTime)?; - - Self::new(leap_secs, 0) - .checked_add(utc_timestamp) - .ok_or(SystemTimeError::OutOfRange) - } - - /// Returns the number of whole seconds relative to - /// [`EPOCH`](MonotonicTime::EPOCH) (1970-01-01 00:00:00 TAI). - /// - /// Consistently with the interpretation of seconds and nanoseconds in the - /// [`new()`](Self::new) constructor, seconds are always rounded towards - /// `-∞`. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); - /// assert_eq!(timestamp.as_secs(), 1_234_567_890); - /// - /// let timestamp = MonotonicTime::EPOCH - Duration::new(3, 500_000_000); - /// assert_eq!(timestamp.as_secs(), -4); - /// ``` - pub const fn as_secs(&self) -> i64 { - self.secs - } - - /// Returns the number of seconds of the corresponding Unix time. - /// - /// The argument is the difference between TAI and UTC time in seconds - /// (a.k.a. leap seconds) applicable at the date represented by the - /// timestamp. See the [official IERS bulletin - /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for - /// leap second announcements or the [IETF - /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for - /// current and historical values. 
- /// - /// This method merely subtracts the offset from the value returned by - /// [`as_secs()`](Self::as_secs) and checks for potential overflow; its main - /// purpose is to prevent mistakes regarding the direction in which the - /// offset should be applied. - /// - /// Note that the nanosecond part of a Unix timestamp can be simply - /// retrieved with [`subsec_nanos()`](Self::subsec_nanos) since UTC and TAI - /// differ by a whole number of seconds. - /// - /// # Panics - /// - /// This will panic if the offset-adjusted timestamp cannot be represented - /// as an `i64`. - /// - /// # Examples - /// - /// ``` - /// use asynchronix::time::MonotonicTime; - /// - /// // Set the date to 2000-01-01 00:00:00 TAI. - /// let timestamp = MonotonicTime::new(946_684_800, 0); - /// - /// // Convert to a Unix timestamp, accounting for the +32s difference between - /// // TAI and UTC on 2000-01-01. - /// let unix_secs = timestamp.as_unix_secs(32); - /// ``` - pub const fn as_unix_secs(&self, leap_secs: i64) -> i64 { - if let Some(secs) = self.secs.checked_sub(leap_secs) { - secs - } else { - panic!("timestamp outside representable range"); - } - } - - /// Returns the sub-second fractional part in nanoseconds. - /// - /// Note that nanoseconds always point towards the future even if the date - /// is in the past of the [`EPOCH`](MonotonicTime::EPOCH). - /// - /// # Examples - /// - /// ``` - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); - /// assert_eq!(timestamp.subsec_nanos(), 987_654_321); - /// ``` - pub const fn subsec_nanos(&self) -> u32 { - self.nanos - } - - /// Adds a duration to a timestamp, checking for overflow. - /// - /// Returns `None` if overflow occurred. 
- /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); - /// assert!(timestamp.checked_add(Duration::new(10, 123_456_789)).is_some()); - /// assert!(timestamp.checked_add(Duration::MAX).is_none()); - /// ``` - pub const fn checked_add(self, rhs: Duration) -> Option { - // A durations in seconds greater than `i64::MAX` is actually fine as - // long as the number of seconds does not effectively overflow which is - // why the below does not use `checked_add`. So technically the below - // addition may wrap around on the negative side due to the - // unsigned-to-signed cast of the duration, but this does not - // necessarily indicate an actual overflow. Actual overflow can be ruled - // out by verifying that the new timestamp is in the future of the old - // timestamp. - let mut secs = self.secs.wrapping_add(rhs.as_secs() as i64); - - // Check for overflow. - if secs < self.secs { - return None; - } - - let mut nanos = self.nanos + rhs.subsec_nanos(); - if nanos >= NANOS_PER_SEC { - secs = if let Some(s) = secs.checked_add(1) { - s - } else { - return None; - }; - nanos -= NANOS_PER_SEC; - } - - Some(Self { secs, nanos }) - } - - /// Subtracts a duration from a timestamp, checking for overflow. - /// - /// Returns `None` if overflow occurred. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321); - /// assert!(timestamp.checked_sub(Duration::new(10, 123_456_789)).is_some()); - /// assert!(timestamp.checked_sub(Duration::MAX).is_none()); - /// ``` - pub const fn checked_sub(self, rhs: Duration) -> Option { - // A durations in seconds greater than `i64::MAX` is actually fine as - // long as the number of seconds does not effectively overflow, which is - // why the below does not use `checked_sub`. 
So technically the below - // subtraction may wrap around on the positive side due to the - // unsigned-to-signed cast of the duration, but this does not - // necessarily indicate an actual overflow. Actual overflow can be ruled - // out by verifying that the new timestamp is in the past of the old - // timestamp. - let mut secs = self.secs.wrapping_sub(rhs.as_secs() as i64); - - // Check for overflow. - if secs > self.secs { - return None; - } - - let nanos = if self.nanos < rhs.subsec_nanos() { - secs = if let Some(s) = secs.checked_sub(1) { - s - } else { - return None; - }; - - (self.nanos + NANOS_PER_SEC) - rhs.subsec_nanos() - } else { - self.nanos - rhs.subsec_nanos() - }; - - Some(Self { secs, nanos }) - } - - /// Subtracts a timestamp from another timestamp. - /// - /// # Panics - /// - /// Panics if the argument lies in the future of `self`. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321); - /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789); - /// assert_eq!( - /// timestamp_later.duration_since(timestamp_earlier), - /// Duration::new(20, 135_802_468) - /// ); - /// ``` - pub fn duration_since(self, earlier: Self) -> Duration { - self.checked_duration_since(earlier) - .expect("attempt to substract a timestamp from an earlier timestamp") - } - - /// Computes the duration elapsed between a timestamp and an earlier - /// timestamp, checking that the timestamps are appropriately ordered. - /// - /// Returns `None` if the argument lies in the future of `self`. 
- /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// use asynchronix::time::MonotonicTime; - /// - /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321); - /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789); - /// assert!(timestamp_later.checked_duration_since(timestamp_earlier).is_some()); - /// assert!(timestamp_earlier.checked_duration_since(timestamp_later).is_none()); - /// ``` - pub const fn checked_duration_since(self, earlier: Self) -> Option { - // If the subtraction of the nanosecond fractions would overflow, carry - // over one second to the nanoseconds. - let (secs, nanos) = if earlier.nanos > self.nanos { - if let Some(s) = self.secs.checked_sub(1) { - (s, self.nanos + NANOS_PER_SEC) - } else { - return None; - } - } else { - (self.secs, self.nanos) - }; - - // Make sure the computation of the duration will not overflow the - // seconds. - if secs < earlier.secs { - return None; - } - - // This subtraction may wrap around if the difference between the two - // timestamps is more than `i64::MAX`, but even if it does the result - // will be correct once cast to an unsigned integer. - let delta_secs = secs.wrapping_sub(earlier.secs) as u64; - - // The below subtraction is guaranteed to never overflow. - let delta_nanos = nanos - earlier.nanos; - - Some(Duration::new(delta_secs, delta_nanos)) - } -} - -impl Add for MonotonicTime { - type Output = Self; - - /// Adds a duration to a timestamp. - /// - /// # Panics - /// - /// This function panics if the resulting timestamp cannot be - /// represented. See [`MonotonicTime::checked_add`] for a panic-free - /// version. - fn add(self, other: Duration) -> Self { - self.checked_add(other) - .expect("overflow when adding duration to timestamp") - } -} - -impl Sub for MonotonicTime { - type Output = Self; - - /// Subtracts a duration from a timestamp. 
- /// - /// # Panics - /// - /// This function panics if the resulting timestamp cannot be - /// represented. See [`MonotonicTime::checked_sub`] for a panic-free - /// version. - fn sub(self, other: Duration) -> Self { - self.checked_sub(other) - .expect("overflow when subtracting duration from timestamp") - } -} - -impl AddAssign for MonotonicTime { - /// Increments the timestamp by a duration. - /// - /// # Panics - /// - /// This function panics if the resulting timestamp cannot be represented. - fn add_assign(&mut self, other: Duration) { - *self = *self + other; - } -} - -impl SubAssign for MonotonicTime { - /// Decrements the timestamp by a duration. - /// - /// # Panics - /// - /// This function panics if the resulting timestamp cannot be represented. - fn sub_assign(&mut self, other: Duration) { - *self = *self - other; - } -} - -/// An error that may be returned when initializing a [`MonotonicTime`] from -/// system time. -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -pub enum SystemTimeError { - /// The system time is in the past of the Unix epoch. - InvalidSystemTime, - /// The system time cannot be represented as a `MonotonicTime`. - OutOfRange, -} - -impl fmt::Display for SystemTimeError { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::InvalidSystemTime => write!(fmt, "invalid system time"), - Self::OutOfRange => write!(fmt, "timestamp outside representable range"), - } - } -} - -impl Error for SystemTimeError {} - /// A tearable atomic adapter over a `MonotonicTime`. 
/// /// This makes it possible to store the simulation time in a `SyncCell`, an @@ -490,8 +17,8 @@ pub(crate) struct TearableAtomicTime { impl TearableAtomicTime { pub(crate) fn new(time: MonotonicTime) -> Self { Self { - secs: AtomicI64::new(time.secs), - nanos: AtomicU32::new(time.nanos), + secs: AtomicI64::new(time.as_secs()), + nanos: AtomicU32::new(time.subsec_nanos()), } } } @@ -502,170 +29,17 @@ impl TearableAtomic for TearableAtomicTime { fn tearable_load(&self) -> MonotonicTime { // Load each field separately. This can never create invalid values of a // `MonotonicTime`, even if the load is torn. - MonotonicTime { - secs: self.secs.load(Ordering::Relaxed), - nanos: self.nanos.load(Ordering::Relaxed), - } + MonotonicTime::new( + self.secs.load(Ordering::Relaxed), + self.nanos.load(Ordering::Relaxed), + ) + .unwrap() } fn tearable_store(&self, value: MonotonicTime) { // Write each field separately. This can never create invalid values of // a `MonotonicTime`, even if the store is torn. 
- self.secs.store(value.secs, Ordering::Relaxed); - self.nanos.store(value.nanos, Ordering::Relaxed); - } -} - -#[cfg(all(test, not(asynchronix_loom)))] -mod tests { - use super::*; - - #[test] - fn time_equality() { - let t0 = MonotonicTime::new(123, 123_456_789); - let t1 = MonotonicTime::new(123, 123_456_789); - let t2 = MonotonicTime::new(123, 123_456_790); - let t3 = MonotonicTime::new(124, 123_456_789); - - assert_eq!(t0, t1); - assert_ne!(t0, t2); - assert_ne!(t0, t3); - } - - #[test] - fn time_ordering() { - let t0 = MonotonicTime::new(0, 1); - let t1 = MonotonicTime::new(1, 0); - - assert!(t1 > t0); - } - - #[cfg(not(miri))] - #[test] - fn time_from_system_smoke() { - const START_OF_2022: i64 = 1640995200; - const START_OF_2050: i64 = 2524608000; - - let now_secs = MonotonicTime::from_system(0).unwrap().as_secs(); - - assert!(now_secs > START_OF_2022); - assert!(now_secs < START_OF_2050); - } - - #[test] - #[should_panic] - fn time_invalid() { - MonotonicTime::new(123, 1_000_000_000); - } - - #[test] - fn time_duration_since_smoke() { - let t0 = MonotonicTime::new(100, 100_000_000); - let t1 = MonotonicTime::new(123, 223_456_789); - - assert_eq!( - t1.checked_duration_since(t0), - Some(Duration::new(23, 123_456_789)) - ); - } - - #[test] - fn time_duration_with_carry() { - let t0 = MonotonicTime::new(100, 200_000_000); - let t1 = MonotonicTime::new(101, 100_000_000); - - assert_eq!( - t1.checked_duration_since(t0), - Some(Duration::new(0, 900_000_000)) - ); - } - - #[test] - fn time_duration_since_extreme() { - const MIN_TIME: MonotonicTime = MonotonicTime::new(i64::MIN, 0); - const MAX_TIME: MonotonicTime = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1); - - assert_eq!( - MAX_TIME.checked_duration_since(MIN_TIME), - Some(Duration::new(u64::MAX, NANOS_PER_SEC - 1)) - ); - } - - #[test] - fn time_duration_since_invalid() { - let t0 = MonotonicTime::new(100, 0); - let t1 = MonotonicTime::new(99, 0); - - assert_eq!(t1.checked_duration_since(t0), None); - } - 
- #[test] - fn time_add_duration_smoke() { - let t = MonotonicTime::new(-100, 100_000_000); - let dt = Duration::new(400, 300_000_000); - - assert_eq!(t + dt, MonotonicTime::new(300, 400_000_000)); - } - - #[test] - fn time_add_duration_with_carry() { - let t = MonotonicTime::new(-100, 900_000_000); - let dt1 = Duration::new(400, 100_000_000); - let dt2 = Duration::new(400, 300_000_000); - - assert_eq!(t + dt1, MonotonicTime::new(301, 0)); - assert_eq!(t + dt2, MonotonicTime::new(301, 200_000_000)); - } - - #[test] - fn time_add_duration_extreme() { - let t = MonotonicTime::new(i64::MIN, 0); - let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1); - - assert_eq!(t + dt, MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1)); - } - - #[test] - #[should_panic] - fn time_add_duration_overflow() { - let t = MonotonicTime::new(i64::MIN, 1); - let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1); - - let _ = t + dt; - } - - #[test] - fn time_sub_duration_smoke() { - let t = MonotonicTime::new(100, 500_000_000); - let dt = Duration::new(400, 300_000_000); - - assert_eq!(t - dt, MonotonicTime::new(-300, 200_000_000)); - } - - #[test] - fn time_sub_duration_with_carry() { - let t = MonotonicTime::new(100, 100_000_000); - let dt1 = Duration::new(400, 100_000_000); - let dt2 = Duration::new(400, 300_000_000); - - assert_eq!(t - dt1, MonotonicTime::new(-300, 0)); - assert_eq!(t - dt2, MonotonicTime::new(-301, 800_000_000)); - } - - #[test] - fn time_sub_duration_extreme() { - let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1); - let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1); - - assert_eq!(t - dt, MonotonicTime::new(i64::MIN, 0)); - } - - #[test] - #[should_panic] - fn time_sub_duration_overflow() { - let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 2); - let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1); - - let _ = t - dt; + self.secs.store(value.as_secs(), Ordering::Relaxed); + self.nanos.store(value.subsec_nanos(), Ordering::Relaxed); } } diff --git 
a/asynchronix/src/time/scheduler.rs b/asynchronix/src/time/scheduler.rs index 3ab2ead..740f644 100644 --- a/asynchronix/src/time/scheduler.rs +++ b/asynchronix/src/time/scheduler.rs @@ -1,27 +1,35 @@ //! Scheduling functions and types. use std::error::Error; -use std::fmt; use std::future::Future; -use std::marker::PhantomData; +use std::hash::{Hash, Hasher}; use std::pin::Pin; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::{Arc, Mutex}; use std::task::{Context, Poll}; use std::time::Duration; +use std::{fmt, ptr}; use pin_project_lite::pin_project; use recycle_box::{coerce_box, RecycleBox}; -use crate::channel::{ChannelId, Sender}; +use crate::channel::Sender; use crate::executor::Executor; -use crate::model::{InputFn, Model}; +use crate::model::Model; +use crate::ports::InputFn; use crate::time::{MonotonicTime, TearableAtomicTime}; use crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCellReader; /// Shorthand for the scheduler queue type. -pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, ChannelId), Box>; + +// Why use both time and channel ID as the key? The short answer is that this +// ensures that events targeting the same model are sent in the order they were +// scheduled. More precisely, this ensures that events targeting the same model +// are ordered contiguously in the priority queue, which in turns allows the +// event loop to easily aggregate such events into single futures and thus +// control their relative order of execution. +pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, usize), Action>; /// Trait abstracting over time-absolute and time-relative deadlines. 
/// @@ -81,7 +89,9 @@ impl Deadline for MonotonicTime { /// /// ``` /// use std::time::Duration; -/// use asynchronix::model::{Model, Output}; use asynchronix::time::Scheduler; +/// use asynchronix::model::Model; +/// use asynchronix::ports::Output; +/// use asynchronix::time::Scheduler; /// /// #[derive(Default)] /// pub struct DelayedGreeter { @@ -141,8 +151,8 @@ impl Scheduler { /// /// fn is_third_millenium(scheduler: &Scheduler) -> bool { /// let time = scheduler.time(); - /// - /// time >= MonotonicTime::new(978307200, 0) && time < MonotonicTime::new(32535216000, 0) + /// time >= MonotonicTime::new(978307200, 0).unwrap() + /// && time < MonotonicTime::new(32535216000, 0).unwrap() /// } /// ``` pub fn time(&self) -> MonotonicTime { @@ -203,7 +213,8 @@ impl Scheduler { Ok(()) } - /// Schedules a cancellable event at a future time and returns an event key. + /// Schedules a cancellable event at a future time and returns an action + /// key. /// /// An error is returned if the specified deadline is not in the future of /// the current simulation time. @@ -212,12 +223,12 @@ impl Scheduler { /// /// ``` /// use asynchronix::model::Model; - /// use asynchronix::time::{EventKey, MonotonicTime, Scheduler}; + /// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; /// /// // An alarm clock that can be cancelled. /// #[derive(Default)] /// pub struct CancellableAlarmClock { - /// event_key: Option, + /// event_key: Option, /// } /// /// impl CancellableAlarmClock { @@ -248,7 +259,7 @@ impl Scheduler { deadline: impl Deadline, func: F, arg: T, - ) -> Result + ) -> Result where F: for<'a> InputFn<'a, M, T, S>, T: Send + Clone + 'static, @@ -337,7 +348,7 @@ impl Scheduler { } /// Schedules a cancellable, periodically recurring event at a future time - /// and returns an event key. + /// and returns an action key. /// /// An error is returned if the specified deadline is not in the future of /// the current simulation time or if the specified period is null. 
@@ -348,13 +359,13 @@ impl Scheduler { /// use std::time::Duration; /// /// use asynchronix::model::Model; - /// use asynchronix::time::{EventKey, MonotonicTime, Scheduler}; + /// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; /// /// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or /// // stopped after it sets off. /// #[derive(Default)] /// pub struct CancellableBeepingAlarmClock { - /// event_key: Option, + /// event_key: Option, /// } /// /// impl CancellableBeepingAlarmClock { @@ -391,7 +402,7 @@ impl Scheduler { period: Duration, func: F, arg: T, - ) -> Result + ) -> Result where F: for<'a> InputFn<'a, M, T, S> + Clone, T: Send + Clone + 'static, @@ -425,34 +436,55 @@ impl fmt::Debug for Scheduler { } } -/// Handle to a scheduled event. +/// Handle to a scheduled action. /// -/// An `EventKey` can be used to cancel a future event. +/// An `ActionKey` can be used to cancel a scheduled action. #[derive(Clone, Debug)] -#[must_use = "prefer unkeyed scheduling methods if the event is never cancelled"] -pub struct EventKey { +#[must_use = "prefer unkeyed scheduling methods if the action is never cancelled"] +pub struct ActionKey { is_cancelled: Arc, } -impl EventKey { - /// Creates a key for a pending event. +impl ActionKey { + /// Creates a key for a pending action. pub(crate) fn new() -> Self { Self { is_cancelled: Arc::new(AtomicBool::new(false)), } } - /// Checks whether the event was cancelled. + /// Checks whether the action was cancelled. pub(crate) fn is_cancelled(&self) -> bool { self.is_cancelled.load(Ordering::Relaxed) } - /// Cancels the associated event. + /// Cancels the associated action. pub fn cancel(self) { self.is_cancelled.store(true, Ordering::Relaxed); } } +impl PartialEq for ActionKey { + /// Implements equality by considering clones to be equivalent, rather than + /// keys with the same `is_cancelled` value. 
+ fn eq(&self, other: &Self) -> bool { + ptr::addr_eq(&*self.is_cancelled, &*other.is_cancelled) + } +} + +impl Eq for ActionKey {} + +impl Hash for ActionKey { + /// Implements `Hash`` by considering clones to be equivalent, rather than + /// keys with the same `is_cancelled` value. + fn hash(&self, state: &mut H) + where + H: Hasher, + { + ptr::hash(&*self.is_cancelled, state) + } +} + /// Error returned when the scheduled time or the repetition period are invalid. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum SchedulingError { @@ -477,9 +509,73 @@ impl fmt::Display for SchedulingError { impl Error for SchedulingError {} +/// A possibly periodic, possibly cancellable action that can be scheduled or +/// processed immediately. +pub struct Action { + inner: Box, +} + +impl Action { + /// Creates a new `Action` from an `ActionInner`. + pub(crate) fn new(s: S) -> Self { + Self { inner: Box::new(s) } + } + + /// Reports whether the action was cancelled. + pub(crate) fn is_cancelled(&self) -> bool { + self.inner.is_cancelled() + } + + /// If this is a periodic action, returns a boxed clone of this action and + /// its repetition period; otherwise returns `None`. + pub(crate) fn next(&self) -> Option<(Action, Duration)> { + self.inner + .next() + .map(|(inner, period)| (Self { inner }, period)) + } + + /// Returns a boxed future that performs the action. + pub(crate) fn into_future(self) -> Pin + Send>> { + self.inner.into_future() + } + + /// Spawns the future that performs the action onto the provided executor. + /// + /// This method is typically more efficient that spawning the boxed future + /// from `into_future` since it can directly spawn the unboxed future. 
+ pub(crate) fn spawn_and_forget(self, executor: &Executor) { + self.inner.spawn_and_forget(executor) + } +} + +impl fmt::Debug for Action { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("SchedulableEvent").finish_non_exhaustive() + } +} + +/// Trait abstracting over the inner type of an action. +pub(crate) trait ActionInner: Send + 'static { + /// Reports whether the action was cancelled. + fn is_cancelled(&self) -> bool; + + /// If this is a periodic action, returns a boxed clone of this action and + /// its repetition period; otherwise returns `None`. + fn next(&self) -> Option<(Box, Duration)>; + + /// Returns a boxed future that performs the action. + fn into_future(self: Box) -> Pin + Send>>; + + /// Spawns the future that performs the action onto the provided executor. + /// + /// This method is typically more efficient that spawning the boxed future + /// from `into_future` since it can directly spawn the unboxed future. + fn spawn_and_forget(self: Box, executor: &Executor); +} + /// Schedules an event at a future time. /// -/// This method does not check whether the specified time lies in the future +/// This function does not check whether the specified time lies in the future /// of the current simulation time. pub(crate) fn schedule_event_at_unchecked( time: MonotonicTime, @@ -495,15 +591,15 @@ pub(crate) fn schedule_event_at_unchecked( { let channel_id = sender.channel_id(); - let event_dispatcher = Box::new(new_event_dispatcher(func, arg, sender)); + let action = Action::new(OnceAction::new(process_event(func, arg, sender))); let mut scheduler_queue = scheduler_queue.lock().unwrap(); - scheduler_queue.insert((time, channel_id), event_dispatcher); + scheduler_queue.insert((time, channel_id), action); } -/// Schedules an event at a future time, returning an event key. +/// Schedules an event at a future time, returning an action key. 
/// -/// This method does not check whether the specified time lies in the future +/// This function does not check whether the specified time lies in the future /// of the current simulation time. pub(crate) fn schedule_keyed_event_at_unchecked( time: MonotonicTime, @@ -511,31 +607,29 @@ pub(crate) fn schedule_keyed_event_at_unchecked( arg: T, sender: Sender, scheduler_queue: &Mutex, -) -> EventKey +) -> ActionKey where M: Model, F: for<'a> InputFn<'a, M, T, S>, T: Send + Clone + 'static, S: Send + 'static, { - let event_key = EventKey::new(); + let event_key = ActionKey::new(); let channel_id = sender.channel_id(); - let event_dispatcher = Box::new(KeyedEventDispatcher::new( + let action = Action::new(KeyedOnceAction::new( + |ek| send_keyed_event(ek, func, arg, sender), event_key.clone(), - func, - arg, - sender, )); let mut scheduler_queue = scheduler_queue.lock().unwrap(); - scheduler_queue.insert((time, channel_id), event_dispatcher); + scheduler_queue.insert((time, channel_id), action); event_key } /// Schedules a periodic event at a future time. /// -/// This method does not check whether the specified time lies in the future +/// This function does not check whether the specified time lies in the future /// of the current simulation time. pub(crate) fn schedule_periodic_event_at_unchecked( time: MonotonicTime, @@ -552,15 +646,18 @@ pub(crate) fn schedule_periodic_event_at_unchecked( { let channel_id = sender.channel_id(); - let event_dispatcher = Box::new(PeriodicEventDispatcher::new(func, arg, sender, period)); + let action = Action::new(PeriodicAction::new( + || process_event(func, arg, sender), + period, + )); let mut scheduler_queue = scheduler_queue.lock().unwrap(); - scheduler_queue.insert((time, channel_id), event_dispatcher); + scheduler_queue.insert((time, channel_id), action); } -/// Schedules an event at a future time, returning an event key. +/// Schedules an event at a future time, returning an action key. 
/// -/// This method does not check whether the specified time lies in the future +/// This function does not check whether the specified time lies in the future /// of the current simulation time. pub(crate) fn schedule_periodic_keyed_event_at_unchecked( time: MonotonicTime, @@ -569,84 +666,52 @@ pub(crate) fn schedule_periodic_keyed_event_at_unchecked( arg: T, sender: Sender, scheduler_queue: &Mutex, -) -> EventKey +) -> ActionKey where M: Model, F: for<'a> InputFn<'a, M, T, S> + Clone, T: Send + Clone + 'static, S: Send + 'static, { - let event_key = EventKey::new(); + let event_key = ActionKey::new(); let channel_id = sender.channel_id(); - let event_dispatcher = Box::new(PeriodicKeyedEventDispatcher::new( - event_key.clone(), - func, - arg, - sender, + let action = Action::new(KeyedPeriodicAction::new( + |ek| send_keyed_event(ek, func, arg, sender), period, + event_key.clone(), )); let mut scheduler_queue = scheduler_queue.lock().unwrap(); - scheduler_queue.insert((time, channel_id), event_dispatcher); + scheduler_queue.insert((time, channel_id), action); event_key } -/// Trait for objects that can be converted to a future dispatching a scheduled -/// event. -pub(crate) trait ScheduledEvent: Send { - /// Reports whether the associated event was cancelled. - fn is_cancelled(&self) -> bool; - - /// Returns a boxed clone of this event and the repetition period if this is - /// a periodic even, otherwise returns `None`. - fn next(&self) -> Option<(Box, Duration)>; - - /// Returns a boxed future dispatching the associated event. - fn into_future(self: Box) -> Pin + Send>>; - - /// Spawns the future that dispatches the associated event onto the provided - /// executor. - /// - /// This method is typically more efficient that spawning the boxed future - /// from `into_future` since it can directly spawn the unboxed future. - fn spawn_and_forget(self: Box, executor: &Executor); -} - pin_project! 
{ - /// Object that can be converted to a future dispatching a non-cancellable - /// event. + /// An object that can be converted to a future performing a single + /// non-cancellable action. /// - /// Note that this particular event dispatcher is in fact already a future: - /// since the future cannot be cancelled and the dispatcher does not need to - /// be cloned, there is no need to defer the construction of the future. - /// This makes `into_future` a trivial cast, which saves a boxing operation. - pub(crate) struct EventDispatcher { + /// Note that this particular action is in fact already a future: since the + /// future cannot be cancelled and the action does not need to be cloned, + /// there is no need to defer the construction of the future. This makes + /// `into_future` a trivial cast, which saves a boxing operation. + pub(crate) struct OnceAction { #[pin] fut: F, } } -/// Constructs a new `EventDispatcher`. -/// -/// Due to some limitations of type inference or of my understanding of it, the -/// constructor for this event dispatchers is a freestanding function. -fn new_event_dispatcher( - func: F, - arg: T, - sender: Sender, -) -> EventDispatcher> +impl OnceAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + F: Future + Send + 'static, { - let fut = dispatch_event(func, arg, sender); - - EventDispatcher { fut } + /// Constructs a new `OnceAction`. 
+ pub(crate) fn new(fut: F) -> Self { + OnceAction { fut } + } } -impl Future for EventDispatcher +impl Future for OnceAction where F: Future, { @@ -658,14 +723,14 @@ where } } -impl ScheduledEvent for EventDispatcher +impl ActionInner for OnceAction where F: Future + Send + 'static, { fn is_cancelled(&self) -> bool { false } - fn next(&self) -> Option<(Box, Duration)> { + fn next(&self) -> Option<(Box, Duration)> { None } fn into_future(self: Box) -> Pin + Send>> { @@ -677,230 +742,155 @@ where } } -/// Object that can be converted to a future dispatching a non-cancellable periodic -/// event. -pub(crate) struct PeriodicEventDispatcher +/// An object that can be converted to a future performing a non-cancellable, +/// periodic action. +pub(crate) struct PeriodicAction where - M: Model, + G: (FnOnce() -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { - func: F, - arg: T, - sender: Sender, + /// A clonable generator for the associated future. + gen: G, + /// The action repetition period. period: Duration, - _input_kind: PhantomData, } -impl PeriodicEventDispatcher +impl PeriodicAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + G: (FnOnce() -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { - /// Constructs a new `PeriodicEventDispatcher`. - fn new(func: F, arg: T, sender: Sender, period: Duration) -> Self { - Self { - func, - arg, - sender, - period, - _input_kind: PhantomData, - } + /// Constructs a new `PeriodicAction`. 
+ pub(crate) fn new(gen: G, period: Duration) -> Self { + Self { gen, period } } } -impl ScheduledEvent for PeriodicEventDispatcher +impl ActionInner for PeriodicAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S> + Clone, - T: Send + Clone + 'static, - S: Send + 'static, + G: (FnOnce() -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { fn is_cancelled(&self) -> bool { false } - fn next(&self) -> Option<(Box, Duration)> { - let event = Box::new(Self::new( - self.func.clone(), - self.arg.clone(), - self.sender.clone(), - self.period, - )); + fn next(&self) -> Option<(Box, Duration)> { + let event = Box::new(Self::new(self.gen.clone(), self.period)); Some((event, self.period)) } fn into_future(self: Box) -> Pin + Send>> { - let Self { - func, arg, sender, .. - } = *self; - - Box::pin(dispatch_event(func, arg, sender)) + Box::pin((self.gen)()) } fn spawn_and_forget(self: Box, executor: &Executor) { - let Self { - func, arg, sender, .. - } = *self; - - let fut = dispatch_event(func, arg, sender); - executor.spawn_and_forget(fut); + executor.spawn_and_forget((self.gen)()); } } -/// Object that can be converted to a future dispatching a cancellable event. -pub(crate) struct KeyedEventDispatcher +/// An object that can be converted to a future performing a single, cancellable +/// action. +pub(crate) struct KeyedOnceAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + G: (FnOnce(ActionKey) -> F) + Send + 'static, + F: Future + Send + 'static, { - event_key: EventKey, - func: F, - arg: T, - sender: Sender, - _input_kind: PhantomData, + /// A generator for the associated future. + gen: G, + /// The event cancellation key. + event_key: ActionKey, } -impl KeyedEventDispatcher +impl KeyedOnceAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + G: (FnOnce(ActionKey) -> F) + Send + 'static, + F: Future + Send + 'static, { - /// Constructs a new `KeyedEventDispatcher`. 
- fn new(event_key: EventKey, func: F, arg: T, sender: Sender) -> Self { - Self { - event_key, - func, - arg, - sender, - _input_kind: PhantomData, - } + /// Constructs a new `KeyedOnceAction`. + pub(crate) fn new(gen: G, event_key: ActionKey) -> Self { + Self { gen, event_key } } } -impl ScheduledEvent for KeyedEventDispatcher +impl ActionInner for KeyedOnceAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, - S: Send + 'static, + G: (FnOnce(ActionKey) -> F) + Send + 'static, + F: Future + Send + 'static, { fn is_cancelled(&self) -> bool { self.event_key.is_cancelled() } - fn next(&self) -> Option<(Box, Duration)> { + fn next(&self) -> Option<(Box, Duration)> { None } fn into_future(self: Box) -> Pin + Send>> { - let Self { - event_key, - func, - arg, - sender, - .. - } = *self; - - Box::pin(dispatch_keyed_event(event_key, func, arg, sender)) + Box::pin((self.gen)(self.event_key)) } fn spawn_and_forget(self: Box, executor: &Executor) { - let Self { - event_key, - func, - arg, - sender, - .. - } = *self; - - let fut = dispatch_keyed_event(event_key, func, arg, sender); - executor.spawn_and_forget(fut); + executor.spawn_and_forget((self.gen)(self.event_key)); } } -/// Object that can be converted to a future dispatching a cancellable event. -pub(crate) struct PeriodicKeyedEventDispatcher +/// An object that can be converted to a future performing a periodic, +/// cancellable action. +pub(crate) struct KeyedPeriodicAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { - event_key: EventKey, - func: F, - arg: T, - sender: Sender, + /// A clonable generator for associated future. + gen: G, + /// The repetition period. period: Duration, - _input_kind: PhantomData, + /// The event cancellation key. 
+ event_key: ActionKey, } -impl PeriodicKeyedEventDispatcher +impl KeyedPeriodicAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { - /// Constructs a new `KeyedEventDispatcher`. - fn new(event_key: EventKey, func: F, arg: T, sender: Sender, period: Duration) -> Self { + /// Constructs a new `KeyedPeriodicAction`. + pub(crate) fn new(gen: G, period: Duration, event_key: ActionKey) -> Self { Self { - event_key, - func, - arg, - sender, + gen, period, - _input_kind: PhantomData, + event_key, } } } -impl ScheduledEvent for PeriodicKeyedEventDispatcher +impl ActionInner for KeyedPeriodicAction where - M: Model, - F: for<'a> InputFn<'a, M, T, S> + Clone, - T: Send + Clone + 'static, - S: Send + 'static, + G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static, + F: Future + Send + 'static, { fn is_cancelled(&self) -> bool { self.event_key.is_cancelled() } - fn next(&self) -> Option<(Box, Duration)> { + fn next(&self) -> Option<(Box, Duration)> { let event = Box::new(Self::new( - self.event_key.clone(), - self.func.clone(), - self.arg.clone(), - self.sender.clone(), + self.gen.clone(), self.period, + self.event_key.clone(), )); Some((event, self.period)) } fn into_future(self: Box) -> Pin + Send>> { - let Self { - event_key, - func, - arg, - sender, - .. - } = *self; - - Box::pin(dispatch_keyed_event(event_key, func, arg, sender)) + Box::pin((self.gen)(self.event_key)) } fn spawn_and_forget(self: Box, executor: &Executor) { - let Self { - event_key, - func, - arg, - sender, - .. - } = *self; - - let fut = dispatch_keyed_event(event_key, func, arg, sender); - executor.spawn_and_forget(fut); + executor.spawn_and_forget((self.gen)(self.event_key)); } } -/// Asynchronously dispatch a regular, non-cancellable event. -async fn dispatch_event(func: F, arg: T, sender: Sender) +/// Asynchronously sends a non-cancellable event to a model input. 
+pub(crate) async fn process_event(func: F, arg: T, sender: Sender) where M: Model, F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, + T: Send + 'static, { let _ = sender .send( @@ -916,9 +906,13 @@ where .await; } -/// Asynchronously dispatch a cancellable event. -async fn dispatch_keyed_event(event_key: EventKey, func: F, arg: T, sender: Sender) -where +/// Asynchronously sends a cancellable event to a model input. +pub(crate) async fn send_keyed_event( + event_key: ActionKey, + func: F, + arg: T, + sender: Sender, +) where M: Model, F: for<'a> InputFn<'a, M, T, S>, T: Send + Clone + 'static, diff --git a/asynchronix/src/util.rs b/asynchronix/src/util.rs index 3b8e9c0..f618e05 100644 --- a/asynchronix/src/util.rs +++ b/asynchronix/src/util.rs @@ -1,7 +1,8 @@ pub(crate) mod bit; -pub(crate) mod futures; +pub(crate) mod indexed_priority_queue; pub(crate) mod priority_queue; pub(crate) mod rng; +pub(crate) mod seq_futures; pub(crate) mod slot; -pub(crate) mod spsc_queue; pub(crate) mod sync_cell; +pub(crate) mod task_set; diff --git a/asynchronix/src/util/bit.rs b/asynchronix/src/util/bit.rs index 435bb45..c1e6368 100644 --- a/asynchronix/src/util/bit.rs +++ b/asynchronix/src/util/bit.rs @@ -1,7 +1,5 @@ //! Bit manipulation and algorithms. -#![allow(unused)] - /// Find the position of the `Nᵗʰ` set bit starting the search from the least /// significant bit. /// diff --git a/asynchronix/src/util/indexed_priority_queue.rs b/asynchronix/src/util/indexed_priority_queue.rs new file mode 100644 index 0000000..d9339b9 --- /dev/null +++ b/asynchronix/src/util/indexed_priority_queue.rs @@ -0,0 +1,696 @@ +//! Associative priority queue. + +#![allow(unused)] + +use std::mem; + +/// An associative container optimized for extraction of the value with the +/// lowest key and deletion of arbitrary key-value pairs. 
+///
+/// This implementation has the same theoretical complexity for insert and pull
+/// operations as a conventional array-based binary heap but does differ from
+/// the latter in some important aspects:
+///
+/// - elements can be deleted in *O*(log(*N*)) time rather than *O*(*N*) time
+///   using a unique index returned at insertion time.
+/// - same-key elements are guaranteed to be pulled in FIFO order.
+///
+/// Under the hood, the priority queue relies on a binary heap cross-indexed
+/// with values stored in a slab allocator. Each item of the binary heap
+/// contains an index pointing to the associated slab-allocated node, as well as
+/// the user-provided key. Each slab node contains the value associated to the
+/// key and a back-pointing index to the binary heap. The heap items also
+/// contain a unique epoch which allows same-key nodes to be sorted by insertion
+/// order. The epoch is used as well to build unique indices that enable
+/// efficient deletion of arbitrary key-value pairs.
+///
+/// The slab-based design is what makes *O*(log(*N*)) deletion possible, but it
+/// does come with some trade-offs:
+///
+/// - its memory footprint is higher because it needs 2 extra pointer-sized
+///   indices for each element to cross-index the heap and the slab,
+/// - its computational footprint is higher because of the extra cost associated
+///   with random slab access; that being said, array-based binary heaps are not
+///   extremely cache-friendly to start with so unless the slab becomes very
+///   fragmented, this is not expected to introduce more than a reasonable
+///   constant-factor penalty compared to a conventional binary heap.
+///
+/// The computational penalty is partially offset by the fact that the value
+/// never needs to be moved from the moment it is inserted until it is pulled.
+///
+/// Note that the `Copy` bound on the keys could be lifted but this would make
+/// the implementation slightly less efficient unless `unsafe` is used.
+pub(crate) struct IndexedPriorityQueue +where + K: Copy + Clone + Ord, +{ + heap: Vec>, + slab: Vec>, + first_free_node: Option, + next_epoch: u64, +} + +impl IndexedPriorityQueue { + /// Creates an empty `PriorityQueue`. + pub(crate) fn new() -> Self { + Self { + heap: Vec::new(), + slab: Vec::new(), + first_free_node: None, + next_epoch: 0, + } + } + + /// Creates an empty `PriorityQueue` with at least the specified capacity. + pub(crate) fn with_capacity(capacity: usize) -> Self { + Self { + heap: Vec::with_capacity(capacity), + slab: Vec::with_capacity(capacity), + first_free_node: None, + next_epoch: 0, + } + } + + /// Returns the number of key-value pairs in the priority queue. + pub(crate) fn len(&self) -> usize { + self.heap.len() + } + + /// Inserts a new key-value pair and returns a unique insertion key. + /// + /// This operation has *O*(log(*N*)) amortized worse-case theoretical + /// complexity and *O*(1) amortized theoretical complexity for a + /// sufficiently random heap. + pub(crate) fn insert(&mut self, key: K, value: V) -> InsertKey { + // Build a unique key from the user-provided key and a unique epoch. + let epoch = self.next_epoch; + assert_ne!(epoch, u64::MAX); + self.next_epoch += 1; + let unique_key = UniqueKey { key, epoch }; + + // Add a new node to the slab, either by re-using a free node or by + // appending a new one. + let slab_idx = match self.first_free_node { + Some(idx) => { + self.first_free_node = self.slab[idx].unwrap_next_free_node(); + + self.slab[idx] = Node::HeapNode(HeapNode { + value, + heap_idx: 0, // temporary value overridden in `sift_up` + }); + + idx + } + None => { + let idx = self.slab.len(); + self.slab.push(Node::HeapNode(HeapNode { + value, + heap_idx: 0, // temporary value overridden in `sift_up` + })); + + idx + } + }; + + // Add a new node at the bottom of the heap. 
+ let heap_idx = self.heap.len(); + self.heap.push(Item { + key: unique_key, // temporary value overridden in `sift_up` + slab_idx: 0, // temporary value overridden in `sift_up` + }); + + // Sift up the new node. + self.sift_up( + Item { + key: unique_key, + slab_idx, + }, + heap_idx, + ); + + InsertKey { slab_idx, epoch } + } + + /// Pulls the value with the lowest key. + /// + /// If there are several equal lowest keys, the value which was inserted + /// first is returned. + /// + /// This operation has *O*(log(N)) non-amortized theoretical complexity. + pub(crate) fn pull(&mut self) -> Option<(K, V)> { + let item = self.heap.first()?; + let top_slab_idx = item.slab_idx; + let key = item.key.key; + + // Free the top node, extracting its value. + let value = mem::replace( + &mut self.slab[top_slab_idx], + Node::FreeNode(FreeNode { + next: self.first_free_node, + }), + ) + .unwrap_value(); + + self.first_free_node = Some(top_slab_idx); + + // Sift the last node at the bottom of the heap from the top of the heap. + let last_item = self.heap.pop().unwrap(); + if last_item.slab_idx != top_slab_idx { + self.sift_down(last_item, 0); + } + + Some((key, value)) + } + + /// Peeks a reference to the key-value pair with the lowest key, leaving it + /// in the queue. + /// + /// If there are several equal lowest keys, a reference to the key-value + /// pair which was inserted first is returned. + /// + /// This operation has *O*(1) non-amortized theoretical complexity. + pub(crate) fn peek(&self) -> Option<(&K, &V)> { + let item = self.heap.first()?; + let top_slab_idx = item.slab_idx; + let key = &item.key.key; + let value = self.slab[top_slab_idx].unwrap_value_ref(); + + Some((key, value)) + } + + /// Peeks a reference to the lowest key, leaving it in the queue. + /// + /// If there are several equal lowest keys, a reference to the key which was + /// inserted first is returned. + /// + /// This operation has *O*(1) non-amortized theoretical complexity. 
+ pub(crate) fn peek_key(&self) -> Option<&K> { + let item = self.heap.first()?; + + Some(&item.key.key) + } + + /// Removes the key-value pair associated to the provided insertion key if + /// it is still in the queue and returns it. + /// + /// Using an insertion key returned from another `PriorityQueue` is a logic + /// error and could result in the deletion of an arbitrary key-value pair. + /// + /// This operation has guaranteed *O*(log(*N*)) theoretical complexity. + pub(crate) fn extract(&mut self, insert_key: InsertKey) -> Option<(K, V)> { + let slab_idx = insert_key.slab_idx; + + // Check that (i) there is a node at this index, (ii) this node is in + // the heap and (iii) this node has the correct epoch. + match self.slab.get(slab_idx) { + None | Some(Node::FreeNode(_)) => return None, + Some(Node::HeapNode(node)) => { + if self.heap[node.heap_idx].key.epoch != insert_key.epoch { + return None; + } + } + }; + + // Free the node, extracting its content. + let node = mem::replace( + &mut self.slab[slab_idx], + Node::FreeNode(FreeNode { + next: self.first_free_node, + }), + ) + .unwrap_heap_node(); + + self.first_free_node = Some(slab_idx); + + // Save the key before the node is removed from the heap. + let key = self.heap[node.heap_idx].key.key; + + // If the last item of the heap is not the one to be deleted, sift it up + // or down as appropriate starting from the vacant spot. + let last_item = self.heap.pop().unwrap(); + if let Some(item) = self.heap.get(node.heap_idx) { + if last_item.key < item.key { + self.sift_up(last_item, node.heap_idx); + } else { + self.sift_down(last_item, node.heap_idx); + } + } + + Some((key, node.value)) + } + + /// Take a heap item and, starting at `heap_idx`, move it up the heap while + /// a parent has a larger key. 
+    #[inline]
+    fn sift_up(&mut self, item: Item<K>, heap_idx: usize) {
+        let mut child_heap_idx = heap_idx;
+        let key = &item.key;
+
+        while child_heap_idx != 0 {
+            let parent_heap_idx = (child_heap_idx - 1) / 2;
+
+            // Stop when the key is larger or equal to the parent's.
+            if key >= &self.heap[parent_heap_idx].key {
+                break;
+            }
+
+            // Move the parent down one level.
+            self.heap[child_heap_idx] = self.heap[parent_heap_idx];
+            let parent_slab_idx = self.heap[parent_heap_idx].slab_idx;
+            *self.slab[parent_slab_idx].unwrap_heap_index_mut() = child_heap_idx;
+
+            // NOTE(review): dead check — this exact condition already failed above.
+            if key >= &self.heap[parent_heap_idx].key {
+                break;
+            }
+            // Make the former parent the new child.
+            child_heap_idx = parent_heap_idx;
+        }
+
+        // Move the original item to the current child.
+        self.heap[child_heap_idx] = item;
+        *self.slab[item.slab_idx].unwrap_heap_index_mut() = child_heap_idx;
+    }
+
+    /// Take a heap item and, starting at `heap_idx`, move it down the heap
+    /// while a child has a smaller key.
+    #[inline]
+    fn sift_down(&mut self, item: Item<K>, heap_idx: usize) {
+        let mut parent_heap_idx = heap_idx;
+        let mut child_heap_idx = 2 * parent_heap_idx + 1;
+        let key = &item.key;
+
+        while child_heap_idx < self.heap.len() {
+            // If the sibling exists and has a smaller key, make it the
+            // candidate for swapping.
+            if let Some(other_child) = self.heap.get(child_heap_idx + 1) {
+                child_heap_idx += (self.heap[child_heap_idx].key > other_child.key) as usize;
+            }
+
+            // Stop when the key is smaller or equal to the child with the smallest key.
+            if key <= &self.heap[child_heap_idx].key {
+                break;
+            }
+
+            // Move the child up one level.
+            self.heap[parent_heap_idx] = self.heap[child_heap_idx];
+            let child_slab_idx = self.heap[child_heap_idx].slab_idx;
+            *self.slab[child_slab_idx].unwrap_heap_index_mut() = parent_heap_idx;
+
+            // Make the child the new parent.
+ parent_heap_idx = child_heap_idx; + child_heap_idx = 2 * parent_heap_idx + 1; + } + + // Move the original item to the current parent. + self.heap[parent_heap_idx] = item; + *self.slab[item.slab_idx].unwrap_heap_index_mut() = parent_heap_idx; + } +} + +impl Default for IndexedPriorityQueue { + fn default() -> Self { + Self::new() + } +} + +/// Data related to a single key-value pair stored in the heap. +#[derive(Copy, Clone)] +struct Item { + // A unique key by which the heap is sorted. + key: UniqueKey, + // An index pointing to the corresponding node in the slab. + slab_idx: usize, +} + +/// Data related to a single key-value pair stored in the slab. +enum Node { + FreeNode(FreeNode), + HeapNode(HeapNode), +} + +impl Node { + /// Unwraps the `FreeNode::next` field. + fn unwrap_next_free_node(&self) -> Option { + match self { + Self::FreeNode(n) => n.next, + _ => panic!("the node was expected to be a free node"), + } + } + + /// Unwraps a `HeapNode`. + fn unwrap_heap_node(self) -> HeapNode { + match self { + Self::HeapNode(n) => n, + _ => panic!("the node was expected to be a heap node"), + } + } + + /// Unwraps the `HeapNode::value` field. + fn unwrap_value(self) -> V { + match self { + Self::HeapNode(n) => n.value, + _ => panic!("the node was expected to be a heap node"), + } + } + + /// Unwraps the `HeapNode::value` field. + fn unwrap_value_ref(&self) -> &V { + match self { + Self::HeapNode(n) => &n.value, + _ => panic!("the node was expected to be a heap node"), + } + } + + /// Unwraps a mutable reference to the `HeapNode::heap_idx` field. + fn unwrap_heap_index_mut(&mut self) -> &mut usize { + match self { + Self::HeapNode(n) => &mut n.heap_idx, + _ => panic!("the node was expected to be a heap node"), + } + } +} + +/// A node that is no longer in the binary heap. +struct FreeNode { + // An index pointing to the next free node, if any. + next: Option, +} + +/// A node currently in the binary heap. +struct HeapNode { + // The value associated to this node. 
+    value: V,
+    // Index of the node in the heap.
+    heap_idx: usize,
+}
+
+/// A unique insertion key that can be used for key-value pair deletion.
+#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
+pub(crate) struct InsertKey {
+    // An index pointing to a node in the slab.
+    slab_idx: usize,
+    // The epoch when the node was inserted.
+    epoch: u64,
+}
+
+impl InsertKey {
+    // Creates an `InsertKey` directly from its raw components.
+    //
+    // This method is safe: the worst that can happen is for the key to be
+    // invalid, in which case it will simply be rejected by
+    // `IndexedPriorityQueue::extract`.
+    pub(crate) fn from_raw_parts(slab_idx: usize, epoch: u64) -> Self {
+        Self { slab_idx, epoch }
+    }
+
+    // Decomposes an `InsertKey` into its raw components.
+    pub(crate) fn into_raw_parts(self) -> (usize, u64) {
+        (self.slab_idx, self.epoch)
+    }
+}
+
+/// A unique key made of the user-provided key complemented by a unique epoch.
+///
+/// Implementation note: `UniqueKey` automatically derives `PartialOrd`, which
+/// implies that lexicographic order between `key` and `epoch` must be preserved
+/// to make sure that `key` has a higher sorting priority than `epoch`.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+struct UniqueKey<K: Copy + Clone + Ord> {
+    /// The user-provided key.
+    key: K,
+    /// A unique epoch that indicates the insertion date.
+ epoch: u64, +} + +#[cfg(all(test, not(asynchronix_loom)))] +mod tests { + use std::fmt::Debug; + + use super::*; + + enum Op { + Insert(K, V), + InsertAndMark(K, V), + Pull(Option<(K, V)>), + ExtractMarked(Option<(K, V)>), + } + + fn check( + operations: impl Iterator>, + ) { + let mut queue = IndexedPriorityQueue::new(); + let mut marked = None; + + for op in operations { + match op { + Op::Insert(key, value) => { + queue.insert(key, value); + } + Op::InsertAndMark(key, value) => { + marked = Some(queue.insert(key, value)); + } + Op::Pull(kv) => { + assert_eq!(queue.pull(), kv); + } + Op::ExtractMarked(kv) => { + assert_eq!( + queue.extract(marked.take().expect("no item was marked for deletion")), + kv + ) + } + } + } + } + + #[test] + fn indexed_priority_queue_smoke() { + let operations = [ + Op::Insert(5, 'a'), + Op::Insert(2, 'b'), + Op::Insert(3, 'c'), + Op::Insert(4, 'd'), + Op::Insert(9, 'e'), + Op::Insert(1, 'f'), + Op::Insert(8, 'g'), + Op::Insert(0, 'h'), + Op::Insert(7, 'i'), + Op::Insert(6, 'j'), + Op::Pull(Some((0, 'h'))), + Op::Pull(Some((1, 'f'))), + Op::Pull(Some((2, 'b'))), + Op::Pull(Some((3, 'c'))), + Op::Pull(Some((4, 'd'))), + Op::Pull(Some((5, 'a'))), + Op::Pull(Some((6, 'j'))), + Op::Pull(Some((7, 'i'))), + Op::Pull(Some((8, 'g'))), + Op::Pull(Some((9, 'e'))), + ]; + + check(operations.into_iter()); + } + + #[test] + fn indexed_priority_queue_interleaved() { + let operations = [ + Op::Insert(2, 'a'), + Op::Insert(7, 'b'), + Op::Insert(5, 'c'), + Op::Pull(Some((2, 'a'))), + Op::Insert(4, 'd'), + Op::Pull(Some((4, 'd'))), + Op::Insert(8, 'e'), + Op::Insert(2, 'f'), + Op::Pull(Some((2, 'f'))), + Op::Pull(Some((5, 'c'))), + Op::Pull(Some((7, 'b'))), + Op::Insert(5, 'g'), + Op::Insert(3, 'h'), + Op::Pull(Some((3, 'h'))), + Op::Pull(Some((5, 'g'))), + Op::Pull(Some((8, 'e'))), + Op::Pull(None), + ]; + + check(operations.into_iter()); + } + + #[test] + fn indexed_priority_queue_equal_keys() { + let operations = [ + Op::Insert(4, 'a'), + 
Op::Insert(1, 'b'), + Op::Insert(3, 'c'), + Op::Pull(Some((1, 'b'))), + Op::Insert(4, 'd'), + Op::Insert(8, 'e'), + Op::Insert(3, 'f'), + Op::Pull(Some((3, 'c'))), + Op::Pull(Some((3, 'f'))), + Op::Pull(Some((4, 'a'))), + Op::Insert(8, 'g'), + Op::Pull(Some((4, 'd'))), + Op::Pull(Some((8, 'e'))), + Op::Pull(Some((8, 'g'))), + Op::Pull(None), + ]; + + check(operations.into_iter()); + } + + #[test] + fn indexed_priority_queue_extract_valid() { + let operations = [ + Op::Insert(8, 'a'), + Op::Insert(1, 'b'), + Op::Insert(3, 'c'), + Op::InsertAndMark(3, 'd'), + Op::Insert(2, 'e'), + Op::Pull(Some((1, 'b'))), + Op::Insert(4, 'f'), + Op::ExtractMarked(Some((3, 'd'))), + Op::Insert(5, 'g'), + Op::Pull(Some((2, 'e'))), + Op::Pull(Some((3, 'c'))), + Op::Pull(Some((4, 'f'))), + Op::Pull(Some((5, 'g'))), + Op::Pull(Some((8, 'a'))), + Op::Pull(None), + ]; + + check(operations.into_iter()); + } + + #[test] + fn indexed_priority_queue_extract_invalid() { + let operations = [ + Op::Insert(0, 'a'), + Op::Insert(7, 'b'), + Op::InsertAndMark(2, 'c'), + Op::Insert(4, 'd'), + Op::Pull(Some((0, 'a'))), + Op::Insert(2, 'e'), + Op::Pull(Some((2, 'c'))), + Op::Insert(4, 'f'), + Op::ExtractMarked(None), + Op::Pull(Some((2, 'e'))), + Op::Pull(Some((4, 'd'))), + Op::Pull(Some((4, 'f'))), + Op::Pull(Some((7, 'b'))), + Op::Pull(None), + ]; + + check(operations.into_iter()); + } + + #[test] + fn indexed_priority_queue_fuzz() { + use std::cell::Cell; + use std::collections::BTreeMap; + + use crate::util::rng::Rng; + + // Number of fuzzing operations. + const ITER: usize = if cfg!(miri) { 1000 } else { 10_000_000 }; + + // Inclusive upper bound for randomly generated keys. + const MAX_KEY: u64 = 99; + + // Probabilistic weight of each of the 4 operations. + // + // The weight for pull values should probably stay close to the sum of + // the two insertion weights to prevent queue size runaway. 
+ const INSERT_WEIGHT: u64 = 5; + const INSERT_AND_MARK_WEIGHT: u64 = 1; + const PULL_WEIGHT: u64 = INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT; + const DELETE_MARKED_WEIGHT: u64 = 1; + + // Defines 4 basic operations on the priority queue, each of them being + // performed on both the tested implementation and on a shadow queue + // implemented with a `BTreeMap`. Any mismatch between the outcomes of + // pull and delete operations between the two queues triggers a panic. + let epoch: Cell = Cell::new(0); + let marked: Cell> = Cell::new(None); + let shadow_marked: Cell> = Cell::new(None); + + let insert_fn = |queue: &mut IndexedPriorityQueue, + shadow_queue: &mut BTreeMap<(u64, usize), u64>, + key, + value| { + queue.insert(key, value); + shadow_queue.insert((key, epoch.get()), value); + epoch.set(epoch.get() + 1); + }; + + let insert_and_mark_fn = |queue: &mut IndexedPriorityQueue, + shadow_queue: &mut BTreeMap<(u64, usize), u64>, + key, + value| { + marked.set(Some(queue.insert(key, value))); + shadow_queue.insert((key, epoch.get()), value); + shadow_marked.set(Some((key, epoch.get()))); + epoch.set(epoch.get() + 1); + }; + + let pull_fn = |queue: &mut IndexedPriorityQueue, + shadow_queue: &mut BTreeMap<(u64, usize), u64>| { + let value = queue.pull(); + let shadow_value = match shadow_queue.iter().next() { + Some((&unique_key, &value)) => { + shadow_queue.remove(&unique_key); + Some((unique_key.0, value)) + } + None => None, + }; + assert_eq!(value, shadow_value); + }; + + let delete_marked_fn = + |queue: &mut IndexedPriorityQueue, + shadow_queue: &mut BTreeMap<(u64, usize), u64>| { + let success = match marked.take() { + Some(delete_key) => Some(queue.extract(delete_key).is_some()), + None => None, + }; + let shadow_success = match shadow_marked.take() { + Some(delete_key) => Some(shadow_queue.remove(&delete_key).is_some()), + None => None, + }; + assert_eq!(success, shadow_success); + }; + + // Fuzz away. 
+ let mut queue = IndexedPriorityQueue::new(); + let mut shadow_queue = BTreeMap::new(); + + let rng = Rng::new(12345); + const TOTAL_WEIGHT: u64 = + INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT + PULL_WEIGHT + DELETE_MARKED_WEIGHT; + + for _ in 0..ITER { + // Randomly choose one of the 4 possible operations, respecting the + // probability weights. + let mut op = rng.gen_bounded(TOTAL_WEIGHT); + if op < INSERT_WEIGHT { + let key = rng.gen_bounded(MAX_KEY + 1); + let val = rng.gen(); + insert_fn(&mut queue, &mut shadow_queue, key, val); + continue; + } + op -= INSERT_WEIGHT; + if op < INSERT_AND_MARK_WEIGHT { + let key = rng.gen_bounded(MAX_KEY + 1); + let val = rng.gen(); + insert_and_mark_fn(&mut queue, &mut shadow_queue, key, val); + continue; + } + op -= INSERT_AND_MARK_WEIGHT; + if op < PULL_WEIGHT { + pull_fn(&mut queue, &mut shadow_queue); + continue; + } + delete_marked_fn(&mut queue, &mut shadow_queue); + } + } +} diff --git a/asynchronix/src/util/priority_queue.rs b/asynchronix/src/util/priority_queue.rs index 293211f..f8cf5cf 100644 --- a/asynchronix/src/util/priority_queue.rs +++ b/asynchronix/src/util/priority_queue.rs @@ -111,7 +111,7 @@ impl PriorityQueue { #[cfg(all(test, not(asynchronix_loom)))] mod tests { - use super::*; + use super::PriorityQueue; #[test] fn priority_smoke() { diff --git a/asynchronix/src/util/rng.rs b/asynchronix/src/util/rng.rs index d716052..c5ae38b 100644 --- a/asynchronix/src/util/rng.rs +++ b/asynchronix/src/util/rng.rs @@ -1,7 +1,5 @@ //! Pseudo-random number generation. -#![allow(unused)] - use std::cell::Cell; /// A pseudo-random generator for 64-bit integers based on Wang Yi's Wyrand. diff --git a/asynchronix/src/util/futures.rs b/asynchronix/src/util/seq_futures.rs similarity index 62% rename from asynchronix/src/util/futures.rs rename to asynchronix/src/util/seq_futures.rs index 026e563..4c13861 100644 --- a/asynchronix/src/util/futures.rs +++ b/asynchronix/src/util/seq_futures.rs @@ -1,11 +1,7 @@ -//! 
Futures and future-related functions. - -#![allow(unused)] +//! Sequential composition of futures into a single future. use std::future::Future; use std::pin::Pin; -use std::sync::atomic::AtomicBool; -use std::sync::Arc; use std::task::{Context, Poll}; /// An owned future which sequentially polls a collection of futures. @@ -53,39 +49,3 @@ impl Future for SeqFuture { Poll::Pending } } - -trait RevocableFuture: Future { - fn is_revoked() -> bool; -} - -struct NeverRevokedFuture { - inner: F, -} - -impl NeverRevokedFuture { - fn new(fut: F) -> Self { - Self { inner: fut } - } -} -impl Future for NeverRevokedFuture { - type Output = T::Output; - - #[inline(always)] - fn poll( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) } - } -} - -impl RevocableFuture for NeverRevokedFuture { - fn is_revoked() -> bool { - false - } -} - -struct ConcurrentlyRevocableFuture { - inner: F, - is_revoked: Arc, -} diff --git a/asynchronix/src/util/slot.rs b/asynchronix/src/util/slot.rs index 8126dbb..8dbef05 100644 --- a/asynchronix/src/util/slot.rs +++ b/asynchronix/src/util/slot.rs @@ -1,8 +1,6 @@ //! A primitive similar to a one-shot channel but without any signaling //! capability. 
-#![allow(unused)] - use std::error::Error; use std::fmt; use std::marker::PhantomData; @@ -327,8 +325,6 @@ pub(crate) fn slot() -> (SlotWriter, SlotReader) { mod tests { use super::*; - use std::io::Read; - use std::sync::Arc; use std::thread; #[test] @@ -358,9 +354,9 @@ mod tests { #[test] fn slot_multi_threaded_write() { - let (mut writer, mut reader) = slot(); + let (writer, mut reader) = slot(); - let th = thread::spawn(move || { + thread::spawn(move || { assert!(writer.write(42).is_ok()); }); @@ -370,15 +366,13 @@ mod tests { return; } } - - th.join().unwrap(); } #[test] fn slot_multi_threaded_drop_writer() { - let (mut writer, mut reader) = slot::(); + let (writer, mut reader) = slot::(); - let th = thread::spawn(move || { + thread::spawn(move || { drop(writer); }); @@ -389,8 +383,6 @@ mod tests { return; } } - - th.join().unwrap(); } } diff --git a/asynchronix/src/util/spsc_queue.rs b/asynchronix/src/util/spsc_queue.rs deleted file mode 100644 index 9a81b2c..0000000 --- a/asynchronix/src/util/spsc_queue.rs +++ /dev/null @@ -1,393 +0,0 @@ -//! Single-producer single-consumer unbounded FIFO queue that stores values in -//! fixed-size memory segments. - -#![allow(unused)] - -use std::cell::Cell; -use std::error::Error; -use std::fmt; -use std::marker::PhantomData; -use std::mem::MaybeUninit; -use std::panic::{RefUnwindSafe, UnwindSafe}; -use std::ptr::{self, NonNull}; -use std::sync::atomic::Ordering; - -use crossbeam_utils::CachePadded; - -use crate::loom_exports::cell::UnsafeCell; -use crate::loom_exports::sync::atomic::{AtomicBool, AtomicPtr}; -use crate::loom_exports::sync::Arc; - -/// The number of slots in a single segment. -const SEGMENT_LEN: usize = 32; - -/// A slot containing a single value. 
-struct Slot { - has_value: AtomicBool, - value: UnsafeCell>, -} - -impl Default for Slot { - fn default() -> Self { - Slot { - has_value: AtomicBool::new(false), - value: UnsafeCell::new(MaybeUninit::uninit()), - } - } -} - -/// A memory segment containing `SEGMENT_LEN` slots. -struct Segment { - /// Address of the next segment. - /// - /// A null pointer means that the next segment is not allocated yet. - next_segment: AtomicPtr>, - data: [Slot; SEGMENT_LEN], -} - -impl Segment { - /// Allocates a new segment. - fn allocate_new() -> NonNull { - let segment = Self { - next_segment: AtomicPtr::new(ptr::null_mut()), - data: Default::default(), - }; - - // Safety: the pointer is non-null since it comes from a box. - unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(segment))) } - } -} - -/// The head of the queue from which values are popped. -struct Head { - /// Pointer to the segment at the head of the queue. - segment: NonNull>, - /// Index of the next value to be read. - /// - /// If the index is equal to the segment length, it is necessary to move to - /// the next segment before the next value can be read. - next_read_idx: usize, -} - -/// The tail of the queue to which values are pushed. -struct Tail { - /// Pointer to the segment at the tail of the queue. - segment: NonNull>, - /// Index of the next value to be written. - /// - /// If the index is equal to the segment length, a new segment must be - /// allocated before a new value can be written. - next_write_idx: usize, -} - -/// A single-producer, single-consumer unbounded FIFO queue. -struct Queue { - head: CachePadded>>, - tail: CachePadded>>, -} - -impl Queue { - /// Creates a new queue. 
- fn new() -> Self { - let segment = Segment::allocate_new(); - - let head = Head { - segment, - next_read_idx: 0, - }; - let tail = Tail { - segment, - next_write_idx: 0, - }; - - Self { - head: CachePadded::new(UnsafeCell::new(head)), - tail: CachePadded::new(UnsafeCell::new(tail)), - } - } - - /// Pushes a new value. - /// - /// # Safety - /// - /// The method cannot be called from multiple threads concurrently. - unsafe fn push(&self, value: T) { - // Safety: this is the only thread accessing the tail. - let tail = self.tail.with_mut(|p| &mut *p); - - // If the whole segment has been written, allocate a new segment. - if tail.next_write_idx == SEGMENT_LEN { - let old_segment = tail.segment; - tail.segment = Segment::allocate_new(); - - // Safety: the old segment is still allocated since the consumer - // cannot deallocate it before `next_segment` is set to a non-null - // value. - old_segment - .as_ref() - .next_segment - .store(tail.segment.as_ptr(), Ordering::Release); - - tail.next_write_idx = 0; - } - - // Safety: the tail segment is allocated since the consumer cannot - // deallocate it before `next_segment` is set to a non-null value. - let data = &tail.segment.as_ref().data[tail.next_write_idx]; - - // Safety: we have exclusive access to the slot value since the consumer - // cannot access it before `has_value` is set to true. - data.value.with_mut(|p| (*p).write(value)); - - // Ordering: this Release store synchronizes with the Acquire load in - // `pop` and ensures that the value is visible to the consumer once - // `has_value` reads `true`. - data.has_value.store(true, Ordering::Release); - - tail.next_write_idx += 1; - } - - /// Pops a new value. - /// - /// # Safety - /// - /// The method cannot be called from multiple threads concurrently. - unsafe fn pop(&self) -> Option { - // Safety: this is the only thread accessing the head. 
- let head = self.head.with_mut(|p| &mut *p); - - // If the whole segment has been read, try to move to the next segment. - if head.next_read_idx == SEGMENT_LEN { - // Read the next segment or return `None` if it is not ready yet. - // - // Safety: the head segment is still allocated since we are the only - // thread that can deallocate it. - let next_segment = head.segment.as_ref().next_segment.load(Ordering::Acquire); - let next_segment = NonNull::new(next_segment)?; - - // Deallocate the old segment. - // - // Safety: the pointer was initialized from a box and the segment is - // still allocated since we are the only thread that can deallocate - // it. - let _ = Box::from_raw(head.segment.as_ptr()); - - // Update the segment and the next index. - head.segment = next_segment; - head.next_read_idx = 0; - } - - let data = &head.segment.as_ref().data[head.next_read_idx]; - - // Ordering: this Acquire load synchronizes with the Release store in - // `push` and ensures that the value is visible once `has_value` reads - // `true`. - if !data.has_value.load(Ordering::Acquire) { - return None; - } - - // Safety: since `has_value` is `true` then we have exclusive ownership - // of the value and we know that it was initialized. - let value = data.value.with(|p| (*p).assume_init_read()); - - head.next_read_idx += 1; - - Some(value) - } -} - -impl Drop for Queue { - fn drop(&mut self) { - unsafe { - // Drop all values. - while self.pop().is_some() {} - - // All values have been dropped: the last segment can be freed. - - // Safety: this is the only thread accessing the head since both the - // consumer and producer have been dropped. - let head = self.head.with_mut(|p| &mut *p); - - // Safety: the pointer was initialized from a box and the segment is - // still allocated since we are the only thread that can deallocate - // it. 
- let _ = Box::from_raw(head.segment.as_ptr()); - } - } -} - -unsafe impl Send for Queue {} -unsafe impl Sync for Queue {} - -impl UnwindSafe for Queue {} -impl RefUnwindSafe for Queue {} - -/// A handle to a single-producer, single-consumer queue that can push values. -pub(crate) struct Producer { - queue: Arc>, - _non_sync_phantom: PhantomData>, -} -impl Producer { - /// Pushes a value to the queue. - pub(crate) fn push(&self, value: T) -> Result<(), PushError> { - if Arc::strong_count(&self.queue) == 1 { - return Err(PushError {}); - } - - unsafe { self.queue.push(value) }; - - Ok(()) - } -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy)] -/// Error returned when a push failed due to the consumer being dropped. -pub(crate) struct PushError {} - -impl fmt::Display for PushError { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "sending message into a closed mailbox") - } -} - -impl Error for PushError {} - -/// A handle to a single-producer, single-consumer queue that can pop values. -pub(crate) struct Consumer { - queue: Arc>, - _non_sync_phantom: PhantomData>, -} -impl Consumer { - /// Pops a value from the queue. - pub(crate) fn pop(&self) -> Option { - unsafe { self.queue.pop() } - } -} - -/// Creates the producer and consumer handles of a single-producer, -/// single-consumer queue. -pub(crate) fn spsc_queue() -> (Producer, Consumer) { - let queue = Arc::new(Queue::new()); - - let producer = Producer { - queue: queue.clone(), - _non_sync_phantom: PhantomData, - }; - let consumer = Consumer { - queue, - _non_sync_phantom: PhantomData, - }; - - (producer, consumer) -} - -/// Loom tests. 
-#[cfg(all(test, not(asynchronix_loom)))] -mod tests { - use super::*; - - use std::thread; - - #[test] - fn spsc_queue_basic() { - const VALUE_COUNT: usize = if cfg!(miri) { 1000 } else { 100_000 }; - - let (producer, consumer) = spsc_queue(); - - let th = thread::spawn(move || { - for i in 0..VALUE_COUNT { - let value = loop { - if let Some(v) = consumer.pop() { - break v; - } - }; - - assert_eq!(value, i); - } - }); - - for i in 0..VALUE_COUNT { - producer.push(i).unwrap(); - } - - th.join().unwrap(); - } -} - -/// Loom tests. -#[cfg(all(test, asynchronix_loom))] -mod tests { - use super::*; - - use loom::model::Builder; - use loom::thread; - - #[test] - fn loom_spsc_queue_basic() { - const DEFAULT_PREEMPTION_BOUND: usize = 4; - const VALUE_COUNT: usize = 10; - - let mut builder = Builder::new(); - if builder.preemption_bound.is_none() { - builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND); - } - - builder.check(move || { - let (producer, consumer) = spsc_queue(); - - let th = thread::spawn(move || { - let mut value = 0; - for _ in 0..VALUE_COUNT { - if let Some(v) = consumer.pop() { - assert_eq!(v, value); - value += 1; - } - } - }); - - for i in 0..VALUE_COUNT { - let _ = producer.push(i); - } - - th.join().unwrap(); - }); - } - - #[test] - fn loom_spsc_queue_new_segment() { - const DEFAULT_PREEMPTION_BOUND: usize = 4; - const VALUE_COUNT_BEFORE: usize = 5; - const VALUE_COUNT_AFTER: usize = 5; - - let mut builder = Builder::new(); - if builder.preemption_bound.is_none() { - builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND); - } - - builder.check(move || { - let (producer, consumer) = spsc_queue(); - - // Fill up the first segment except for the last `VALUE_COUNT_BEFORE` slots. 
- for i in 0..(SEGMENT_LEN - VALUE_COUNT_BEFORE) { - producer.push(i).unwrap(); - consumer.pop(); - } - - let th = thread::spawn(move || { - let mut value = SEGMENT_LEN - VALUE_COUNT_BEFORE; - for _ in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) { - if let Some(v) = consumer.pop() { - assert_eq!(v, value); - value += 1; - } - } - }); - - for i in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) { - let _ = producer.push(i); - } - - th.join().unwrap(); - }); - } -} diff --git a/asynchronix/src/model/ports/broadcaster/task_set.rs b/asynchronix/src/util/task_set.rs similarity index 62% rename from asynchronix/src/model/ports/broadcaster/task_set.rs rename to asynchronix/src/util/task_set.rs index 6538ee4..90f3e41 100644 --- a/asynchronix/src/model/ports/broadcaster/task_set.rs +++ b/asynchronix/src/util/task_set.rs @@ -1,3 +1,5 @@ +//! Primitive for the efficient management of concurrent tasks. + use std::sync::atomic::Ordering; use std::sync::Arc; @@ -21,31 +23,36 @@ const COUNTDOWN_MASK: u64 = !INDEX_MASK; /// scheduled tasks. const COUNTDOWN_ONE: u64 = 1 << 32; -/// A set of tasks that may be scheduled cheaply and can be requested to wake a -/// parent task only when a given amount of tasks have been scheduled. +/// A primitive that simplifies the management of a set of tasks scheduled +/// concurrently. /// -/// This object maintains both a list of all active tasks and a list of the -/// subset of active tasks currently scheduled. The latter is stored in a -/// Treiber stack which links tasks through indices rather than pointers. Using -/// indices has two advantages: (i) it enables a fully safe implementation and -/// (ii) it makes it possible to use a single CAS to simultaneously move the -/// head and decrement the outstanding amount of tasks to be scheduled before -/// the parent task is notified. -pub(super) struct TaskSet { - /// Set of all active tasks, scheduled or not. 
+/// A `TaskSet` maintains both a vector-based list of tasks (or more accurately, +/// task waker handles) and a linked list of the subset of tasks that are +/// currently scheduled. The latter is stored in a vector-based Treiber stack +/// which links tasks through indices rather than pointers. Using indices has +/// two advantages: (i) it makes a fully safe implementation possible and (ii) +/// it can take advantage of a single CAS to simultaneously move the head and +/// decrement the outstanding amount of tasks to be scheduled before the parent +/// task is notified. +/// +/// This can be used to implement primitives similar to `FuturesOrdered` or +/// `FuturesUnordered` in the `futures` crate. +/// +/// The `notify_count` argument of `TaskSet::take_scheduled()` can be set to +/// more than 1 to wake the parent task less frequently. For instance, if +/// `notify_count` is set to the number of pending sub-tasks, the parent task +/// will only be woken once all subtasks have been woken. + +pub(crate) struct TaskSet { + /// Set of all tasks, scheduled or not. /// - /// In some rare cases, the back of the vector can also contain inactive - /// (retired) tasks. + /// In some cases, the use of `resize()` to shrink the task set may leave + /// inactive tasks at the back of the vector, in which case the length of + /// the vector will exceed `task_count`. tasks: Vec>, - /// Head of the Treiber stack for scheduled tasks. - /// - /// The lower bits specify the index of the last scheduled task, if any, - /// whereas the upper bits specify the countdown of tasks still to be - /// scheduled before the parent task is notified. - head: Arc, - /// A notifier used to wake the parent task. - notifier: WakeSource, - /// Count of all active tasks, scheduled or not. + /// Shared Treiber stack head and parent task notifier. + shared: Arc, + /// Count of all tasks, scheduled or not. 
task_count: usize, } @@ -53,35 +60,71 @@ impl TaskSet { /// Creates an initially empty set of tasks associated to the parent task /// which notifier is provided. #[allow(clippy::assertions_on_constants)] - pub(super) fn new(notifier: WakeSource) -> Self { + pub(crate) fn new(notifier: WakeSource) -> Self { // Only 32-bit targets and above are supported. assert!(usize::BITS >= u32::BITS); Self { tasks: Vec::new(), - head: Arc::new(AtomicU64::new(EMPTY as u64)), - notifier, + shared: Arc::new(Shared { + head: AtomicU64::new(EMPTY as u64), + notifier, + }), task_count: 0, } } - /// Steals scheduled tasks if any and returns an iterator over their - /// indices, otherwise returns `None` and requests a notification to be sent - /// after `notify_count` tasks have been scheduled. + /// Creates a set of `len` tasks associated to the parent task which + /// notifier is provided. + #[allow(clippy::assertions_on_constants)] + pub(crate) fn with_len(notifier: WakeSource, len: usize) -> Self { + // Only 32-bit targets and above are supported. + assert!(usize::BITS >= u32::BITS); + + assert!(len <= EMPTY as usize && len <= SLEEPING as usize); + let len = len as u32; + + let shared = Arc::new(Shared { + head: AtomicU64::new(EMPTY as u64), + notifier, + }); + + let tasks: Vec<_> = (0..len) + .map(|idx| { + Arc::new(Task { + idx, + shared: shared.clone(), + next: AtomicU32::new(SLEEPING), + }) + }) + .collect(); + + Self { + tasks, + shared, + task_count: len as usize, + } + } + + /// Take all scheduled tasks and returns an iterator over their indices, or + /// if there are no currently scheduled tasks returns `None` and requests a + /// notification to be sent after `notify_count` tasks have been scheduled. /// - /// In all cases, the list of scheduled tasks is guaranteed to be empty - /// after this call. + /// In all cases, the list of scheduled tasks will be empty right after this + /// call. /// - /// If some tasks were stolen, no notification is requested. 
+ /// If there were scheduled tasks, no notification is requested because this + /// method is expected to be called repeatedly until it returns `None`. + /// Failure to do so will result in missed notifications. /// - /// If no tasks were stolen, the notification is guaranteed to be triggered - /// no later than after `notify_count` tasks have been scheduled, though it - /// may in some cases be triggered earlier. If the specified `notify_count` - /// is zero then no notification is requested. - pub(super) fn steal_scheduled(&self, notify_count: usize) -> Option> { + /// If no tasks were scheduled, the notification is guaranteed to be + /// triggered no later than after `notify_count` tasks have been scheduled, + /// though it may in some cases be triggered earlier. If the specified + /// `notify_count` is zero then no notification is requested. + pub(crate) fn take_scheduled(&self, notify_count: usize) -> Option> { let countdown = u32::try_from(notify_count).unwrap(); - let mut head = self.head.load(Ordering::Relaxed); + let mut head = self.shared.head.load(Ordering::Relaxed); loop { let new_head = if head & INDEX_MASK == EMPTY as u64 { (countdown as u64 * COUNTDOWN_ONE) | EMPTY as u64 @@ -93,7 +136,7 @@ impl TaskSet { // operations in `Task::wake_by_ref` and ensures that all memory // operations performed during and before the tasks were scheduled // become visible. - match self.head.compare_exchange_weak( + match self.shared.head.compare_exchange_weak( head, new_head, Ordering::Acquire, @@ -122,22 +165,22 @@ impl TaskSet { /// notification is currently requested. /// /// All discarded tasks are put in the sleeping (unscheduled) state. - pub(super) fn discard_scheduled(&self) { - if self.head.load(Ordering::Relaxed) != EMPTY as u64 { + pub(crate) fn discard_scheduled(&self) { + if self.shared.head.load(Ordering::Relaxed) != EMPTY as u64 { // Dropping the iterator ensures that all tasks are put in the // sleeping state. 
- let _ = self.steal_scheduled(0); + let _ = self.take_scheduled(0); } } - /// Modify the number of active tasks. + /// Set the number of active tasks. /// - /// Note that this method may discard all scheduled tasks. + /// Note that this method may discard already scheduled tasks. /// /// # Panic /// /// This method will panic if `len` is greater than `u32::MAX - 1`. - pub(super) fn resize(&mut self, len: usize) { + pub(crate) fn resize(&mut self, len: usize) { assert!(len <= EMPTY as usize && len <= SLEEPING as usize); self.task_count = len; @@ -149,37 +192,46 @@ impl TaskSet { self.tasks.push(Arc::new(Task { idx, - notifier: self.notifier.clone(), + shared: self.shared.clone(), next: AtomicU32::new(SLEEPING), - head: self.head.clone(), })); } return; } - // Try to remove inactive tasks. + // Try to shrink the vector of tasks. // - // The main issue when shrinking the set of active tasks is that stale + // The main issue when shrinking the vector of tasks is that stale // wakers may still be around and may at any moment be scheduled and - // insert their index in the list of scheduled tasks. If it cannot be - // guaranteed that this will not happen, then a reference to that task - // must be kept or the iterator for scheduled tasks will panic when - // indexing a stale task. + // insert their task index in the list of scheduled tasks. If it cannot + // be guaranteed that this will not happen, then the vector of tasks + // cannot be shrunk further, otherwise the iterator for scheduled tasks + // will later fail when reaching a task with an invalid index. // - // To prevent an inactive task from being spuriously scheduled, it is - // enough to pretend that the task is already scheduled by setting its - // `next` field to anything else than `SLEEPING`. However, this could - // race if the task has just set its `next` field but has not yet - // updated the head of the list of scheduled tasks, so this can only be - // done reliably if the task is currently sleeping. 
+ // We follow a 2-steps strategy: + // + // 1) remove all tasks currently in the list of scheduled task and set + // them to `SLEEPING` state in case some of them might have an index + // that will be invalidated when the vector of tasks is shrunk; + // + // 2) attempt to iteratively shrink the vector of tasks by removing + // tasks starting from the back of the vector: + // - If a task is in the `SLEEPING` state, then its `next` pointer is + // changed to an arbitrary value other than`SLEEPING`, but the task + // is not inserted in the list of scheduled tasks; this way, the + // task will be effectively rendered inactive. The task can now be + // removed from the vector. + // - If a task is found in a non-`SLEEPING` state (meaning that there + // was a race and the task was scheduled after step 1) then abandon + // further shrinking and leave this task in the vector; the iterator + // for scheduled tasks mitigates such situation by only yielding + // task indices that are within the expected range. - // All scheduled tasks are first unscheduled in case some of them are - // now inactive. + // Step 1: unscheduled tasks that may be scheduled. self.discard_scheduled(); - // The position of tasks in the set must stay consistent with their - // associated index so tasks are popped from the back. + // Step 2: attempt to remove tasks starting at the back of the vector. while self.tasks.len() > len { // There is at least one task since `len()` was non-zero. let task = self.tasks.last().unwrap(); @@ -200,11 +252,11 @@ impl TaskSet { } } - /// Returns `true` if one or more tasks are currently scheduled. - pub(super) fn has_scheduled(&self) -> bool { + /// Returns `true` if one or more sub-tasks are currently scheduled. + pub(crate) fn has_scheduled(&self) -> bool { // Ordering: the content of the head is only used as an advisory flag so // Relaxed ordering is sufficient. 
- self.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64 + self.shared.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64 } /// Returns a reference to the waker associated to the active task with the @@ -214,29 +266,36 @@ impl TaskSet { /// /// This method will panic if there is no active task with the provided /// index. - pub(super) fn waker_of(&self, idx: usize) -> WakerRef { + pub(crate) fn waker_of(&self, idx: usize) -> WakerRef { assert!(idx < self.task_count); waker_ref(&self.tasks[idx]) } } +/// Internals shared between a `TaskSet` and its associated `Task`s. +struct Shared { + /// Head of the Treiber stack for scheduled tasks. + /// + /// The lower 32 bits specify the index of the last scheduled task (the + /// actual head), if any, whereas the upper 32 bits specify the countdown of + /// tasks still to be scheduled before the parent task is notified. + head: AtomicU64, + /// A notifier used to wake the parent task. + notifier: WakeSource, +} + /// An asynchronous task associated with the future of a sender. -pub(super) struct Task { +struct Task { /// Index of this task. idx: u32, - /// A notifier triggered once a certain number of tasks have been scheduled. - notifier: WakeSource, /// Index of the next task in the list of scheduled tasks. next: AtomicU32, /// Head of the list of scheduled tasks. - head: Arc, + shared: Arc, } impl ArcWake for Task { - fn wake(self: Arc) { - Self::wake_by_ref(&self); - } fn wake_by_ref(arc_self: &Arc) { let mut next = arc_self.next.load(Ordering::Relaxed); @@ -251,7 +310,7 @@ impl ArcWake for Task { // CAS on the head already ensure that all memory operations // that precede this call to `wake_by_ref` become visible when // the tasks are stolen. 
- let head = arc_self.head.load(Ordering::Relaxed); + let head = arc_self.shared.head.load(Ordering::Relaxed); match arc_self.next.compare_exchange_weak( SLEEPING, (head & INDEX_MASK) as u32, @@ -297,7 +356,7 @@ impl ArcWake for Task { // that the value of the `next` field as well as all memory // operations that precede this call to `wake_by_ref` become visible // when the tasks are stolen. - match arc_self.head.compare_exchange_weak( + match arc_self.shared.head.compare_exchange_weak( head, new_head, Ordering::Release, @@ -307,7 +366,7 @@ impl ArcWake for Task { // If the countdown has just been cleared, it is necessary // to send a notification. if countdown == COUNTDOWN_ONE { - arc_self.notifier.notify(); + arc_self.shared.notifier.notify(); } return; @@ -339,7 +398,7 @@ impl ArcWake for Task { } /// An iterator over scheduled tasks. -pub(super) struct TaskIterator<'a> { +pub(crate) struct TaskIterator<'a> { task_list: &'a TaskSet, next_index: u32, } diff --git a/asynchronix/tests/model_scheduling.rs b/asynchronix/tests/model_scheduling.rs index 6ff9b44..50bdce4 100644 --- a/asynchronix/tests/model_scheduling.rs +++ b/asynchronix/tests/model_scheduling.rs @@ -2,9 +2,10 @@ use std::time::Duration; -use asynchronix::model::{Model, Output}; +use asynchronix::model::Model; +use asynchronix::ports::{EventBuffer, Output}; use asynchronix::simulation::{Mailbox, SimInit}; -use asynchronix::time::{EventKey, MonotonicTime, Scheduler}; +use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; #[test] fn model_schedule_event() { @@ -27,13 +28,14 @@ fn model_schedule_event() { let mut model = TestModel::default(); let mbox = Mailbox::new(); - let mut output = model.output.connect_stream().0; + let mut output = EventBuffer::new(); + model.output.connect_sink(&output); let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; let mut simu = SimInit::new().add_model(model, mbox).init(t0); - simu.send_event(TestModel::trigger, (), addr); + 
simu.process_event(TestModel::trigger, (), addr); simu.step(); assert_eq!(simu.time(), t0 + Duration::from_secs(2)); assert!(output.next().is_some()); @@ -46,7 +48,7 @@ fn model_cancel_future_keyed_event() { #[derive(Default)] struct TestModel { output: Output, - key: Option, + key: Option, } impl TestModel { fn trigger(&mut self, _: (), scheduler: &Scheduler) { @@ -71,13 +73,14 @@ fn model_cancel_future_keyed_event() { let mut model = TestModel::default(); let mbox = Mailbox::new(); - let mut output = model.output.connect_stream().0; + let mut output = EventBuffer::new(); + model.output.connect_sink(&output); let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; let mut simu = SimInit::new().add_model(model, mbox).init(t0); - simu.send_event(TestModel::trigger, (), addr); + simu.process_event(TestModel::trigger, (), addr); simu.step(); assert_eq!(simu.time(), t0 + Duration::from_secs(1)); assert_eq!(output.next(), Some(1)); @@ -91,7 +94,7 @@ fn model_cancel_same_time_keyed_event() { #[derive(Default)] struct TestModel { output: Output, - key: Option, + key: Option, } impl TestModel { fn trigger(&mut self, _: (), scheduler: &Scheduler) { @@ -116,13 +119,14 @@ fn model_cancel_same_time_keyed_event() { let mut model = TestModel::default(); let mbox = Mailbox::new(); - let mut output = model.output.connect_stream().0; + let mut output = EventBuffer::new(); + model.output.connect_sink(&output); let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; let mut simu = SimInit::new().add_model(model, mbox).init(t0); - simu.send_event(TestModel::trigger, (), addr); + simu.process_event(TestModel::trigger, (), addr); simu.step(); assert_eq!(simu.time(), t0 + Duration::from_secs(2)); assert_eq!(output.next(), Some(1)); @@ -157,13 +161,14 @@ fn model_schedule_periodic_event() { let mut model = TestModel::default(); let mbox = Mailbox::new(); - let mut output = model.output.connect_stream().0; + let mut output = EventBuffer::new(); + model.output.connect_sink(&output); let 
addr = mbox.address(); let t0 = MonotonicTime::EPOCH; let mut simu = SimInit::new().add_model(model, mbox).init(t0); - simu.send_event(TestModel::trigger, (), addr); + simu.process_event(TestModel::trigger, (), addr); // Move to the next events at t0 + 2s + k*3s. for k in 0..10 { @@ -182,7 +187,7 @@ fn model_cancel_periodic_event() { #[derive(Default)] struct TestModel { output: Output<()>, - key: Option, + key: Option, } impl TestModel { fn trigger(&mut self, _: (), scheduler: &Scheduler) { @@ -206,13 +211,14 @@ fn model_cancel_periodic_event() { let mut model = TestModel::default(); let mbox = Mailbox::new(); - let mut output = model.output.connect_stream().0; + let mut output = EventBuffer::new(); + model.output.connect_sink(&output); let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; let mut simu = SimInit::new().add_model(model, mbox).init(t0); - simu.send_event(TestModel::trigger, (), addr); + simu.process_event(TestModel::trigger, (), addr); simu.step(); assert_eq!(simu.time(), t0 + Duration::from_secs(2)); diff --git a/asynchronix/tests/simulation_scheduling.rs b/asynchronix/tests/simulation_scheduling.rs index 858f81e..70956a1 100644 --- a/asynchronix/tests/simulation_scheduling.rs +++ b/asynchronix/tests/simulation_scheduling.rs @@ -2,8 +2,9 @@ use std::time::Duration; -use asynchronix::model::{Model, Output}; -use asynchronix::simulation::{Address, EventStream, Mailbox, SimInit, Simulation}; +use asynchronix::model::Model; +use asynchronix::ports::{EventBuffer, Output}; +use asynchronix::simulation::{Address, Mailbox, SimInit, Simulation}; use asynchronix::time::MonotonicTime; // Input-to-output pass-through model. @@ -26,12 +27,13 @@ impl Model for PassThroughModel {} /// output) running as fast as possible. fn passthrough_bench( t0: MonotonicTime, -) -> (Simulation, Address>, EventStream) { +) -> (Simulation, Address>, EventBuffer) { // Bench assembly. 
let mut model = PassThroughModel::new(); let mbox = Mailbox::new(); - let out_stream = model.output.connect_stream().0; + let out_stream = EventBuffer::new(); + model.output.connect_sink(&out_stream); let addr = mbox.address(); let simu = SimInit::new().add_model(model, mbox).init(t0); @@ -243,18 +245,20 @@ fn timestamp_bench( ) -> ( Simulation, Address, - EventStream<(Instant, SystemTime)>, + EventBuffer<(Instant, SystemTime)>, ) { // Bench assembly. let mut model = TimestampModel::default(); let mbox = Mailbox::new(); - let stamp_stream = model.stamp.connect_stream().0; + let stamp_stream = EventBuffer::new(); + model.stamp.connect_sink(&stamp_stream); let addr = mbox.address(); let simu = SimInit::new() .add_model(model, mbox) - .init_with_clock(t0, clock); + .set_clock(clock) + .init(t0); (simu, addr, stamp_stream) } From 7526ffbceadf9dcddc02656d2f6715543fe08480 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ja=C5=ADhien=20Piatlicki?= Date: Thu, 11 Apr 2024 09:40:37 +0200 Subject: [PATCH 02/12] Add setup step. 
--- .github/workflows/ci.yml | 2 +- asynchronix/Cargo.toml | 2 +- asynchronix/examples/espresso_machine.rs | 51 +- asynchronix/examples/power_supply.rs | 2 +- asynchronix/examples/stepper_motor.rs | 25 +- asynchronix/src/channel.rs | 21 +- asynchronix/src/lib.rs | 57 +- asynchronix/src/model.rs | 135 +++-- asynchronix/src/model/context.rs | 485 ++++++++++++++++++ asynchronix/src/ports/input/markers.rs | 16 +- asynchronix/src/ports/input/model_fn.rs | 55 +- asynchronix/src/ports/output/broadcaster.rs | 14 +- asynchronix/src/ports/source.rs | 5 +- asynchronix/src/ports/source/broadcaster.rs | 14 +- .../src/rpc/codegen/custom_transport.rs | 10 +- asynchronix/src/rpc/codegen/simulation.rs | 178 ++----- asynchronix/src/simulation.rs | 67 ++- .../src/{time => simulation}/scheduler.rs | 386 +------------- asynchronix/src/simulation/sim_init.rs | 15 +- asynchronix/src/time.rs | 19 +- asynchronix/src/util/sync_cell.rs | 9 +- asynchronix/tests/model_scheduling.rs | 44 +- asynchronix/tests/simulation_scheduling.rs | 22 +- 23 files changed, 836 insertions(+), 798 deletions(-) create mode 100644 asynchronix/src/model/context.rs rename asynchronix/src/{time => simulation}/scheduler.rs (57%) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e25d852..efb6ebc 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: matrix: rust: - stable - - 1.64.0 + - 1.75.0 steps: - name: Checkout sources uses: actions/checkout@v3 diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index e9bef07..a91ff21 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -9,7 +9,7 @@ name = "asynchronix" authors = ["Serge Barral "] version = "0.2.2" edition = "2021" -rust-version = "1.64" +rust-version = "1.75" license = "MIT OR Apache-2.0" repository = "https://github.com/asynchronics/asynchronix" readme = "../README.md" diff --git a/asynchronix/examples/espresso_machine.rs b/asynchronix/examples/espresso_machine.rs index 
a2c0826..bd6b1a6 100644 --- a/asynchronix/examples/espresso_machine.rs +++ b/asynchronix/examples/espresso_machine.rs @@ -31,14 +31,12 @@ //! (-) //! ``` -use std::future::Future; -use std::pin::Pin; use std::time::Duration; -use asynchronix::model::{InitializedModel, Model}; +use asynchronix::model::{Context, InitializedModel, Model}; use asynchronix::ports::{EventSlot, Output}; -use asynchronix::simulation::{Mailbox, SimInit}; -use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; +use asynchronix::simulation::{ActionKey, Mailbox, SimInit}; +use asynchronix::time::MonotonicTime; /// Water pump. pub struct Pump { @@ -122,7 +120,7 @@ impl Controller { } /// Starts brewing or cancels the current brew -- input port. - pub async fn brew_cmd(&mut self, _: (), scheduler: &Scheduler) { + pub async fn brew_cmd(&mut self, _: (), context: &Context) { // If a brew was ongoing, sending the brew command is interpreted as a // request to cancel it. if let Some(key) = self.stop_brew_key.take() { @@ -141,7 +139,7 @@ impl Controller { // Schedule the `stop_brew()` method and turn on the pump. self.stop_brew_key = Some( - scheduler + context .schedule_keyed_event(self.brew_time, Self::stop_brew, ()) .unwrap(), ); @@ -190,7 +188,7 @@ impl Tank { } /// Water volume added [m³] -- input port. - pub async fn fill(&mut self, added_volume: f64, scheduler: &Scheduler) { + pub async fn fill(&mut self, added_volume: f64, context: &Context) { // Ignore zero and negative values. We could also impose a maximum based // on tank capacity. if added_volume <= 0.0 { @@ -208,11 +206,11 @@ impl Tank { state.set_empty_key.cancel(); // Update the volume, saturating at 0 in case of rounding errors. 
- let time = scheduler.time(); + let time = context.time(); let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64(); self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0); - self.schedule_empty(state.flow_rate, time, scheduler).await; + self.schedule_empty(state.flow_rate, time, context).await; // There is no need to broadcast the state of the water sense since // it could not be previously `Empty` (otherwise the dynamic state @@ -230,10 +228,10 @@ impl Tank { /// # Panics /// /// This method will panic if the flow rate is negative. - pub async fn set_flow_rate(&mut self, flow_rate: f64, scheduler: &Scheduler) { + pub async fn set_flow_rate(&mut self, flow_rate: f64, context: &Context) { assert!(flow_rate >= 0.0); - let time = scheduler.time(); + let time = context.time(); // If the flow rate was non-zero up to now, update the volume. if let Some(state) = self.dynamic_state.take() { @@ -245,7 +243,7 @@ impl Tank { self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0); } - self.schedule_empty(flow_rate, time, scheduler).await; + self.schedule_empty(flow_rate, time, context).await; } /// Schedules a callback for when the tank becomes empty. @@ -258,7 +256,7 @@ impl Tank { &mut self, flow_rate: f64, time: MonotonicTime, - scheduler: &Scheduler, + context: &Context, ) { // Determine when the tank will be empty at the current flow rate. let duration_until_empty = if self.volume == 0.0 { @@ -275,7 +273,7 @@ impl Tank { let duration_until_empty = Duration::from_secs_f64(duration_until_empty); // Schedule the next update. - match scheduler.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) { + match context.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) { Ok(set_empty_key) => { let state = TankDynamicState { last_volume_update: time, @@ -302,21 +300,16 @@ impl Tank { impl Model for Tank { /// Broadcasts the initial state of the water sense. 
- fn init( - mut self, - _scheduler: &Scheduler, - ) -> Pin> + Send + '_>> { - Box::pin(async move { - self.water_sense - .send(if self.volume == 0.0 { - WaterSenseState::Empty - } else { - WaterSenseState::NotEmpty - }) - .await; + async fn init(mut self, _: &Context) -> InitializedModel { + self.water_sense + .send(if self.volume == 0.0 { + WaterSenseState::Empty + } else { + WaterSenseState::NotEmpty + }) + .await; - self.into() - }) + self.into() } } diff --git a/asynchronix/examples/power_supply.rs b/asynchronix/examples/power_supply.rs index 4b4930a..bce7b9c 100644 --- a/asynchronix/examples/power_supply.rs +++ b/asynchronix/examples/power_supply.rs @@ -24,7 +24,7 @@ //! │ Power │ ◀current │ │ //! │ supply │ └────────┘ //! │ ├───────────────────────────────▶ Total power -//! └──────────┘ +//! └──────────┘ //! ``` use asynchronix::model::Model; use asynchronix::ports::{EventSlot, Output, Requestor}; diff --git a/asynchronix/examples/stepper_motor.rs b/asynchronix/examples/stepper_motor.rs index 9f5d764..3d24221 100644 --- a/asynchronix/examples/stepper_motor.rs +++ b/asynchronix/examples/stepper_motor.rs @@ -15,13 +15,12 @@ //! ``` use std::future::Future; -use std::pin::Pin; use std::time::Duration; -use asynchronix::model::{InitializedModel, Model}; +use asynchronix::model::{Context, InitializedModel, Model}; use asynchronix::ports::{EventBuffer, Output}; use asynchronix::simulation::{Mailbox, SimInit}; -use asynchronix::time::{MonotonicTime, Scheduler}; +use asynchronix::time::MonotonicTime; /// Stepper motor. pub struct Motor { @@ -88,15 +87,9 @@ impl Motor { impl Model for Motor { /// Broadcasts the initial position of the motor. 
- fn init( - mut self, - _scheduler: &Scheduler, - ) -> Pin> + Send + '_>> { - Box::pin(async move { - self.position.send(self.pos).await; - - self.into() - }) + async fn init(mut self, _: &Context) -> InitializedModel { + self.position.send(self.pos).await; + self.into() } } @@ -130,7 +123,7 @@ impl Driver { } /// Sets the pulse rate (sign = direction) [Hz] -- input port. - pub async fn pulse_rate(&mut self, pps: f64, scheduler: &Scheduler) { + pub async fn pulse_rate(&mut self, pps: f64, context: &Context) { let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS); if pps == self.pps { return; @@ -142,7 +135,7 @@ impl Driver { // Trigger the rotation if the motor is currently idle. Otherwise the // new value will be accounted for at the next pulse. if is_idle { - self.send_pulse((), scheduler).await; + self.send_pulse((), context).await; } } @@ -153,7 +146,7 @@ impl Driver { fn send_pulse<'a>( &'a mut self, _: (), - scheduler: &'a Scheduler, + context: &'a Context, ) -> impl Future + Send + 'a { async move { let current_out = match self.next_phase { @@ -174,7 +167,7 @@ impl Driver { let pulse_duration = Duration::from_secs_f64(1.0 / self.pps.abs()); // Schedule the next pulse. - scheduler + context .schedule_event(pulse_duration, Self::send_pulse, ()) .unwrap(); } diff --git a/asynchronix/src/channel.rs b/asynchronix/src/channel.rs index a1e8a43..5fe67b2 100644 --- a/asynchronix/src/channel.rs +++ b/asynchronix/src/channel.rs @@ -18,8 +18,7 @@ use recycle_box::RecycleBox; use queue::{PopError, PushError, Queue}; use recycle_box::coerce_box; -use crate::model::Model; -use crate::time::Scheduler; +use crate::model::{Context, Model}; /// Data shared between the receiver and the senders. struct Inner { @@ -45,7 +44,7 @@ impl Inner { } /// A receiver which can asynchronously execute `async` message that take an -/// argument of type `&mut M` and an optional `&Scheduler` argument. +/// argument of type `&mut M` and an optional `&Context` argument. 
pub(crate) struct Receiver { /// Shared data. inner: Arc>, @@ -90,7 +89,7 @@ impl Receiver { pub(crate) async fn recv( &mut self, model: &mut M, - scheduler: &Scheduler, + context: &Context, ) -> Result<(), RecvError> { let msg = unsafe { self.inner @@ -106,7 +105,7 @@ impl Receiver { match msg { Some(mut msg) => { // Consume the message to obtain a boxed future. - let fut = msg.call_once(model, scheduler, self.future_box.take().unwrap()); + let fut = msg.call_once(model, context, self.future_box.take().unwrap()); // Now that `msg` was consumed and its slot in the queue was // freed, signal to one awaiting sender that one slot is @@ -188,7 +187,7 @@ impl Sender { where F: for<'a> FnOnce( &'a mut M, - &'a Scheduler, + &'a Context, RecycleBox<()>, ) -> RecycleBox + Send + 'a> + Send @@ -311,7 +310,7 @@ impl fmt::Debug for Sender { } /// A closure that can be called once to create a future boxed in a `RecycleBox` -/// from an `&mut M`, a `&Scheduler` and an empty `RecycleBox`. +/// from an `&mut M`, a `&Context` and an empty `RecycleBox`. 
/// /// This is basically a workaround to emulate an `FnOnce` with the equivalent of /// an `FnMut` so that it is possible to call it as a `dyn` trait stored in a @@ -327,7 +326,7 @@ trait MessageFn: Send { fn call_once<'a>( &mut self, model: &'a mut M, - scheduler: &'a Scheduler, + context: &'a Context, recycle_box: RecycleBox<()>, ) -> RecycleBox + Send + 'a>; } @@ -349,7 +348,7 @@ impl MessageFn for MessageFnOnce where F: for<'a> FnOnce( &'a mut M, - &'a Scheduler, + &'a Context, RecycleBox<()>, ) -> RecycleBox + Send + 'a> + Send, @@ -357,12 +356,12 @@ where fn call_once<'a>( &mut self, model: &'a mut M, - scheduler: &'a Scheduler, + context: &'a Context, recycle_box: RecycleBox<()>, ) -> RecycleBox + Send + 'a> { let closure = self.msg_fn.take().unwrap(); - (closure)(model, scheduler, recycle_box) + (closure)(model, context, recycle_box) } } diff --git a/asynchronix/src/lib.rs b/asynchronix/src/lib.rs index aeaefad..c9c7c76 100644 --- a/asynchronix/src/lib.rs +++ b/asynchronix/src/lib.rs @@ -45,7 +45,7 @@ //! * _input ports_, which are synchronous or asynchronous methods that //! implement the [`InputFn`](ports::InputFn) trait and take an `&mut self` //! argument, a message argument, and an optional -//! [`&Scheduler`](time::Scheduler) argument, +//! [`&Context`](model::Context) argument, //! * _replier ports_, which are similar to input ports but implement the //! [`ReplierFn`](ports::ReplierFn) trait and return a reply. //! @@ -54,12 +54,17 @@ //! are referred to as *requests* and *replies*. //! //! Models must implement the [`Model`](model::Model) trait. The main purpose of -//! this trait is to allow models to specify an `init()` method that is -//! guaranteed to run once and only once when the simulation is initialized, -//! _i.e._ after all models have been connected but before the simulation -//! starts. The `init()` method has a default implementation, so models that do -//! 
not require initialization can simply implement the trait with a one-liner -//! such as `impl Model for MyModel {}`. +//! this trait is to allow models to specify +//! * a `setup()` method that is called once during model addtion to simulation, +//! this method allows e.g. creation and interconnection of submodels inside +//! the model, +//! * an `init()` method that is guaranteed to run once and only once when the +//! simulation is initialized, _i.e._ after all models have been connected but +//! before the simulation starts. +//! +//! The `setup()` and `init()` methods have default implementations, so models +//! that do not require setup and initialization can simply implement the trait +//! with a one-liner such as `impl Model for MyModel {}`. //! //! #### A simple model //! @@ -93,29 +98,28 @@ //! impl Model for Multiplier {} //! ``` //! -//! #### A model using the local scheduler +//! #### A model using the local context //! //! Models frequently need to schedule actions at a future time or simply get //! access to the current simulation time. To do so, input and replier methods -//! can take an optional argument that gives them access to a local scheduler. +//! can take an optional argument that gives them access to a local context. //! -//! To show how the local scheduler can be used in practice, let us implement +//! To show how the local context can be used in practice, let us implement //! `Delay`, a model which simply forwards its input unmodified after a 1s //! delay: //! //! ``` //! use std::time::Duration; -//! use asynchronix::model::Model; +//! use asynchronix::model::{Context, Model}; //! use asynchronix::ports::Output; -//! use asynchronix::time::Scheduler; //! //! #[derive(Default)] //! pub struct Delay { //! pub output: Output, //! } //! impl Delay { -//! pub fn input(&mut self, value: f64, scheduler: &Scheduler) { -//! scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); +//! 
pub fn input(&mut self, value: f64, context: &Context) { +//! context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); //! } //! //! async fn send(&mut self, value: f64) { @@ -137,7 +141,7 @@ //! [`Address`](simulation::Mailbox)es pointing to that mailbox. //! //! Addresses are used among others to connect models: each output or requestor -//! ports has a `connect()` method that takes as argument a function pointer to +//! port has a `connect()` method that takes as argument a function pointer to //! the corresponding input or replier port method and the address of the //! targeted model. //! @@ -168,9 +172,8 @@ //! ``` //! # mod models { //! # use std::time::Duration; -//! # use asynchronix::model::Model; +//! # use asynchronix::model::{Context, Model}; //! # use asynchronix::ports::Output; -//! # use asynchronix::time::Scheduler; //! # #[derive(Default)] //! # pub struct Multiplier { //! # pub output: Output, @@ -186,8 +189,8 @@ //! # pub output: Output, //! # } //! # impl Delay { -//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler) { -//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); +//! # pub fn input(&mut self, value: f64, context: &Context) { +//! # context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); //! # } //! # async fn send(&mut self, value: f64) { // this method can be private //! # self.output.send(value).await; @@ -268,9 +271,8 @@ //! ``` //! # mod models { //! # use std::time::Duration; -//! # use asynchronix::model::Model; +//! # use asynchronix::model::{Context, Model}; //! # use asynchronix::ports::Output; -//! # use asynchronix::time::Scheduler; //! # #[derive(Default)] //! # pub struct Multiplier { //! # pub output: Output, @@ -286,8 +288,8 @@ //! # pub output: Output, //! # } //! # impl Delay { -//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler) { -//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); +//! 
# pub fn input(&mut self, value: f64, context: &Context) { +//! # context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap(); //! # } //! # async fn send(&mut self, value: f64) { // this method can be private //! # self.output.send(value).await; @@ -395,15 +397,14 @@ //! //! * the [`model`] module provides more details about the signatures of input //! and replier port methods and discusses model initialization in the -//! documentation of [`model::Model`], +//! documentation of [`model::Model`] and self-scheduling methods as well as +//! scheduling cancellation in the documentation of [`model::Context`], //! * the [`simulation`] module discusses how the capacity of mailboxes may //! affect the simulation, how connections can be modified after the //! simulation was instantiated, and which pathological situations can lead to //! a deadlock, -//! * the [`time`] module discusses in particular self-scheduling methods and -//! scheduling cancellation in the documentation of [`time::Scheduler`] while -//! the monotonic timestamp format used for simulations is documented in -//! [`time::MonotonicTime`]. +//! * the [`time`] module discusses in particular the monotonic timestamp format +//! used for simulations ([`time::MonotonicTime`]). #![warn(missing_docs, missing_debug_implementations, unreachable_pub)] pub(crate) mod channel; diff --git a/asynchronix/src/model.rs b/asynchronix/src/model.rs index 4fecf87..cda239c 100644 --- a/asynchronix/src/model.rs +++ b/asynchronix/src/model.rs @@ -2,16 +2,19 @@ //! //! # Model trait //! -//! Every model must implement the [`Model`] trait. This trait defines an -//! asynchronous initialization method, [`Model::init()`], which main purpose is -//! to enable models to perform specific actions only once all models have been -//! connected and migrated to the simulation, but before the simulation actually -//! starts. +//! Every model must implement the [`Model`] trait. This trait defines +//! 
* a setup method, [`Model::setup()`], which main purpose is to create, +//! connect and add to the simulation bench submodels and perform other setup +//! steps, +//! * an asynchronous initialization method, [`Model::init()`], which main +//! purpose is to enable models to perform specific actions only once all +//! models have been connected and migrated to the simulation, but before the +//! simulation actually starts. //! //! #### Examples //! -//! A model that does not require initialization can simply use the default -//! implementation of the `Model` trait: +//! A model that does not require setup and initialization can simply use the +//! default implementation of the `Model` trait: //! //! ``` //! use asynchronix::model::Model; @@ -22,28 +25,31 @@ //! impl Model for MyModel {} //! ``` //! -//! Otherwise, a custom `init()` method can be implemented: +//! Otherwise, custom `setup()` or `init()` methods can be implemented: //! //! ``` //! use std::future::Future; //! use std::pin::Pin; //! -//! use asynchronix::model::{InitializedModel, Model}; -//! use asynchronix::time::Scheduler; +//! use asynchronix::model::{Context, InitializedModel, Model, SetupContext}; //! //! pub struct MyModel { //! // ... //! } //! impl Model for MyModel { -//! fn init( -//! mut self, -//! scheduler: &Scheduler -//! ) -> Pin> + Send + '_>>{ -//! Box::pin(async move { -//! println!("...initialization..."); +//! fn setup( +//! &mut self, +//! setup_context: &SetupContext) { +//! println!("...setup..."); +//! } //! -//! self.into() -//! }) +//! async fn init( +//! mut self, +//! context: &Context +//! ) -> InitializedModel { +//! println!("...initialization..."); +//! +//! self.into() //! } //! } //! ``` @@ -103,17 +109,17 @@ //! ```ignore //! fn(&mut self) // argument elided, implies `T=()` //! fn(&mut self, T) -//! fn(&mut self, T, &Scheduler) +//! fn(&mut self, T, &Context) //! async fn(&mut self) // argument elided, implies `T=()` //! async fn(&mut self, T) -//! 
async fn(&mut self, T, &Scheduler) +//! async fn(&mut self, T, &Context) //! where //! Self: Model, //! T: Clone + Send + 'static, //! R: Send + 'static, //! ``` //! -//! The scheduler argument is useful for methods that need access to the +//! The context argument is useful for methods that need access to the //! simulation time or that need to schedule an action at a future date. //! //! A replier port for a request of type `T` with a reply of type `R` may in @@ -123,7 +129,7 @@ //! ```ignore //! async fn(&mut self) -> R // argument elided, implies `T=()` //! async fn(&mut self, T) -> R -//! async fn(&mut self, T, &Scheduler) -> R +//! async fn(&mut self, T, &Context) -> R //! where //! Self: Model, //! T: Clone + Send + 'static, @@ -134,7 +140,7 @@ //! can be connected to input and requestor ports when assembling the simulation //! bench. However, input ports may instead be defined as private methods if //! they are only used by the model itself to schedule future actions (see the -//! [`Scheduler`] examples). +//! [`Context`] examples). //! //! Changing the signature of an input or replier port is not considered to //! alter the public interface of a model provided that the event, request and @@ -143,17 +149,16 @@ //! #### Example //! //! ``` -//! use asynchronix::model::Model; -//! use asynchronix::time::Scheduler; +//! use asynchronix::model::{Context, Model}; //! //! pub struct MyModel { //! // ... //! } //! impl MyModel { -//! pub fn my_input(&mut self, input: String, scheduler: &Scheduler) { +//! pub fn my_input(&mut self, input: String, context: &Context) { //! // ... //! } -//! pub async fn my_replier(&mut self, request: u32) -> bool { // scheduler argument elided +//! pub async fn my_replier(&mut self, request: u32) -> bool { // context argument elided //! // ... //! # unimplemented!() //! } @@ -163,14 +168,19 @@ //! 
use std::future::Future; -use std::pin::Pin; -use crate::time::Scheduler; +pub use context::{Context, SetupContext}; + +mod context; /// Trait to be implemented by all models. /// -/// This trait enables models to perform specific actions in the -/// [`Model::init()`] method only once all models have been connected and +/// This trait enables models to perform specific actions during setup and +/// initialization. The [`Model::setup()`] method is run only once when models +/// are being added to the simulation bench. This method allows in particular +/// sub-models to be created, connected and added to the simulation. +/// +/// The [`Model::init()`] method is run only once all models have been connected and /// migrated to the simulation bench, but before the simulation actually starts. /// A common use for `init` is to send messages to connected models at the /// beginning of the simulation. @@ -179,6 +189,37 @@ use crate::time::Scheduler; /// to prevent an already initialized model from being added to the simulation /// bench. pub trait Model: Sized + Send + 'static { + /// Performs model setup. + /// + /// This method is executed exactly once for all models of the simulation + /// when the [`SimInit::add_model()`](crate::simulation::SimInit::add_model) + /// method is called. + /// + /// The default implementation does nothing. + /// + /// # Examples + /// + /// ``` + /// use std::future::Future; + /// use std::pin::Pin; + /// + /// use asynchronix::model::{InitializedModel, Model, SetupContext}; + /// + /// pub struct MyModel { + /// // ... + /// } + /// + /// impl Model for MyModel { + /// fn setup( + /// &mut self, + /// setup_context: &SetupContext + /// ) { + /// println!("...setup..."); + /// } + /// } + /// ``` + fn setup(&mut self, _: &SetupContext) {} + /// Performs asynchronous model initialization. 
/// /// This asynchronous method is executed exactly once for all models of the @@ -188,47 +229,31 @@ pub trait Model: Sized + Send + 'static { /// The default implementation simply converts the model to an /// `InitializedModel` without any side effect. /// - /// *Note*: it is currently necessary to box the returned future; this - /// limitation will be lifted once Rust supports `async` methods in traits. - /// /// # Examples /// /// ``` /// use std::future::Future; /// use std::pin::Pin; /// - /// use asynchronix::model::{InitializedModel, Model}; - /// use asynchronix::time::Scheduler; + /// use asynchronix::model::{Context, InitializedModel, Model}; /// /// pub struct MyModel { /// // ... /// } /// /// impl Model for MyModel { - /// fn init( + /// async fn init( /// self, - /// scheduler: &Scheduler - /// ) -> Pin> + Send + '_>>{ - /// Box::pin(async move { - /// println!("...initialization..."); + /// context: &Context + /// ) -> InitializedModel { + /// println!("...initialization..."); /// - /// self.into() - /// }) + /// self.into() /// } /// } /// ``` - - // Removing the boxing constraint requires the - // `return_position_impl_trait_in_trait` and `async_fn_in_trait` features. - // Tracking issue: . 
- fn init( - self, - scheduler: &Scheduler, - ) -> Pin> + Send + '_>> { - Box::pin(async move { - let _ = scheduler; // suppress the unused argument warning - self.into() - }) + fn init(self, _: &Context) -> impl Future> + Send { + async { self.into() } } } diff --git a/asynchronix/src/model/context.rs b/asynchronix/src/model/context.rs new file mode 100644 index 0000000..24e0c94 --- /dev/null +++ b/asynchronix/src/model/context.rs @@ -0,0 +1,485 @@ +use std::fmt; +use std::sync::{Arc, Mutex}; +use std::time::Duration; + +use crate::channel::Sender; +use crate::executor::Executor; +use crate::ports::InputFn; +use crate::simulation::{ + self, schedule_event_at_unchecked, schedule_keyed_event_at_unchecked, + schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked, ActionKey, + Deadline, Mailbox, SchedulerQueue, SchedulingError, +}; +use crate::time::{MonotonicTime, TearableAtomicTime}; +use crate::util::sync_cell::SyncCellReader; + +use super::Model; + +/// A local context for models. +/// +/// A `Context` is a handle to the global context associated to a model +/// instance. It can be used by the model to retrieve the simulation time or +/// schedule delayed actions on itself. +/// +/// ### Caveat: self-scheduling `async` methods +/// +/// Due to a current rustc issue, `async` methods that schedule themselves will +/// not compile unless an explicit `Send` bound is added to the returned future. +/// This can be done by replacing the `async` signature with a partially +/// desugared signature such as: +/// +/// ```ignore +/// fn self_scheduling_method<'a>( +/// &'a mut self, +/// arg: MyEventType, +/// context: &'a Context +/// ) -> impl Future + Send + 'a { +/// async move { +/// /* implementation */ +/// } +/// } +/// ``` +/// +/// Self-scheduling methods which are not `async` are not affected by this +/// issue. +/// +/// # Examples +/// +/// A model that sends a greeting after some delay. 
+/// +/// ``` +/// use std::time::Duration; +/// use asynchronix::model::{Context, Model}; +/// use asynchronix::ports::Output; +/// +/// #[derive(Default)] +/// pub struct DelayedGreeter { +/// msg_out: Output, +/// } +/// +/// impl DelayedGreeter { +/// // Triggers a greeting on the output port after some delay [input port]. +/// pub async fn greet_with_delay(&mut self, delay: Duration, context: &Context) { +/// let time = context.time(); +/// let greeting = format!("Hello, this message was scheduled at: {:?}.", time); +/// +/// if delay.is_zero() { +/// self.msg_out.send(greeting).await; +/// } else { +/// context.schedule_event(delay, Self::send_msg, greeting).unwrap(); +/// } +/// } +/// +/// // Sends a message to the output [private input port]. +/// async fn send_msg(&mut self, msg: String) { +/// self.msg_out.send(msg).await; +/// } +/// } +/// impl Model for DelayedGreeter {} +/// ``` + +// The self-scheduling caveat seems related to this issue: +// https://github.com/rust-lang/rust/issues/78649 +pub struct Context { + sender: Sender, + scheduler_queue: Arc>, + time: SyncCellReader, +} + +impl Context { + /// Creates a new local context. + pub(crate) fn new( + sender: Sender, + scheduler_queue: Arc>, + time: SyncCellReader, + ) -> Self { + Self { + sender, + scheduler_queue, + time, + } + } + + /// Returns the current simulation time. + /// + /// # Examples + /// + /// ``` + /// use asynchronix::model::{Context, Model}; + /// use asynchronix::time::MonotonicTime; + /// + /// fn is_third_millenium(context: &Context) -> bool { + /// let time = context.time(); + /// time >= MonotonicTime::new(978307200, 0).unwrap() + /// && time < MonotonicTime::new(32535216000, 0).unwrap() + /// } + /// ``` + pub fn time(&self) -> MonotonicTime { + self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time") + } + + /// Schedules an event at a future time. 
+ /// + /// An error is returned if the specified deadline is not in the future of + /// the current simulation time. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// use asynchronix::model::{Context, Model}; + /// + /// // A timer. + /// pub struct Timer {} + /// + /// impl Timer { + /// // Sets an alarm [input port]. + /// pub fn set(&mut self, setting: Duration, context: &Context) { + /// if context.schedule_event(setting, Self::ring, ()).is_err() { + /// println!("The alarm clock can only be set for a future time"); + /// } + /// } + /// + /// // Rings [private input port]. + /// fn ring(&mut self) { + /// println!("Brringggg"); + /// } + /// } + /// + /// impl Model for Timer {} + /// ``` + pub fn schedule_event( + &self, + deadline: impl Deadline, + func: F, + arg: T, + ) -> Result<(), SchedulingError> + where + F: for<'a> InputFn<'a, M, T, S>, + T: Send + Clone + 'static, + S: Send + 'static, + { + let now = self.time(); + let time = deadline.into_time(now); + if now >= time { + return Err(SchedulingError::InvalidScheduledTime); + } + let sender = self.sender.clone(); + schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue); + + Ok(()) + } + + /// Schedules a cancellable event at a future time and returns an action + /// key. + /// + /// An error is returned if the specified deadline is not in the future of + /// the current simulation time. + /// + /// # Examples + /// + /// ``` + /// use asynchronix::model::{Context, Model}; + /// use asynchronix::simulation::ActionKey; + /// use asynchronix::time::MonotonicTime; + /// + /// // An alarm clock that can be cancelled. + /// #[derive(Default)] + /// pub struct CancellableAlarmClock { + /// event_key: Option, + /// } + /// + /// impl CancellableAlarmClock { + /// // Sets an alarm [input port]. 
+ /// pub fn set(&mut self, setting: MonotonicTime, context: &Context) { + /// self.cancel(); + /// match context.schedule_keyed_event(setting, Self::ring, ()) { + /// Ok(event_key) => self.event_key = Some(event_key), + /// Err(_) => println!("The alarm clock can only be set for a future time"), + /// }; + /// } + /// + /// // Cancels the current alarm, if any [input port]. + /// pub fn cancel(&mut self) { + /// self.event_key.take().map(|k| k.cancel()); + /// } + /// + /// // Rings the alarm [private input port]. + /// fn ring(&mut self) { + /// println!("Brringggg!"); + /// } + /// } + /// + /// impl Model for CancellableAlarmClock {} + /// ``` + pub fn schedule_keyed_event( + &self, + deadline: impl Deadline, + func: F, + arg: T, + ) -> Result + where + F: for<'a> InputFn<'a, M, T, S>, + T: Send + Clone + 'static, + S: Send + 'static, + { + let now = self.time(); + let time = deadline.into_time(now); + if now >= time { + return Err(SchedulingError::InvalidScheduledTime); + } + let sender = self.sender.clone(); + let event_key = + schedule_keyed_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue); + + Ok(event_key) + } + + /// Schedules a periodically recurring event at a future time. + /// + /// An error is returned if the specified deadline is not in the future of + /// the current simulation time or if the specified period is null. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// use asynchronix::model::{Context, Model}; + /// use asynchronix::time::MonotonicTime; + /// + /// // An alarm clock beeping at 1Hz. + /// pub struct BeepingAlarmClock {} + /// + /// impl BeepingAlarmClock { + /// // Sets an alarm [input port]. 
+ /// pub fn set(&mut self, setting: MonotonicTime, context: &Context) { + /// if context.schedule_periodic_event( + /// setting, + /// Duration::from_secs(1), // 1Hz = 1/1s + /// Self::beep, + /// () + /// ).is_err() { + /// println!("The alarm clock can only be set for a future time"); + /// } + /// } + /// + /// // Emits a single beep [private input port]. + /// fn beep(&mut self) { + /// println!("Beep!"); + /// } + /// } + /// + /// impl Model for BeepingAlarmClock {} + /// ``` + pub fn schedule_periodic_event( + &self, + deadline: impl Deadline, + period: Duration, + func: F, + arg: T, + ) -> Result<(), SchedulingError> + where + F: for<'a> InputFn<'a, M, T, S> + Clone, + T: Send + Clone + 'static, + S: Send + 'static, + { + let now = self.time(); + let time = deadline.into_time(now); + if now >= time { + return Err(SchedulingError::InvalidScheduledTime); + } + if period.is_zero() { + return Err(SchedulingError::NullRepetitionPeriod); + } + let sender = self.sender.clone(); + schedule_periodic_event_at_unchecked( + time, + period, + func, + arg, + sender, + &self.scheduler_queue, + ); + + Ok(()) + } + + /// Schedules a cancellable, periodically recurring event at a future time + /// and returns an action key. + /// + /// An error is returned if the specified deadline is not in the future of + /// the current simulation time or if the specified period is null. + /// + /// # Examples + /// + /// ``` + /// use std::time::Duration; + /// + /// use asynchronix::model::{Context, Model}; + /// use asynchronix::simulation::ActionKey; + /// use asynchronix::time::MonotonicTime; + /// + /// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or + /// // stopped after it sets off. + /// #[derive(Default)] + /// pub struct CancellableBeepingAlarmClock { + /// event_key: Option, + /// } + /// + /// impl CancellableBeepingAlarmClock { + /// // Sets an alarm [input port]. 
+ /// pub fn set(&mut self, setting: MonotonicTime, context: &Context) { + /// self.cancel(); + /// match context.schedule_keyed_periodic_event( + /// setting, + /// Duration::from_secs(1), // 1Hz = 1/1s + /// Self::beep, + /// () + /// ) { + /// Ok(event_key) => self.event_key = Some(event_key), + /// Err(_) => println!("The alarm clock can only be set for a future time"), + /// }; + /// } + /// + /// // Cancels or stops the alarm [input port]. + /// pub fn cancel(&mut self) { + /// self.event_key.take().map(|k| k.cancel()); + /// } + /// + /// // Emits a single beep [private input port]. + /// fn beep(&mut self) { + /// println!("Beep!"); + /// } + /// } + /// + /// impl Model for CancellableBeepingAlarmClock {} + /// ``` + pub fn schedule_keyed_periodic_event( + &self, + deadline: impl Deadline, + period: Duration, + func: F, + arg: T, + ) -> Result + where + F: for<'a> InputFn<'a, M, T, S> + Clone, + T: Send + Clone + 'static, + S: Send + 'static, + { + let now = self.time(); + let time = deadline.into_time(now); + if now >= time { + return Err(SchedulingError::InvalidScheduledTime); + } + if period.is_zero() { + return Err(SchedulingError::NullRepetitionPeriod); + } + let sender = self.sender.clone(); + let event_key = schedule_periodic_keyed_event_at_unchecked( + time, + period, + func, + arg, + sender, + &self.scheduler_queue, + ); + + Ok(event_key) + } +} + +impl fmt::Debug for Context { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("Context").finish_non_exhaustive() + } +} + +/// A setup context for models. +/// +/// A `SetupContext` can be used by models during the setup stage to +/// create submodels and add them to the simulation bench. +/// +/// # Examples +/// +/// A model that contains two connected submodels. 
+///
+/// ```
+/// use std::time::Duration;
+/// use asynchronix::model::{Model, SetupContext};
+/// use asynchronix::ports::Output;
+/// use asynchronix::simulation::Mailbox;
+///
+/// #[derive(Default)]
+/// pub struct SubmodelA {
+///     out: Output<u32>,
+/// }
+///
+/// impl Model for SubmodelA {}
+///
+/// #[derive(Default)]
+/// pub struct SubmodelB {}
+///
+/// impl SubmodelB {
+///     pub async fn input(&mut self, value: u32) {
+///         println!("Received {}", value);
+///     }
+/// }
+///
+/// impl Model for SubmodelB {}
+///
+/// #[derive(Default)]
+/// pub struct Parent {}
+///
+/// impl Model for Parent {
+///     fn setup(
+///         &mut self,
+///         setup_context: &SetupContext<Self>) {
+///         let mut a = SubmodelA::default();
+///         let b = SubmodelB::default();
+///         let a_mbox = Mailbox::new();
+///         let b_mbox = Mailbox::new();
+///
+///         a.out.connect(SubmodelB::input, &b_mbox);
+///
+///         setup_context.add_model(a, a_mbox);
+///         setup_context.add_model(b, b_mbox);
+///     }
+/// }
+///
+/// ```
+
+#[derive(Debug)]
+pub struct SetupContext<'a, M: Model> {
+    /// Mailbox of the model.
+    pub mailbox: &'a Mailbox<M>,
+    context: &'a Context<M>,
+    executor: &'a Executor,
+}
+
+impl<'a, M: Model> SetupContext<'a, M> {
+    /// Creates a new local context.
+    pub(crate) fn new(
+        mailbox: &'a Mailbox<M>,
+        context: &'a Context<M>,
+        executor: &'a Executor,
+    ) -> Self {
+        Self {
+            mailbox,
+            context,
+            executor,
+        }
+    }
+
+    /// Adds a new model and its mailbox to the simulation bench.
+    pub fn add_model<N: Model>(&self, model: N, mailbox: Mailbox<N>) {
+        simulation::add_model(
+            model,
+            mailbox,
+            self.context.scheduler_queue.clone(),
+            self.context.time.clone(),
+            self.executor,
+        );
+    }
+}
diff --git a/asynchronix/src/ports/input/markers.rs b/asynchronix/src/ports/input/markers.rs
index d502ca4..44b7f2a 100644
--- a/asynchronix/src/ports/input/markers.rs
+++ b/asynchronix/src/ports/input/markers.rs
@@ -6,14 +6,14 @@ pub struct WithoutArguments {}
 
 /// Marker type for regular simulation model methods that take a mutable
-/// reference to the model and a message, without scheduler argument.
+/// reference to the model and a message, without context argument.
 #[derive(Debug)]
-pub struct WithoutScheduler {}
+pub struct WithoutContext {}
 
 /// Marker type for regular simulation model methods that take a mutable
-/// reference to the model, a message and an explicit scheduler argument.
+/// reference to the model, a message and an explicit context argument.
 #[derive(Debug)]
-pub struct WithScheduler {}
+pub struct WithContext {}
 
 /// Marker type for asynchronous simulation model methods that take a mutable
 /// reference to the model, without any other argument.
@@ -21,11 +21,11 @@ pub struct WithScheduler {}
 pub struct AsyncWithoutArguments {}
 
 /// Marker type for asynchronous simulation model methods that take a mutable
-/// reference to the model and a message, without scheduler argument.
+/// reference to the model and a message, without context argument.
 #[derive(Debug)]
-pub struct AsyncWithoutScheduler {}
+pub struct AsyncWithoutContext {}
 
 /// Marker type for asynchronous simulation model methods that take a mutable
-/// reference to the model, a message and an explicit scheduler argument.
+/// reference to the model, a message and an explicit context argument.
#[derive(Debug)] -pub struct AsyncWithScheduler {} +pub struct AsyncWithContext {} diff --git a/asynchronix/src/ports/input/model_fn.rs b/asynchronix/src/ports/input/model_fn.rs index 5ace206..d9668e5 100644 --- a/asynchronix/src/ports/input/model_fn.rs +++ b/asynchronix/src/ports/input/model_fn.rs @@ -2,8 +2,7 @@ use std::future::{ready, Future, Ready}; -use crate::model::Model; -use crate::time::Scheduler; +use crate::model::{Context, Model}; use super::markers; @@ -15,9 +14,9 @@ use super::markers; /// /// ```ignore /// FnOnce(&mut M, T) -/// FnOnce(&mut M, T, &Scheduler) +/// FnOnce(&mut M, T, &Context) /// async fn(&mut M, T) -/// async fn(&mut M, T, &Scheduler) +/// async fn(&mut M, T, &Context) /// where /// M: Model /// ``` @@ -35,7 +34,7 @@ pub trait InputFn<'a, M: Model, T, S>: Send + 'static { type Future: Future + Send + 'a; /// Calls the method. - fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler) -> Self::Future; + fn call(self, model: &'a mut M, arg: T, context: &'a Context) -> Self::Future; } impl<'a, M, F> InputFn<'a, M, (), markers::WithoutArguments> for F @@ -45,36 +44,36 @@ where { type Future = Ready<()>; - fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, _arg: (), _context: &'a Context) -> Self::Future { self(model); ready(()) } } -impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutScheduler> for F +impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutContext> for F where M: Model, F: FnOnce(&'a mut M, T) + Send + 'static, { type Future = Ready<()>; - fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, arg: T, _context: &'a Context) -> Self::Future { self(model, arg); ready(()) } } -impl<'a, M, T, F> InputFn<'a, M, T, markers::WithScheduler> for F +impl<'a, M, T, F> InputFn<'a, M, T, markers::WithContext> for F where M: Model, - F: FnOnce(&'a mut M, T, &'a Scheduler) + Send + 'static, + F: 
FnOnce(&'a mut M, T, &'a Context) + Send + 'static, { type Future = Ready<()>; - fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler) -> Self::Future { - self(model, arg, scheduler); + fn call(self, model: &'a mut M, arg: T, context: &'a Context) -> Self::Future { + self(model, arg, context); ready(()) } @@ -88,12 +87,12 @@ where { type Future = Fut; - fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, _arg: (), _context: &'a Context) -> Self::Future { self(model) } } -impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutScheduler> for F +impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutContext> for F where M: Model, Fut: Future + Send + 'a, @@ -101,21 +100,21 @@ where { type Future = Fut; - fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, arg: T, _context: &'a Context) -> Self::Future { self(model, arg) } } -impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithScheduler> for F +impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithContext> for F where M: Model, Fut: Future + Send + 'a, - F: FnOnce(&'a mut M, T, &'a Scheduler) -> Fut + Send + 'static, + F: FnOnce(&'a mut M, T, &'a Context) -> Fut + Send + 'static, { type Future = Fut; - fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler) -> Self::Future { - self(model, arg, scheduler) + fn call(self, model: &'a mut M, arg: T, context: &'a Context) -> Self::Future { + self(model, arg, context) } } @@ -127,7 +126,7 @@ where /// /// ```ignore /// async fn(&mut M, T) -> R -/// async fn(&mut M, T, &Scheduler) -> R +/// async fn(&mut M, T, &Context) -> R /// where /// M: Model /// ``` @@ -144,7 +143,7 @@ pub trait ReplierFn<'a, M: Model, T, R, S>: Send + 'static { type Future: Future + Send + 'a; /// Calls the method. 
- fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler) -> Self::Future; + fn call(self, model: &'a mut M, arg: T, context: &'a Context) -> Self::Future; } impl<'a, M, R, Fut, F> ReplierFn<'a, M, (), R, markers::AsyncWithoutArguments> for F @@ -155,12 +154,12 @@ where { type Future = Fut; - fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, _arg: (), _context: &'a Context) -> Self::Future { self(model) } } -impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutScheduler> for F +impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutContext> for F where M: Model, Fut: Future + Send + 'a, @@ -168,20 +167,20 @@ where { type Future = Fut; - fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler) -> Self::Future { + fn call(self, model: &'a mut M, arg: T, _context: &'a Context) -> Self::Future { self(model, arg) } } -impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithScheduler> for F +impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithContext> for F where M: Model, Fut: Future + Send + 'a, - F: FnOnce(&'a mut M, T, &'a Scheduler) -> Fut + Send + 'static, + F: FnOnce(&'a mut M, T, &'a Context) -> Fut + Send + 'static, { type Future = Fut; - fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler) -> Self::Future { - self(model, arg, scheduler) + fn call(self, model: &'a mut M, arg: T, context: &'a Context) -> Self::Future { + self(model, arg, context) } } diff --git a/asynchronix/src/ports/output/broadcaster.rs b/asynchronix/src/ports/output/broadcaster.rs index 2f35417..f269312 100644 --- a/asynchronix/src/ports/output/broadcaster.rs +++ b/asynchronix/src/ports/output/broadcaster.rs @@ -505,7 +505,7 @@ mod tests { use futures_executor::block_on; use crate::channel::Receiver; - use crate::time::Scheduler; + use crate::model::Context; use crate::time::{MonotonicTime, TearableAtomicTime}; use 
crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCell; @@ -563,9 +563,9 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_scheduler = - Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); - block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + let dummy_context = + Context::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); } }) }) @@ -614,9 +614,9 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_scheduler = - Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); - block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + let dummy_context = + Context::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); thread::sleep(std::time::Duration::from_millis(100)); } }) diff --git a/asynchronix/src/ports/source.rs b/asynchronix/src/ports/source.rs index 5e745ae..6850005 100644 --- a/asynchronix/src/ports/source.rs +++ b/asynchronix/src/ports/source.rs @@ -8,9 +8,8 @@ use std::time::Duration; use crate::model::Model; use crate::ports::InputFn; use crate::ports::{LineError, LineId}; -use crate::simulation::Address; -use crate::time::{ - Action, ActionKey, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, +use crate::simulation::{ + Action, ActionKey, Address, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, }; use crate::util::slot; diff --git a/asynchronix/src/ports/source/broadcaster.rs b/asynchronix/src/ports/source/broadcaster.rs index c418b83..d3fb990 100644 --- a/asynchronix/src/ports/source/broadcaster.rs +++ b/asynchronix/src/ports/source/broadcaster.rs @@ -430,7 
+430,7 @@ mod tests { use futures_executor::block_on; use crate::channel::Receiver; - use crate::time::Scheduler; + use crate::model::Context; use crate::time::{MonotonicTime, TearableAtomicTime}; use crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCell; @@ -488,9 +488,9 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_scheduler = - Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); - block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + let dummy_context = + Context::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); } }) }) @@ -539,9 +539,9 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_scheduler = - Scheduler::new(dummy_address, dummy_priority_queue, dummy_time); - block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap(); + let dummy_context = + Context::new(dummy_address, dummy_priority_queue, dummy_time); + block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); thread::sleep(std::time::Duration::from_millis(100)); } }) diff --git a/asynchronix/src/rpc/codegen/custom_transport.rs b/asynchronix/src/rpc/codegen/custom_transport.rs index 61eac9d..43a91bd 100644 --- a/asynchronix/src/rpc/codegen/custom_transport.rs +++ b/asynchronix/src/rpc/codegen/custom_transport.rs @@ -11,7 +11,10 @@ pub struct ServerError { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyRequest { /// Expects exactly 1 variant. 
- #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] + #[prost( + oneof = "any_request::Request", + tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11" + )] pub request: ::core::option::Option, } /// Nested message and enum types in `AnyRequest`. @@ -48,7 +51,10 @@ pub mod any_request { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyReply { /// Contains exactly 1 variant. - #[prost(oneof = "any_reply::Reply", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100")] + #[prost( + oneof = "any_reply::Reply", + tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100" + )] pub reply: ::core::option::Option, } /// Nested message and enum types in `AnyReply`. diff --git a/asynchronix/src/rpc/codegen/simulation.rs b/asynchronix/src/rpc/codegen/simulation.rs index 26f7518..abe0073 100644 --- a/asynchronix/src/rpc/codegen/simulation.rs +++ b/asynchronix/src/rpc/codegen/simulation.rs @@ -411,31 +411,19 @@ pub mod simulation_server { async fn schedule_event( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn cancel_event( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn process_event( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn process_query( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; + ) -> std::result::Result, tonic::Status>; async fn read_events( &self, request: tonic::Request, @@ -472,10 +460,7 @@ pub mod simulation_server { max_encoding_message_size: None, } } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService + pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService where F: tonic::service::Interceptor, { @@ -531,21 +516,15 @@ pub 
mod simulation_server { "/simulation.Simulation/Init" => { #[allow(non_camel_case_types)] struct InitSvc(pub Arc); - impl tonic::server::UnaryService - for InitSvc { + impl tonic::server::UnaryService for InitSvc { type Response = super::InitReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::init(&inner, request).await - }; + let fut = async move { ::init(&inner, request).await }; Box::pin(fut) } } @@ -575,21 +554,15 @@ pub mod simulation_server { "/simulation.Simulation/Time" => { #[allow(non_camel_case_types)] struct TimeSvc(pub Arc); - impl tonic::server::UnaryService - for TimeSvc { + impl tonic::server::UnaryService for TimeSvc { type Response = super::TimeReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::time(&inner, request).await - }; + let fut = async move { ::time(&inner, request).await }; Box::pin(fut) } } @@ -619,21 +592,15 @@ pub mod simulation_server { "/simulation.Simulation/Step" => { #[allow(non_camel_case_types)] struct StepSvc(pub Arc); - impl tonic::server::UnaryService - for StepSvc { + impl tonic::server::UnaryService for StepSvc { type Response = super::StepReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::step(&inner, request).await - }; + let fut = async move { ::step(&inner, request).await }; Box::pin(fut) } } @@ -663,23 +630,16 @@ pub mod simulation_server { "/simulation.Simulation/StepUntil" => { #[allow(non_camel_case_types)] struct StepUntilSvc(pub Arc); - impl< 
- T: Simulation, - > tonic::server::UnaryService - for StepUntilSvc { + impl tonic::server::UnaryService for StepUntilSvc { type Response = super::StepUntilReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::step_until(&inner, request).await - }; + let fut = + async move { ::step_until(&inner, request).await }; Box::pin(fut) } } @@ -709,15 +669,11 @@ pub mod simulation_server { "/simulation.Simulation/ScheduleEvent" => { #[allow(non_camel_case_types)] struct ScheduleEventSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for ScheduleEventSvc { + impl tonic::server::UnaryService + for ScheduleEventSvc + { type Response = super::ScheduleEventReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -755,15 +711,9 @@ pub mod simulation_server { "/simulation.Simulation/CancelEvent" => { #[allow(non_camel_case_types)] struct CancelEventSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for CancelEventSvc { + impl tonic::server::UnaryService for CancelEventSvc { type Response = super::CancelEventReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -801,15 +751,9 @@ pub mod simulation_server { "/simulation.Simulation/ProcessEvent" => { #[allow(non_camel_case_types)] struct ProcessEventSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for ProcessEventSvc { + impl tonic::server::UnaryService for ProcessEventSvc { type Response = super::ProcessEventReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: 
tonic::Request, @@ -847,15 +791,9 @@ pub mod simulation_server { "/simulation.Simulation/ProcessQuery" => { #[allow(non_camel_case_types)] struct ProcessQuerySvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for ProcessQuerySvc { + impl tonic::server::UnaryService for ProcessQuerySvc { type Response = super::ProcessQueryReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -893,15 +831,9 @@ pub mod simulation_server { "/simulation.Simulation/ReadEvents" => { #[allow(non_camel_case_types)] struct ReadEventsSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for ReadEventsSvc { + impl tonic::server::UnaryService for ReadEventsSvc { type Response = super::ReadEventsReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, @@ -939,23 +871,16 @@ pub mod simulation_server { "/simulation.Simulation/OpenSink" => { #[allow(non_camel_case_types)] struct OpenSinkSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for OpenSinkSvc { + impl tonic::server::UnaryService for OpenSinkSvc { type Response = super::OpenSinkReply; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::open_sink(&inner, request).await - }; + let fut = + async move { ::open_sink(&inner, request).await }; Box::pin(fut) } } @@ -985,23 +910,16 @@ pub mod simulation_server { "/simulation.Simulation/CloseSink" => { #[allow(non_camel_case_types)] struct CloseSinkSvc(pub Arc); - impl< - T: Simulation, - > tonic::server::UnaryService - for CloseSinkSvc { + impl tonic::server::UnaryService for CloseSinkSvc { type Response = super::CloseSinkReply; - type 
Future = BoxFuture< - tonic::Response, - tonic::Status, - >; + type Future = BoxFuture, tonic::Status>; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { - ::close_sink(&inner, request).await - }; + let fut = + async move { ::close_sink(&inner, request).await }; Box::pin(fut) } } @@ -1028,18 +946,14 @@ pub mod simulation_server { }; Box::pin(fut) } - _ => { - Box::pin(async move { - Ok( - http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap(), - ) - }) - } + _ => Box::pin(async move { + Ok(http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap()) + }), } } } diff --git a/asynchronix/src/simulation.rs b/asynchronix/src/simulation.rs index af3dfe9..1d8d5f4 100644 --- a/asynchronix/src/simulation.rs +++ b/asynchronix/src/simulation.rs @@ -98,9 +98,9 @@ //! connects or disconnects a port, such as: //! //! ``` -//! # use asynchronix::model::Model; +//! # use asynchronix::model::{Context, Model}; //! # use asynchronix::ports::Output; -//! # use asynchronix::time::{MonotonicTime, Scheduler}; +//! # use asynchronix::time::MonotonicTime; //! # use asynchronix::simulation::{Mailbox, SimInit}; //! # pub struct ModelA { //! # pub output: Output, @@ -123,9 +123,16 @@ //! ); //! 
``` mod mailbox; +mod scheduler; mod sim_init; pub use mailbox::{Address, Mailbox}; +pub(crate) use scheduler::{ + schedule_event_at_unchecked, schedule_keyed_event_at_unchecked, + schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked, + KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, SchedulerQueue, +}; +pub use scheduler::{Action, ActionKey, Deadline, SchedulingError}; pub use sim_init::SimInit; use std::error::Error; @@ -137,15 +144,12 @@ use std::time::Duration; use recycle_box::{coerce_box, RecycleBox}; use crate::executor::Executor; -use crate::model::Model; +use crate::model::{Context, Model, SetupContext}; use crate::ports::{InputFn, ReplierFn}; -use crate::time::{ - self, Action, ActionKey, Clock, Deadline, MonotonicTime, SchedulerQueue, SchedulingError, - TearableAtomicTime, -}; +use crate::time::{Clock, MonotonicTime, TearableAtomicTime}; use crate::util::seq_futures::SeqFuture; use crate::util::slot; -use crate::util::sync_cell::SyncCell; +use crate::util::sync_cell::{SyncCell, SyncCellReader}; /// Simulation environment. /// @@ -157,9 +161,9 @@ use crate::util::sync_cell::SyncCell; /// A [`Simulation`] object also manages an event scheduling queue and /// simulation time. The scheduling queue can be accessed from the simulation /// itself, but also from models via the optional -/// [`&Scheduler`](time::Scheduler) argument of input and replier port methods. +/// [`&Context`](crate::model::Context) argument of input and replier port methods. /// Likewise, simulation time can be accessed with the [`Simulation::time()`] -/// method, or from models with the [`Scheduler::time()`](time::Scheduler::time) +/// method, or from models with the [`Context::time()`](crate::model::Context::time) /// method. /// /// Events and queries can be scheduled immediately, *i.e.* for the current @@ -177,7 +181,7 @@ use crate::util::sync_cell::SyncCell; /// /// 1. 
increment simulation time until that of the next scheduled event in /// chronological order, then -/// 2. call [`Clock::synchronize()`](time::Clock::synchronize) which, unless the +/// 2. call [`Clock::synchronize()`](crate::time::Clock::synchronize) which, unless the /// simulation is configured to run as fast as possible, blocks until the /// desired wall clock time, and finally /// 3. run all computations scheduled for the new simulation time. @@ -217,7 +221,7 @@ impl Simulation { /// that event as well as all other event scheduled for the same time. /// /// Processing is gated by a (possibly blocking) call to - /// [`Clock::synchronize()`](time::Clock::synchronize) on the configured + /// [`Clock::synchronize()`](crate::time::Clock::synchronize) on the configured /// simulation clock. This method blocks until all newly processed events /// have completed. pub fn step(&mut self) { @@ -292,7 +296,7 @@ impl Simulation { /// Events scheduled for the same time and targeting the same model are /// guaranteed to be processed according to the scheduling order. /// - /// See also: [`time::Scheduler::schedule_event`]. + /// See also: [`Context::schedule_event`](crate::model::Context::schedule_event). pub fn schedule_event( &mut self, deadline: impl Deadline, @@ -311,8 +315,7 @@ impl Simulation { if now >= time { return Err(SchedulingError::InvalidScheduledTime); } - - time::schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue); + schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue); Ok(()) } @@ -325,7 +328,7 @@ impl Simulation { /// Events scheduled for the same time and targeting the same model are /// guaranteed to be processed according to the scheduling order. /// - /// See also: [`time::Scheduler::schedule_keyed_event`]. + /// See also: [`Context::schedule_keyed_event`](crate::model::Context::schedule_keyed_event). 
pub fn schedule_keyed_event( &mut self, deadline: impl Deadline, @@ -344,7 +347,7 @@ impl Simulation { if now >= time { return Err(SchedulingError::InvalidScheduledTime); } - let event_key = time::schedule_keyed_event_at_unchecked( + let event_key = schedule_keyed_event_at_unchecked( time, func, arg, @@ -363,7 +366,7 @@ impl Simulation { /// Events scheduled for the same time and targeting the same model are /// guaranteed to be processed according to the scheduling order. /// - /// See also: [`time::Scheduler::schedule_periodic_event`]. + /// See also: [`Context::schedule_periodic_event`](crate::model::Context::schedule_periodic_event). pub fn schedule_periodic_event( &mut self, deadline: impl Deadline, @@ -386,7 +389,7 @@ impl Simulation { if period.is_zero() { return Err(SchedulingError::NullRepetitionPeriod); } - time::schedule_periodic_event_at_unchecked( + schedule_periodic_event_at_unchecked( time, period, func, @@ -407,7 +410,7 @@ impl Simulation { /// Events scheduled for the same time and targeting the same model are /// guaranteed to be processed according to the scheduling order. /// - /// See also: [`time::Scheduler::schedule_keyed_periodic_event`]. + /// See also: [`Context::schedule_keyed_periodic_event`](crate::model::Context::schedule_keyed_periodic_event). pub fn schedule_keyed_periodic_event( &mut self, deadline: impl Deadline, @@ -430,7 +433,7 @@ impl Simulation { if period.is_zero() { return Err(SchedulingError::NullRepetitionPeriod); } - let event_key = time::schedule_periodic_keyed_event_at_unchecked( + let event_key = schedule_periodic_keyed_event_at_unchecked( time, period, func, @@ -658,3 +661,25 @@ impl fmt::Display for QueryError { } impl Error for QueryError {} + +/// Adds a model and its mailbox to the simulation bench. 
+pub(crate) fn add_model<M: Model>(
+    mut model: M,
+    mailbox: Mailbox<M>,
+    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
+    time: SyncCellReader<TearableAtomicTime>,
+    executor: &Executor,
+) {
+    let sender = mailbox.0.sender();
+
+    let context = Context::new(sender, scheduler_queue, time);
+    let setup_context = SetupContext::new(&mailbox, &context, executor);
+
+    model.setup(&setup_context);
+
+    let mut receiver = mailbox.0;
+    executor.spawn_and_forget(async move {
+        let mut model = model.init(&context).await.0;
+        while receiver.recv(&mut model, &context).await.is_ok() {}
+    });
+}
diff --git a/asynchronix/src/time/scheduler.rs b/asynchronix/src/simulation/scheduler.rs
similarity index 57%
rename from asynchronix/src/time/scheduler.rs
rename to asynchronix/src/simulation/scheduler.rs
index 740f644..79a4682 100644
--- a/asynchronix/src/time/scheduler.rs
+++ b/asynchronix/src/simulation/scheduler.rs
@@ -17,9 +17,8 @@ use crate::channel::Sender;
 use crate::executor::Executor;
 use crate::model::Model;
 use crate::ports::InputFn;
-use crate::time::{MonotonicTime, TearableAtomicTime};
+use crate::time::MonotonicTime;
 use crate::util::priority_queue::PriorityQueue;
-use crate::util::sync_cell::SyncCellReader;
 
 /// Shorthand for the scheduler queue type.
@@ -55,387 +54,6 @@ impl Deadline for MonotonicTime {
     }
 }
 
-/// A local scheduler for models.
-///
-/// A `Scheduler` is a handle to the global scheduler associated to a model
-/// instance. It can be used by the model to retrieve the simulation time or
-/// schedule delayed actions on itself.
-///
-/// ### Caveat: self-scheduling `async` methods
-///
-/// Due to a current rustc issue, `async` methods that schedule themselves will
-/// not compile unless an explicit `Send` bound is added to the returned future.
-/// This can be done by replacing the `async` signature with a partially -/// desugared signature such as: -/// -/// ```ignore -/// fn self_scheduling_method<'a>( -/// &'a mut self, -/// arg: MyEventType, -/// scheduler: &'a Scheduler -/// ) -> impl Future + Send + 'a { -/// async move { -/// /* implementation */ -/// } -/// } -/// ``` -/// -/// Self-scheduling methods which are not `async` are not affected by this -/// issue. -/// -/// # Examples -/// -/// A model that sends a greeting after some delay. -/// -/// ``` -/// use std::time::Duration; -/// use asynchronix::model::Model; -/// use asynchronix::ports::Output; -/// use asynchronix::time::Scheduler; -/// -/// #[derive(Default)] -/// pub struct DelayedGreeter { -/// msg_out: Output, -/// } -/// -/// impl DelayedGreeter { -/// // Triggers a greeting on the output port after some delay [input port]. -/// pub async fn greet_with_delay(&mut self, delay: Duration, scheduler: &Scheduler) { -/// let time = scheduler.time(); -/// let greeting = format!("Hello, this message was scheduled at: {:?}.", time); -/// -/// if delay.is_zero() { -/// self.msg_out.send(greeting).await; -/// } else { -/// scheduler.schedule_event(delay, Self::send_msg, greeting).unwrap(); -/// } -/// } -/// -/// // Sends a message to the output [private input port]. -/// async fn send_msg(&mut self, msg: String) { -/// self.msg_out.send(msg).await; -/// } -/// } -/// impl Model for DelayedGreeter {} -/// ``` - -// The self-scheduling caveat seems related to this issue: -// https://github.com/rust-lang/rust/issues/78649 -pub struct Scheduler { - sender: Sender, - scheduler_queue: Arc>, - time: SyncCellReader, -} - -impl Scheduler { - /// Creates a new local scheduler. - pub(crate) fn new( - sender: Sender, - scheduler_queue: Arc>, - time: SyncCellReader, - ) -> Self { - Self { - sender, - scheduler_queue, - time, - } - } - - /// Returns the current simulation time. 
- /// - /// # Examples - /// - /// ``` - /// use asynchronix::model::Model; - /// use asynchronix::time::{MonotonicTime, Scheduler}; - /// - /// fn is_third_millenium(scheduler: &Scheduler) -> bool { - /// let time = scheduler.time(); - /// time >= MonotonicTime::new(978307200, 0).unwrap() - /// && time < MonotonicTime::new(32535216000, 0).unwrap() - /// } - /// ``` - pub fn time(&self) -> MonotonicTime { - self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time") - } - - /// Schedules an event at a future time. - /// - /// An error is returned if the specified deadline is not in the future of - /// the current simulation time. - /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// - /// use asynchronix::model::Model; - /// use asynchronix::time::Scheduler; - /// - /// // A timer. - /// pub struct Timer {} - /// - /// impl Timer { - /// // Sets an alarm [input port]. - /// pub fn set(&mut self, setting: Duration, scheduler: &Scheduler) { - /// if scheduler.schedule_event(setting, Self::ring, ()).is_err() { - /// println!("The alarm clock can only be set for a future time"); - /// } - /// } - /// - /// // Rings [private input port]. - /// fn ring(&mut self) { - /// println!("Brringggg"); - /// } - /// } - /// - /// impl Model for Timer {} - /// ``` - pub fn schedule_event( - &self, - deadline: impl Deadline, - func: F, - arg: T, - ) -> Result<(), SchedulingError> - where - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, - S: Send + 'static, - { - let now = self.time(); - let time = deadline.into_time(now); - if now >= time { - return Err(SchedulingError::InvalidScheduledTime); - } - let sender = self.sender.clone(); - schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue); - - Ok(()) - } - - /// Schedules a cancellable event at a future time and returns an action - /// key. 
- /// - /// An error is returned if the specified deadline is not in the future of - /// the current simulation time. - /// - /// # Examples - /// - /// ``` - /// use asynchronix::model::Model; - /// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; - /// - /// // An alarm clock that can be cancelled. - /// #[derive(Default)] - /// pub struct CancellableAlarmClock { - /// event_key: Option, - /// } - /// - /// impl CancellableAlarmClock { - /// // Sets an alarm [input port]. - /// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler) { - /// self.cancel(); - /// match scheduler.schedule_keyed_event(setting, Self::ring, ()) { - /// Ok(event_key) => self.event_key = Some(event_key), - /// Err(_) => println!("The alarm clock can only be set for a future time"), - /// }; - /// } - /// - /// // Cancels the current alarm, if any [input port]. - /// pub fn cancel(&mut self) { - /// self.event_key.take().map(|k| k.cancel()); - /// } - /// - /// // Rings the alarm [private input port]. - /// fn ring(&mut self) { - /// println!("Brringggg!"); - /// } - /// } - /// - /// impl Model for CancellableAlarmClock {} - /// ``` - pub fn schedule_keyed_event( - &self, - deadline: impl Deadline, - func: F, - arg: T, - ) -> Result - where - F: for<'a> InputFn<'a, M, T, S>, - T: Send + Clone + 'static, - S: Send + 'static, - { - let now = self.time(); - let time = deadline.into_time(now); - if now >= time { - return Err(SchedulingError::InvalidScheduledTime); - } - let sender = self.sender.clone(); - let event_key = - schedule_keyed_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue); - - Ok(event_key) - } - - /// Schedules a periodically recurring event at a future time. - /// - /// An error is returned if the specified deadline is not in the future of - /// the current simulation time or if the specified period is null. 
- /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// - /// use asynchronix::model::Model; - /// use asynchronix::time::{MonotonicTime, Scheduler}; - /// - /// // An alarm clock beeping at 1Hz. - /// pub struct BeepingAlarmClock {} - /// - /// impl BeepingAlarmClock { - /// // Sets an alarm [input port]. - /// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler) { - /// if scheduler.schedule_periodic_event( - /// setting, - /// Duration::from_secs(1), // 1Hz = 1/1s - /// Self::beep, - /// () - /// ).is_err() { - /// println!("The alarm clock can only be set for a future time"); - /// } - /// } - /// - /// // Emits a single beep [private input port]. - /// fn beep(&mut self) { - /// println!("Beep!"); - /// } - /// } - /// - /// impl Model for BeepingAlarmClock {} - /// ``` - pub fn schedule_periodic_event( - &self, - deadline: impl Deadline, - period: Duration, - func: F, - arg: T, - ) -> Result<(), SchedulingError> - where - F: for<'a> InputFn<'a, M, T, S> + Clone, - T: Send + Clone + 'static, - S: Send + 'static, - { - let now = self.time(); - let time = deadline.into_time(now); - if now >= time { - return Err(SchedulingError::InvalidScheduledTime); - } - if period.is_zero() { - return Err(SchedulingError::NullRepetitionPeriod); - } - let sender = self.sender.clone(); - schedule_periodic_event_at_unchecked( - time, - period, - func, - arg, - sender, - &self.scheduler_queue, - ); - - Ok(()) - } - - /// Schedules a cancellable, periodically recurring event at a future time - /// and returns an action key. - /// - /// An error is returned if the specified deadline is not in the future of - /// the current simulation time or if the specified period is null. 
- /// - /// # Examples - /// - /// ``` - /// use std::time::Duration; - /// - /// use asynchronix::model::Model; - /// use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; - /// - /// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or - /// // stopped after it sets off. - /// #[derive(Default)] - /// pub struct CancellableBeepingAlarmClock { - /// event_key: Option, - /// } - /// - /// impl CancellableBeepingAlarmClock { - /// // Sets an alarm [input port]. - /// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler) { - /// self.cancel(); - /// match scheduler.schedule_keyed_periodic_event( - /// setting, - /// Duration::from_secs(1), // 1Hz = 1/1s - /// Self::beep, - /// () - /// ) { - /// Ok(event_key) => self.event_key = Some(event_key), - /// Err(_) => println!("The alarm clock can only be set for a future time"), - /// }; - /// } - /// - /// // Cancels or stops the alarm [input port]. - /// pub fn cancel(&mut self) { - /// self.event_key.take().map(|k| k.cancel()); - /// } - /// - /// // Emits a single beep [private input port]. 
- /// fn beep(&mut self) { - /// println!("Beep!"); - /// } - /// } - /// - /// impl Model for CancellableBeepingAlarmClock {} - /// ``` - pub fn schedule_keyed_periodic_event( - &self, - deadline: impl Deadline, - period: Duration, - func: F, - arg: T, - ) -> Result - where - F: for<'a> InputFn<'a, M, T, S> + Clone, - T: Send + Clone + 'static, - S: Send + 'static, - { - let now = self.time(); - let time = deadline.into_time(now); - if now >= time { - return Err(SchedulingError::InvalidScheduledTime); - } - if period.is_zero() { - return Err(SchedulingError::NullRepetitionPeriod); - } - let sender = self.sender.clone(); - let event_key = schedule_periodic_keyed_event_at_unchecked( - time, - period, - func, - arg, - sender, - &self.scheduler_queue, - ); - - Ok(event_key) - } -} - -impl fmt::Debug for Scheduler { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("Scheduler").finish_non_exhaustive() - } -} - /// Handle to a scheduled action. /// /// An `ActionKey` can be used to cancel a scheduled action. @@ -468,7 +86,7 @@ impl PartialEq for ActionKey { /// Implements equality by considering clones to be equivalent, rather than /// keys with the same `is_cancelled` value. 
fn eq(&self, other: &Self) -> bool { - ptr::addr_eq(&*self.is_cancelled, &*other.is_cancelled) + ptr::eq(&*self.is_cancelled, &*other.is_cancelled) } } diff --git a/asynchronix/src/simulation/sim_init.rs b/asynchronix/src/simulation/sim_init.rs index 11c774f..ae22589 100644 --- a/asynchronix/src/simulation/sim_init.rs +++ b/asynchronix/src/simulation/sim_init.rs @@ -3,12 +3,12 @@ use std::sync::{Arc, Mutex}; use crate::executor::Executor; use crate::model::Model; -use crate::time::{Clock, NoClock, Scheduler}; -use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime}; +use crate::time::{Clock, NoClock}; +use crate::time::{MonotonicTime, TearableAtomicTime}; use crate::util::priority_queue::PriorityQueue; use crate::util::sync_cell::SyncCell; -use super::{Mailbox, Simulation}; +use super::{add_model, Mailbox, SchedulerQueue, Simulation}; /// Builder for a multi-threaded, discrete-event simulation. pub struct SimInit { @@ -44,15 +44,8 @@ impl SimInit { pub fn add_model(self, model: M, mailbox: Mailbox) -> Self { let scheduler_queue = self.scheduler_queue.clone(); let time = self.time.reader(); - let mut receiver = mailbox.0; - self.executor.spawn_and_forget(async move { - let sender = receiver.sender(); - let scheduler = Scheduler::new(sender, scheduler_queue, time); - let mut model = model.init(&scheduler).await.0; - - while receiver.recv(&mut model, &scheduler).await.is_ok() {} - }); + add_model(model, mailbox, scheduler_queue, time, &self.executor); self } diff --git a/asynchronix/src/time.rs b/asynchronix/src/time.rs index acc2f50..df5a613 100644 --- a/asynchronix/src/time.rs +++ b/asynchronix/src/time.rs @@ -4,9 +4,7 @@ //! //! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard, //! * [`Clock`]: a trait for types that can synchronize a simulation, -//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`], -//! * [`Scheduler`]: a model-local handle to the global scheduler that can be -//! 
used by models to schedule future actions onto themselves. +//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`]. //! //! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time //! @@ -17,8 +15,8 @@ //! the specified timestamp. //! //! ``` -//! use asynchronix::model::Model; -//! use asynchronix::time::{MonotonicTime, Scheduler}; +//! use asynchronix::model::{Context, Model}; +//! use asynchronix::time::MonotonicTime; //! //! // An alarm clock model. //! pub struct AlarmClock { @@ -32,8 +30,8 @@ //! } //! //! // Sets an alarm [input port]. -//! pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler) { -//! if scheduler.schedule_event(setting, Self::ring, ()).is_err() { +//! pub fn set(&mut self, setting: MonotonicTime, context: &Context) { +//! if context.schedule_event(setting, Self::ring, ()).is_err() { //! println!("The alarm clock can only be set for a future time"); //! } //! } @@ -49,15 +47,8 @@ mod clock; mod monotonic_time; -mod scheduler; pub use tai_time::MonotonicTime; pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock}; pub(crate) use monotonic_time::TearableAtomicTime; -pub(crate) use scheduler::{ - schedule_event_at_unchecked, schedule_keyed_event_at_unchecked, - schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked, - KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, SchedulerQueue, -}; -pub use scheduler::{Action, ActionKey, Deadline, Scheduler, SchedulingError}; diff --git a/asynchronix/src/util/sync_cell.rs b/asynchronix/src/util/sync_cell.rs index e4a84e6..01bc8ec 100644 --- a/asynchronix/src/util/sync_cell.rs +++ b/asynchronix/src/util/sync_cell.rs @@ -143,7 +143,6 @@ impl SyncCell { /// A handle to a `SyncCell` that enables synchronized reads from multiple /// threads. 
-#[derive(Clone)] pub(crate) struct SyncCellReader { inner: Arc>, } @@ -186,6 +185,14 @@ impl SyncCellReader { } } +impl Clone for SyncCellReader { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + /// An error returned when attempting to perform a read operation concurrently /// with a write operation. #[derive(Clone, Copy, Debug, PartialEq, Eq)] diff --git a/asynchronix/tests/model_scheduling.rs b/asynchronix/tests/model_scheduling.rs index 50bdce4..3f4afce 100644 --- a/asynchronix/tests/model_scheduling.rs +++ b/asynchronix/tests/model_scheduling.rs @@ -2,10 +2,10 @@ use std::time::Duration; -use asynchronix::model::Model; +use asynchronix::model::{Context, Model}; use asynchronix::ports::{EventBuffer, Output}; -use asynchronix::simulation::{Mailbox, SimInit}; -use asynchronix::time::{ActionKey, MonotonicTime, Scheduler}; +use asynchronix::simulation::{ActionKey, Mailbox, SimInit}; +use asynchronix::time::MonotonicTime; #[test] fn model_schedule_event() { @@ -14,9 +14,9 @@ fn model_schedule_event() { output: Output<()>, } impl TestModel { - fn trigger(&mut self, _: (), scheduler: &Scheduler) { - scheduler - .schedule_event(scheduler.time() + Duration::from_secs(2), Self::action, ()) + fn trigger(&mut self, _: (), context: &Context) { + context + .schedule_event(context.time() + Duration::from_secs(2), Self::action, ()) .unwrap(); } async fn action(&mut self) { @@ -51,12 +51,12 @@ fn model_cancel_future_keyed_event() { key: Option, } impl TestModel { - fn trigger(&mut self, _: (), scheduler: &Scheduler) { - scheduler - .schedule_event(scheduler.time() + Duration::from_secs(1), Self::action1, ()) + fn trigger(&mut self, _: (), context: &Context) { + context + .schedule_event(context.time() + Duration::from_secs(1), Self::action1, ()) .unwrap(); - self.key = scheduler - .schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ()) + self.key = context + .schedule_keyed_event(context.time() + Duration::from_secs(2), 
Self::action2, ()) .ok(); } async fn action1(&mut self) { @@ -97,12 +97,12 @@ fn model_cancel_same_time_keyed_event() { key: Option, } impl TestModel { - fn trigger(&mut self, _: (), scheduler: &Scheduler) { - scheduler - .schedule_event(scheduler.time() + Duration::from_secs(2), Self::action1, ()) + fn trigger(&mut self, _: (), context: &Context) { + context + .schedule_event(context.time() + Duration::from_secs(2), Self::action1, ()) .unwrap(); - self.key = scheduler - .schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ()) + self.key = context + .schedule_keyed_event(context.time() + Duration::from_secs(2), Self::action2, ()) .ok(); } async fn action1(&mut self) { @@ -142,10 +142,10 @@ fn model_schedule_periodic_event() { output: Output, } impl TestModel { - fn trigger(&mut self, _: (), scheduler: &Scheduler) { - scheduler + fn trigger(&mut self, _: (), context: &Context) { + context .schedule_periodic_event( - scheduler.time() + Duration::from_secs(2), + context.time() + Duration::from_secs(2), Duration::from_secs(3), Self::action, 42, @@ -190,10 +190,10 @@ fn model_cancel_periodic_event() { key: Option, } impl TestModel { - fn trigger(&mut self, _: (), scheduler: &Scheduler) { - self.key = scheduler + fn trigger(&mut self, _: (), context: &Context) { + self.key = context .schedule_keyed_periodic_event( - scheduler.time() + Duration::from_secs(2), + context.time() + Duration::from_secs(2), Duration::from_secs(3), Self::action, (), diff --git a/asynchronix/tests/simulation_scheduling.rs b/asynchronix/tests/simulation_scheduling.rs index 70956a1..6919091 100644 --- a/asynchronix/tests/simulation_scheduling.rs +++ b/asynchronix/tests/simulation_scheduling.rs @@ -2,6 +2,8 @@ use std::time::Duration; +#[cfg(not(miri))] +use asynchronix::model::Context; use asynchronix::model::Model; use asynchronix::ports::{EventBuffer, Output}; use asynchronix::simulation::{Address, Mailbox, SimInit, Simulation}; @@ -219,21 +221,9 @@ impl TimestampModel 
{ } #[cfg(not(miri))] impl Model for TimestampModel { - fn init( - mut self, - _scheduler: &asynchronix::time::Scheduler, - ) -> std::pin::Pin< - Box< - dyn futures_util::Future> - + Send - + '_, - >, - > { - Box::pin(async { - self.stamp.send((Instant::now(), SystemTime::now())).await; - - self.into() - }) + async fn init(mut self, _: &Context) -> asynchronix::model::InitializedModel { + self.stamp.send((Instant::now(), SystemTime::now())).await; + self.into() } } @@ -267,7 +257,7 @@ fn timestamp_bench( #[test] fn simulation_system_clock_from_instant() { let t0 = MonotonicTime::EPOCH; - const TOLERANCE: f64 = 0.0005; // [s] + const TOLERANCE: f64 = 0.005; // [s] // The reference simulation time is set in the past of t0 so that the // simulation starts in the future when the reference wall clock time is From 9956c4fa3fd97b58df8311307de6a9dee6e61069 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Fri, 26 Apr 2024 13:26:49 +0200 Subject: [PATCH 03/12] CI: add --all-features and force check on `dev` --- .github/workflows/ci.yml | 14 +++++++------- asynchronix/src/rpc/endpoint_registry.rs | 2 +- asynchronix/src/rpc/key_registry.rs | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index efb6ebc..8c952a0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,7 @@ name: CI on: pull_request: push: - branches: [ main ] + branches: [ main, dev ] env: RUSTFLAGS: -Dwarnings @@ -28,7 +28,7 @@ jobs: toolchain: ${{ matrix.rust }} - name: Run cargo check - run: cargo check + run: cargo check --features="rpc grpc-server" test: name: Test suite @@ -41,7 +41,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run cargo test - run: cargo test + run: cargo test --features="rpc grpc-server" loom-dry-run: name: Loom dry run @@ -54,7 +54,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Dry-run cargo test (Loom) - run: cargo test --no-run --tests + run: cargo test --no-run --tests 
--features="rpc grpc-server" env: RUSTFLAGS: --cfg asynchronix_loom @@ -71,7 +71,7 @@ jobs: components: miri - name: Run cargo miri tests - run: cargo miri test --tests --lib + run: cargo miri test --tests --lib --features="rpc grpc-server" env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 @@ -104,7 +104,7 @@ jobs: run: cargo fmt --all -- --check - name: Run cargo clippy - run: cargo clippy + run: cargo clippy --features="rpc grpc-server" docs: name: Docs @@ -117,4 +117,4 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run cargo doc - run: cargo doc --no-deps --document-private-items + run: cargo doc --no-deps --features="rpc grpc-server" --document-private-items diff --git a/asynchronix/src/rpc/endpoint_registry.rs b/asynchronix/src/rpc/endpoint_registry.rs index 8a71ffb..9ded8e5 100644 --- a/asynchronix/src/rpc/endpoint_registry.rs +++ b/asynchronix/src/rpc/endpoint_registry.rs @@ -9,7 +9,7 @@ use serde::de::DeserializeOwned; use serde::Serialize; use crate::ports::{EventSinkStream, EventSource, QuerySource, ReplyReceiver}; -use crate::time::{Action, ActionKey}; +use crate::simulation::{Action, ActionKey}; /// A registry that holds all sources and sinks meant to be accessed through /// remote procedure calls. 
diff --git a/asynchronix/src/rpc/key_registry.rs b/asynchronix/src/rpc/key_registry.rs index fa76e47..0c6678e 100644 --- a/asynchronix/src/rpc/key_registry.rs +++ b/asynchronix/src/rpc/key_registry.rs @@ -1,4 +1,5 @@ -use crate::time::{ActionKey, MonotonicTime}; +use crate::simulation::ActionKey; +use crate::time::MonotonicTime; use crate::util::indexed_priority_queue::{IndexedPriorityQueue, InsertKey}; pub(crate) type KeyRegistryId = InsertKey; From 9b4f69c17e8ae4e66050c68fadf68d23b97fd7d7 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Fri, 26 Apr 2024 15:15:51 +0200 Subject: [PATCH 04/12] gRPC: small fix + minor changes --- asynchronix/Cargo.toml | 2 +- asynchronix/src/ports/sink.rs | 21 +- asynchronix/src/ports/sink/event_buffer.rs | 4 +- asynchronix/src/rpc/api/simulation.proto | 6 +- asynchronix/src/rpc/codegen.rs | 2 + .../src/rpc/codegen/custom_transport.rs | 10 +- asynchronix/src/rpc/codegen/simulation.rs | 182 +++++++++++++----- asynchronix/src/rpc/endpoint_registry.rs | 12 +- asynchronix/src/rpc/generic_server.rs | 8 +- asynchronix/src/rpc/grpc.rs | 4 +- 10 files changed, 170 insertions(+), 81 deletions(-) diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index a91ff21..9533780 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -58,7 +58,7 @@ rmp-serde = { version = "1.1", optional = true } serde = { version = "1", optional = true } # gRPC dependencies. -tokio = { version = "1.0", optional = true } +tokio = { version = "1.0", features=["net"], optional = true } tonic = { version = "0.11", optional = true } diff --git a/asynchronix/src/ports/sink.rs b/asynchronix/src/ports/sink.rs index 639ed4c..d22d008 100644 --- a/asynchronix/src/ports/sink.rs +++ b/asynchronix/src/ports/sink.rs @@ -33,18 +33,21 @@ pub trait EventSinkStream: Iterator { /// Events that were previously in the stream remain available. 
 fn close(&mut self);
 
-    /// This is a stop-gap method that shadows `Iterator::try_fold` until the
-    /// latter can be implemented by user-defined types on stable Rust.
+    /// This is a stop-gap method that serves the exact same purpose as
+    /// `Iterator::try_fold` but is specialized for `Result` rather than the
+    /// `Try` trait so it can be implemented on stable Rust.
     ///
-    /// It serves the exact same purpose as `Iterator::try_fold` but is
-    /// specialized for `Result` to avoid depending on the unstable `Try` trait.
+    /// It makes it possible to provide a faster implementation when the event
+    /// sink stream can be iterated over more rapidly than by repeatedly calling
+    /// `Iterator::next`, for instance if the implementation of the stream
+    /// relies on a mutex that must be locked on each call.
     ///
-    /// Implementors may elect to override the default implementation when the
-    /// event sink stream can be iterated over more rapidly than by repeatably
-    /// calling `Iterator::next`, for instance if the implementation of the
-    /// stream relies on a mutex that must be locked on each call.
+    /// It is not publicly implementable because it may be removed at any time
+    /// once the `Try` trait is stabilized, without regard for backward
+    /// compatibility.
#[doc(hidden)] - fn try_fold(&mut self, init: B, f: F) -> Result + #[allow(private_interfaces)] + fn __try_fold(&mut self, init: B, f: F) -> Result where Self: Sized, F: FnMut(B, Self::Item) -> Result, diff --git a/asynchronix/src/ports/sink/event_buffer.rs b/asynchronix/src/ports/sink/event_buffer.rs index 15b89b0..35b25ee 100644 --- a/asynchronix/src/ports/sink/event_buffer.rs +++ b/asynchronix/src/ports/sink/event_buffer.rs @@ -86,7 +86,9 @@ impl EventSinkStream for EventBuffer { self.inner.is_open.store(false, Ordering::Relaxed); } - fn try_fold(&mut self, init: B, f: F) -> Result + #[doc(hidden)] + #[allow(private_interfaces)] + fn __try_fold(&mut self, init: B, f: F) -> Result where Self: Sized, F: FnMut(B, Self::Item) -> Result, diff --git a/asynchronix/src/rpc/api/simulation.proto b/asynchronix/src/rpc/api/simulation.proto index b8982a0..b12d593 100644 --- a/asynchronix/src/rpc/api/simulation.proto +++ b/asynchronix/src/rpc/api/simulation.proto @@ -31,7 +31,7 @@ message EventKey { uint64 subkey2 = 2; } -message InitRequest { optional google.protobuf.Timestamp time = 1; } +message InitRequest { google.protobuf.Timestamp time = 1; } message InitReply { oneof result { // Always returns exactly 1 variant. google.protobuf.Empty empty = 1; @@ -75,8 +75,8 @@ message ScheduleEventRequest { } string source_name = 3; bytes event = 4; - optional google.protobuf.Duration period = 5; - optional bool with_key = 6; + google.protobuf.Duration period = 5; + bool with_key = 6; } message ScheduleEventReply { oneof result { // Always returns exactly 1 variant. 
diff --git a/asynchronix/src/rpc/codegen.rs b/asynchronix/src/rpc/codegen.rs index 0dfd7c8..3221cbc 100644 --- a/asynchronix/src/rpc/codegen.rs +++ b/asynchronix/src/rpc/codegen.rs @@ -1,5 +1,7 @@ #![allow(unreachable_pub)] #![allow(clippy::enum_variant_names)] +#[rustfmt::skip] pub(crate) mod custom_transport; +#[rustfmt::skip] pub(crate) mod simulation; diff --git a/asynchronix/src/rpc/codegen/custom_transport.rs b/asynchronix/src/rpc/codegen/custom_transport.rs index 43a91bd..61eac9d 100644 --- a/asynchronix/src/rpc/codegen/custom_transport.rs +++ b/asynchronix/src/rpc/codegen/custom_transport.rs @@ -11,10 +11,7 @@ pub struct ServerError { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyRequest { /// Expects exactly 1 variant. - #[prost( - oneof = "any_request::Request", - tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11" - )] + #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] pub request: ::core::option::Option, } /// Nested message and enum types in `AnyRequest`. @@ -51,10 +48,7 @@ pub mod any_request { #[derive(Clone, PartialEq, ::prost::Message)] pub struct AnyReply { /// Contains exactly 1 variant. - #[prost( - oneof = "any_reply::Reply", - tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100" - )] + #[prost(oneof = "any_reply::Reply", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100")] pub reply: ::core::option::Option, } /// Nested message and enum types in `AnyReply`. 
diff --git a/asynchronix/src/rpc/codegen/simulation.rs b/asynchronix/src/rpc/codegen/simulation.rs index abe0073..aefb660 100644 --- a/asynchronix/src/rpc/codegen/simulation.rs +++ b/asynchronix/src/rpc/codegen/simulation.rs @@ -131,8 +131,8 @@ pub struct ScheduleEventRequest { pub event: ::prost::alloc::vec::Vec, #[prost(message, optional, tag = "5")] pub period: ::core::option::Option<::prost_types::Duration>, - #[prost(bool, optional, tag = "6")] - pub with_key: ::core::option::Option, + #[prost(bool, tag = "6")] + pub with_key: bool, /// Expects exactly 1 variant. #[prost(oneof = "schedule_event_request::Deadline", tags = "1, 2")] pub deadline: ::core::option::Option, @@ -411,19 +411,31 @@ pub mod simulation_server { async fn schedule_event( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn cancel_event( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn process_event( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn process_query( &self, request: tonic::Request, - ) -> std::result::Result, tonic::Status>; + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; async fn read_events( &self, request: tonic::Request, @@ -460,7 +472,10 @@ pub mod simulation_server { max_encoding_message_size: None, } } - pub fn with_interceptor(inner: T, interceptor: F) -> InterceptedService + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService where F: tonic::service::Interceptor, { @@ -516,15 +531,21 @@ pub mod simulation_server { "/simulation.Simulation/Init" => { #[allow(non_camel_case_types)] struct InitSvc(pub Arc); - impl tonic::server::UnaryService for InitSvc { + impl tonic::server::UnaryService + for InitSvc { type Response 
= super::InitReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::init(&inner, request).await }; + let fut = async move { + ::init(&inner, request).await + }; Box::pin(fut) } } @@ -554,15 +575,21 @@ pub mod simulation_server { "/simulation.Simulation/Time" => { #[allow(non_camel_case_types)] struct TimeSvc(pub Arc); - impl tonic::server::UnaryService for TimeSvc { + impl tonic::server::UnaryService + for TimeSvc { type Response = super::TimeReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::time(&inner, request).await }; + let fut = async move { + ::time(&inner, request).await + }; Box::pin(fut) } } @@ -592,15 +619,21 @@ pub mod simulation_server { "/simulation.Simulation/Step" => { #[allow(non_camel_case_types)] struct StepSvc(pub Arc); - impl tonic::server::UnaryService for StepSvc { + impl tonic::server::UnaryService + for StepSvc { type Response = super::StepReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = async move { ::step(&inner, request).await }; + let fut = async move { + ::step(&inner, request).await + }; Box::pin(fut) } } @@ -630,16 +663,23 @@ pub mod simulation_server { "/simulation.Simulation/StepUntil" => { #[allow(non_camel_case_types)] struct StepUntilSvc(pub Arc); - impl tonic::server::UnaryService for StepUntilSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for StepUntilSvc { type Response = super::StepUntilReply; - type Future = BoxFuture, tonic::Status>; + type Future = 
BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = - async move { ::step_until(&inner, request).await }; + let fut = async move { + ::step_until(&inner, request).await + }; Box::pin(fut) } } @@ -669,11 +709,15 @@ pub mod simulation_server { "/simulation.Simulation/ScheduleEvent" => { #[allow(non_camel_case_types)] struct ScheduleEventSvc(pub Arc); - impl tonic::server::UnaryService - for ScheduleEventSvc - { + impl< + T: Simulation, + > tonic::server::UnaryService + for ScheduleEventSvc { type Response = super::ScheduleEventReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -711,9 +755,15 @@ pub mod simulation_server { "/simulation.Simulation/CancelEvent" => { #[allow(non_camel_case_types)] struct CancelEventSvc(pub Arc); - impl tonic::server::UnaryService for CancelEventSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for CancelEventSvc { type Response = super::CancelEventReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -751,9 +801,15 @@ pub mod simulation_server { "/simulation.Simulation/ProcessEvent" => { #[allow(non_camel_case_types)] struct ProcessEventSvc(pub Arc); - impl tonic::server::UnaryService for ProcessEventSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for ProcessEventSvc { type Response = super::ProcessEventReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -791,9 +847,15 @@ pub mod simulation_server { "/simulation.Simulation/ProcessQuery" => { #[allow(non_camel_case_types)] struct ProcessQuerySvc(pub Arc); - impl tonic::server::UnaryService for ProcessQuerySvc { + 
impl< + T: Simulation, + > tonic::server::UnaryService + for ProcessQuerySvc { type Response = super::ProcessQueryReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -831,9 +893,15 @@ pub mod simulation_server { "/simulation.Simulation/ReadEvents" => { #[allow(non_camel_case_types)] struct ReadEventsSvc(pub Arc); - impl tonic::server::UnaryService for ReadEventsSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for ReadEventsSvc { type Response = super::ReadEventsReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, @@ -871,16 +939,23 @@ pub mod simulation_server { "/simulation.Simulation/OpenSink" => { #[allow(non_camel_case_types)] struct OpenSinkSvc(pub Arc); - impl tonic::server::UnaryService for OpenSinkSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for OpenSinkSvc { type Response = super::OpenSinkReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = - async move { ::open_sink(&inner, request).await }; + let fut = async move { + ::open_sink(&inner, request).await + }; Box::pin(fut) } } @@ -910,16 +985,23 @@ pub mod simulation_server { "/simulation.Simulation/CloseSink" => { #[allow(non_camel_case_types)] struct CloseSinkSvc(pub Arc); - impl tonic::server::UnaryService for CloseSinkSvc { + impl< + T: Simulation, + > tonic::server::UnaryService + for CloseSinkSvc { type Response = super::CloseSinkReply; - type Future = BoxFuture, tonic::Status>; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; fn call( &mut self, request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); - let fut = - async move { 
::close_sink(&inner, request).await }; + let fut = async move { + ::close_sink(&inner, request).await + }; Box::pin(fut) } } @@ -946,14 +1028,18 @@ pub mod simulation_server { }; Box::pin(fut) } - _ => Box::pin(async move { - Ok(http::Response::builder() - .status(200) - .header("grpc-status", "12") - .header("content-type", "application/grpc") - .body(empty_body()) - .unwrap()) - }), + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", "12") + .header("content-type", "application/grpc") + .body(empty_body()) + .unwrap(), + ) + }) + } } } } diff --git a/asynchronix/src/rpc/endpoint_registry.rs b/asynchronix/src/rpc/endpoint_registry.rs index 9ded8e5..67b5821 100644 --- a/asynchronix/src/rpc/endpoint_registry.rs +++ b/asynchronix/src/rpc/endpoint_registry.rs @@ -54,10 +54,10 @@ impl EndpointRegistry { self.event_sources.get_mut(name).map(|s| s.as_mut()) } - /// Adds an query source to the registry. + /// Adds a query source to the registry. /// - /// If the specified name is already in use for another query source, the source - /// provided as argument is returned in the error. + /// If the specified name is already in use for another query source, the + /// source provided as argument is returned in the error. pub fn add_query_source( &mut self, source: QuerySource, @@ -87,7 +87,7 @@ impl EndpointRegistry { /// /// If the specified name is already in use for another sink, the sink /// provided as argument is returned in the error. - pub fn add_sink(&mut self, sink: S, name: impl Into) -> Result<(), S> + pub fn add_event_sink(&mut self, sink: S, name: impl Into) -> Result<(), S> where S: EventSinkStream + Send + 'static, S::Item: Serialize, @@ -104,7 +104,7 @@ impl EndpointRegistry { /// Returns a mutable reference to the specified sink if it is in the /// registry. 
- pub(crate) fn get_sink_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> { + pub(crate) fn get_event_sink_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> { self.sinks.get_mut(name).map(|s| s.as_mut()) } } @@ -272,7 +272,7 @@ where } fn collect(&mut self) -> Result>, RmpEncodeError> { - EventSinkStream::try_fold(self, Vec::new(), |mut encoded_events, event| { + self.__try_fold(Vec::new(), |mut encoded_events, event| { rmp_serde::to_vec_named(&event).map(|encoded_event| { encoded_events.push(encoded_event); diff --git a/asynchronix/src/rpc/generic_server.rs b/asynchronix/src/rpc/generic_server.rs index edeb06d..6f54a93 100644 --- a/asynchronix/src/rpc/generic_server.rs +++ b/asynchronix/src/rpc/generic_server.rs @@ -248,7 +248,7 @@ where let reply = move || -> Result, (ErrorCode, String)> { let source_name = &request.source_name; let msgpack_event = &request.event; - let with_key = request.with_key.unwrap_or_default(); + let with_key = request.with_key; let period = request .period .map(|period| { @@ -508,7 +508,7 @@ where "the simulation was not started".to_string(), ))?; - let sink = registry.get_sink_mut(sink_name).ok_or(( + let sink = registry.get_event_sink_mut(sink_name).ok_or(( ErrorCode::SinkNotFound, "no sink is registered with the name '{}'".to_string(), ))?; @@ -549,7 +549,7 @@ where "the simulation was not started".to_string(), ))?; - let sink = registry.get_sink_mut(sink_name).ok_or(( + let sink = registry.get_event_sink_mut(sink_name).ok_or(( ErrorCode::SinkNotFound, "no sink is registered with the name '{}'".to_string(), ))?; @@ -582,7 +582,7 @@ where "the simulation was not started".to_string(), ))?; - let sink = registry.get_sink_mut(sink_name).ok_or(( + let sink = registry.get_event_sink_mut(sink_name).ok_or(( ErrorCode::SinkNotFound, "no sink is registered with the name '{}'".to_string(), ))?; diff --git a/asynchronix/src/rpc/grpc.rs b/asynchronix/src/rpc/grpc.rs index 1cb2960..94809e9 100644 --- 
a/asynchronix/src/rpc/grpc.rs +++ b/asynchronix/src/rpc/grpc.rs @@ -23,7 +23,9 @@ where F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, { // Use a single-threaded server. - let rt = tokio::runtime::Builder::new_current_thread().build()?; + let rt = tokio::runtime::Builder::new_current_thread() + .enable_io() + .build()?; let sim_manager = GrpcServer::new(sim_gen); From 0734dc2faca9ef4249a3bd539feaf1e159378eeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ja=C5=ADhien=20Piatlicki?= Date: Fri, 26 Apr 2024 11:06:40 +0200 Subject: [PATCH 05/12] Implement clonable outputs and add submodels example --- asynchronix/Cargo.toml | 1 + asynchronix/examples/assembly.rs | 153 ++++++++++++++++++++ asynchronix/examples/stepper_motor.rs | 3 +- asynchronix/src/ports.rs | 35 +++++ asynchronix/src/ports/output.rs | 73 +++++----- asynchronix/src/ports/output/broadcaster.rs | 95 +++++++++--- asynchronix/src/ports/output/sender.rs | 60 +++++++- asynchronix/src/ports/sink.rs | 2 +- asynchronix/src/ports/sink/event_buffer.rs | 8 + asynchronix/src/ports/sink/event_slot.rs | 10 +- asynchronix/src/ports/source.rs | 18 +-- asynchronix/src/ports/source/broadcaster.rs | 39 +++-- asynchronix/src/util.rs | 1 + asynchronix/src/util/cached_rw_lock.rs | 111 ++++++++++++++ asynchronix/src/util/task_set.rs | 4 + 15 files changed, 524 insertions(+), 89 deletions(-) create mode 100644 asynchronix/examples/assembly.rs create mode 100644 asynchronix/src/util/cached_rw_lock.rs diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index 9533780..079c113 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -39,6 +39,7 @@ dev-logs = [] async-event = "0.1" crossbeam-utils = "0.8" diatomic-waker = "0.1" +dyn-clone = "1.0" futures-channel = "0.3" futures-task = "0.3" multishot = "0.3.2" diff --git a/asynchronix/examples/assembly.rs b/asynchronix/examples/assembly.rs new file mode 100644 index 0000000..e550656 --- /dev/null +++ b/asynchronix/examples/assembly.rs @@ -0,0 +1,153 @@ +//! 
Example: an assembly consisting of a current-controlled stepper motor and +//! its driver. +//! +//! This example demonstrates in particular: +//! +//! * submodels, +//! * outputs cloning, +//! * self-scheduling methods, +//! * model setup, +//! * model initialization, +//! * simulation monitoring with event streams. +//! +//! ```text +//! ┌──────────────────────────────────────────────┐ +//! │ Assembly │ +//! │ ┌──────────┐ ┌──────────┐ │ +//! PPS │ │ │ coil currents │ │ │position +//! Pulse rate ●───────▶│──▶│ Driver ├───────────────▶│ Motor ├──▶│─────────▶ +//! (±freq)│ │ │ (IA, IB) │ │ │(0:199) +//! │ └──────────┘ └──────────┘ │ +//! └──────────────────────────────────────────────┘ +//! ``` + +use std::time::Duration; + +use asynchronix::model::{Model, SetupContext}; +use asynchronix::ports::{EventBuffer, Output}; +use asynchronix::simulation::{Mailbox, SimInit}; +use asynchronix::time::MonotonicTime; + +mod stepper_motor; + +pub use stepper_motor::{Driver, Motor}; + +pub struct MotorAssembly { + pub position: Output, + init_pos: u16, + load: Output, + pps: Output, +} + +impl MotorAssembly { + pub fn new(init_pos: u16) -> Self { + Self { + position: Default::default(), + init_pos, + load: Default::default(), + pps: Default::default(), + } + } + + /// Sets the pulse rate (sign = direction) [Hz] -- input port. + pub async fn pulse_rate(&mut self, pps: f64) { + self.pps.send(pps).await; + } + + /// Torque applied by the load [N·m] -- input port. + pub async fn load(&mut self, torque: f64) { + self.load.send(torque).await; + } +} + +impl Model for MotorAssembly { + fn setup(&mut self, setup_context: &SetupContext) { + let mut motor = Motor::new(self.init_pos); + let mut driver = Driver::new(1.0); + + // Mailboxes. + let motor_mbox = Mailbox::new(); + let driver_mbox = Mailbox::new(); + + // Connections. 
+ self.pps.connect(Driver::pulse_rate, &driver_mbox); + self.load.connect(Motor::load, &motor_mbox); + driver.current_out.connect(Motor::current_in, &motor_mbox); + // Note: it is important to clone `position` from the parent to the + // submodel so that all connections made by the user to the parent model + // are preserved. Connections added after cloning are reflected in all + // clones. + motor.position = self.position.clone(); + + setup_context.add_model(driver, driver_mbox); + setup_context.add_model(motor, motor_mbox); + } +} + +fn main() { + // --------------- + // Bench assembly. + // --------------- + + // Models. + let init_pos = 123; + let mut assembly = MotorAssembly::new(init_pos); + + // Mailboxes. + let assembly_mbox = Mailbox::new(); + let assembly_addr = assembly_mbox.address(); + + // Model handles for simulation. + let mut position = EventBuffer::new(); + assembly.position.connect_sink(&position); + + // Start time (arbitrary since models do not depend on absolute time). + let t0 = MonotonicTime::EPOCH; + + // Assembly and initialization. + let mut simu = SimInit::new().add_model(assembly, assembly_mbox).init(t0); + + // ---------- + // Simulation. + // ---------- + + // Check initial conditions. + let mut t = t0; + assert_eq!(simu.time(), t); + assert_eq!(position.next(), Some(init_pos)); + assert!(position.next().is_none()); + + // Start the motor in 2s with a PPS of 10Hz. + simu.schedule_event( + Duration::from_secs(2), + MotorAssembly::pulse_rate, + 10.0, + &assembly_addr, + ) + .unwrap(); + + // Advance simulation time to two next events. + simu.step(); + t += Duration::new(2, 0); + assert_eq!(simu.time(), t); + simu.step(); + t += Duration::new(0, 100_000_000); + assert_eq!(simu.time(), t); + + // Whichever the starting position, after two phase increments from the + // driver the rotor should have synchronized with the driver, with a + // position given by this beautiful formula. 
+ let mut pos = (((init_pos + 1) / 4) * 4 + 1) % Motor::STEPS_PER_REV; + assert_eq!(position.by_ref().last().unwrap(), pos); + + // Advance simulation time by 0.9s, which with a 10Hz PPS should correspond to + // 9 position increments. + simu.step_by(Duration::new(0, 900_000_000)); + t += Duration::new(0, 900_000_000); + assert_eq!(simu.time(), t); + for _ in 0..9 { + pos = (pos + 1) % Motor::STEPS_PER_REV; + assert_eq!(position.next(), Some(pos)); + } + assert!(position.next().is_none()); +} diff --git a/asynchronix/examples/stepper_motor.rs b/asynchronix/examples/stepper_motor.rs index 3d24221..c733af5 100644 --- a/asynchronix/examples/stepper_motor.rs +++ b/asynchronix/examples/stepper_motor.rs @@ -40,7 +40,7 @@ impl Motor { pub const TORQUE_CONSTANT: f64 = 1.0; /// Creates a motor with the specified initial position. - fn new(position: u16) -> Self { + pub fn new(position: u16) -> Self { Self { position: Default::default(), pos: position % Self::STEPS_PER_REV, @@ -176,6 +176,7 @@ impl Driver { impl Model for Driver {} +#[allow(dead_code)] fn main() { // --------------- // Bench assembly. diff --git a/asynchronix/src/ports.rs b/asynchronix/src/ports.rs index f7ae6d6..c764302 100644 --- a/asynchronix/src/ports.rs +++ b/asynchronix/src/ports.rs @@ -12,6 +12,41 @@ //! contrast, since events are buffered in the mailbox of the target model, //! sending an event is a fire-and-forget operation. For this reason, output //! ports should generally be preferred over requestor ports when possible. +//! +//! `Output` and `Requestor` ports are clonable. Their clones are shallow +//! copies, meaning that any modification of the ports connected to one clone is +//! immediately reflected in other clones. +//! +//! #### Example +//! +//! The outputs in this example are clones of each other and remain therefore +//! always connected to the same inputs. For an example usage of outputs cloning +//! in submodels assemblies, see the [`assembly example`][assembly]. +//! +//! 
[assembly]: +//! https://github.com/asynchronics/asynchronix/tree/main/asynchronix/examples/assembly.rs +//! +//! ``` +//! use asynchronix::model::Model; +//! use asynchronix::ports::Output; +//! +//! pub struct MyModel { +//! pub output_a: Output, +//! pub output_b: Output, +//! } +//! +//! impl MyModel { +//! pub fn new() -> Self { +//! let output: Output<_> = Default::default(); +//! Self { +//! output_a: output.clone(), +//! output_b: output, +//! } +//! } +//! } +//! +//! impl Model for MyModel {} +//! ``` mod input; mod output; diff --git a/asynchronix/src/ports/output.rs b/asynchronix/src/ports/output.rs index 5f60e8a..d5599fd 100644 --- a/asynchronix/src/ports/output.rs +++ b/asynchronix/src/ports/output.rs @@ -7,6 +7,7 @@ use crate::model::Model; use crate::ports::{EventSink, LineError, LineId}; use crate::ports::{InputFn, ReplierFn}; use crate::simulation::Address; +use crate::util::cached_rw_lock::CachedRwLock; use broadcaster::{EventBroadcaster, QueryBroadcaster}; @@ -17,9 +18,13 @@ use self::sender::{EventSinkSender, InputSender, ReplierSender}; /// `Output` ports can be connected to input ports, i.e. to asynchronous model /// methods that return no value. They broadcast events to all connected input /// ports. +/// +/// When an `Output` is cloned, the information on connected ports remains +/// shared and therefore all clones use and modify the same list of connected +/// ports. 
+#[derive(Clone)] pub struct Output { - broadcaster: EventBroadcaster, - next_line_id: u64, + broadcaster: CachedRwLock>, } impl Output { @@ -40,26 +45,16 @@ impl Output { F: for<'a> InputFn<'a, M, T, S> + Clone, S: Send + 'static, { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; let sender = Box::new(InputSender::new(input, address.into().0)); - self.broadcaster.add(sender, line_id); - - line_id + self.broadcaster.write().unwrap().add(sender) } /// Adds a connection to an event sink such as an /// [`EventSlot`](crate::ports::EventSlot) or /// [`EventBuffer`](crate::ports::EventBuffer). pub fn connect_sink>(&mut self, sink: &S) -> LineId { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; let sender = Box::new(EventSinkSender::new(sink.writer())); - self.broadcaster.add(sender, line_id); - - line_id + self.broadcaster.write().unwrap().add(sender) } /// Removes the connection specified by the `LineId` parameter. @@ -69,7 +64,7 @@ impl Output { /// [`QuerySource`](crate::ports::QuerySource) instance and may result in /// the disconnection of an arbitrary endpoint. pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { - if self.broadcaster.remove(line_id) { + if self.broadcaster.write().unwrap().remove(line_id) { Ok(()) } else { Err(LineError {}) @@ -78,27 +73,31 @@ impl Output { /// Removes all connections. pub fn disconnect_all(&mut self) { - self.broadcaster.clear(); + self.broadcaster.write().unwrap().clear(); } /// Broadcasts an event to all connected input ports. 
pub async fn send(&mut self, arg: T) { - self.broadcaster.broadcast(arg).await.unwrap(); + let broadcaster = self.broadcaster.write_scratchpad().unwrap(); + broadcaster.broadcast(arg).await.unwrap(); } } impl Default for Output { fn default() -> Self { Self { - broadcaster: EventBroadcaster::default(), - next_line_id: 0, + broadcaster: CachedRwLock::new(EventBroadcaster::default()), } } } impl fmt::Debug for Output { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Output ({} connected ports)", self.broadcaster.len()) + write!( + f, + "Output ({} connected ports)", + self.broadcaster.read_unsync().len() + ) } } @@ -107,9 +106,12 @@ impl fmt::Debug for Output { /// `Requestor` ports can be connected to replier ports, i.e. to asynchronous /// model methods that return a value. They broadcast queries to all connected /// replier ports. +/// +/// When a `Requestor` is cloned, the information on connected ports remains +/// shared and therefore all clones use and modify the same list of connected +/// ports. pub struct Requestor { - broadcaster: QueryBroadcaster, - next_line_id: u64, + broadcaster: CachedRwLock>, } impl Requestor { @@ -130,13 +132,8 @@ impl Requestor { F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, S: Send + 'static, { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; let sender = Box::new(ReplierSender::new(replier, address.into().0)); - self.broadcaster.add(sender, line_id); - - line_id + self.broadcaster.write().unwrap().add(sender) } /// Removes the connection specified by the `LineId` parameter. @@ -146,7 +143,7 @@ impl Requestor { /// [`QuerySource`](crate::ports::QuerySource) instance and may result in /// the disconnection of an arbitrary endpoint. 
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> { - if self.broadcaster.remove(line_id) { + if self.broadcaster.write().unwrap().remove(line_id) { Ok(()) } else { Err(LineError {}) @@ -155,26 +152,34 @@ impl Requestor { /// Removes all connections. pub fn disconnect_all(&mut self) { - self.broadcaster.clear(); + self.broadcaster.write().unwrap().clear(); } /// Broadcasts a query to all connected replier ports. pub async fn send(&mut self, arg: T) -> impl Iterator + '_ { - self.broadcaster.broadcast(arg).await.unwrap() + self.broadcaster + .write_scratchpad() + .unwrap() + .broadcast(arg) + .await + .unwrap() } } impl Default for Requestor { fn default() -> Self { Self { - broadcaster: QueryBroadcaster::default(), - next_line_id: 0, + broadcaster: CachedRwLock::new(QueryBroadcaster::default()), } } } impl fmt::Debug for Requestor { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Requestor ({} connected ports)", self.broadcaster.len()) + write!( + f, + "Requestor ({} connected ports)", + self.broadcaster.read_unsync().len() + ) } } diff --git a/asynchronix/src/ports/output/broadcaster.rs b/asynchronix/src/ports/output/broadcaster.rs index f269312..cb39f52 100644 --- a/asynchronix/src/ports/output/broadcaster.rs +++ b/asynchronix/src/ports/output/broadcaster.rs @@ -25,6 +25,8 @@ use crate::util::task_set::TaskSet; /// - the outputs of all sender futures are returned all at once rather than /// with an asynchronous iterator (a.k.a. async stream). pub(super) struct BroadcasterInner { + /// Line identifier for the next port to be connected. + next_line_id: u64, /// The list of senders with their associated line identifier. senders: Vec<(LineId, Box>)>, /// Fields explicitly borrowed by the `BroadcastFuture`. @@ -38,15 +40,18 @@ impl BroadcasterInner { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. 
- pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.senders.push((id, sender)); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + assert!(self.next_line_id != u64::MAX); + let line_id = LineId(self.next_line_id); + self.next_line_id += 1; - self.shared.futures_env.push(FutureEnv { - storage: None, - output: None, - }); + self.senders.push((line_id, sender)); + + self.shared.futures_env.push(FutureEnv::default()); self.shared.task_set.resize(self.senders.len()); + + line_id } /// Removes the first sender with the specified identifier, if any. @@ -122,6 +127,7 @@ impl Default for BroadcasterInner { let wake_src = wake_sink.source(); Self { + next_line_id: 0, senders: Vec::new(), shared: Shared { wake_sink, @@ -133,12 +139,23 @@ impl Default for BroadcasterInner { } } +impl Clone for BroadcasterInner { + fn clone(&self) -> Self { + Self { + next_line_id: self.next_line_id, + senders: self.senders.clone(), + shared: self.shared.clone(), + } + } +} + /// An object that can efficiently broadcast events to several input ports. /// /// This is very similar to `source::broadcaster::EventBroadcaster`, but /// generates non-owned futures instead. /// /// See `BroadcasterInner` for implementation details. +#[derive(Clone)] pub(super) struct EventBroadcaster { /// The broadcaster core object. inner: BroadcasterInner, @@ -151,8 +168,8 @@ impl EventBroadcaster { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. - pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.inner.add(sender, id); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + self.inner.add(sender) } /// Removes the first sender with the specified identifier, if any. @@ -212,8 +229,8 @@ impl QueryBroadcaster { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. 
- pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.inner.add(sender, id); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + self.inner.add(sender) } /// Removes the first sender with the specified identifier, if any. @@ -272,6 +289,14 @@ impl Default for QueryBroadcaster { } } +impl Clone for QueryBroadcaster { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + /// Data related to a sender future. struct FutureEnv { /// Cached storage for the future. @@ -280,6 +305,15 @@ struct FutureEnv { output: Option, } +impl Default for FutureEnv { + fn default() -> Self { + Self { + storage: None, + output: None, + } + } +} + /// A type-erased `Send` future wrapped in a `RecycleBox`. type RecycleBoxFuture<'a, R> = RecycleBox> + Send + 'a>; @@ -299,6 +333,23 @@ struct Shared { storage: Option>>>, } +impl Clone for Shared { + fn clone(&self) -> Self { + let wake_sink = WakeSink::new(); + let wake_src = wake_sink.source(); + + let mut futures_env = Vec::new(); + futures_env.resize_with(self.futures_env.len(), Default::default); + + Self { + wake_sink, + task_set: TaskSet::with_len(wake_src, self.task_set.len()), + futures_env, + storage: None, + } + } +} + /// A future aggregating the outputs of a collection of sender futures. 
/// /// The idea is to join all sender futures as efficiently as possible, meaning: @@ -537,12 +588,12 @@ mod tests { let mut mailboxes = Vec::new(); let mut broadcaster = EventBroadcaster::default(); - for id in 0..N_RECV { + for _ in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); let sender = Box::new(InputSender::new(Counter::inc, address)); - broadcaster.add(sender, LineId(id as u64)); + broadcaster.add(sender); mailboxes.push(mailbox); } @@ -585,12 +636,12 @@ mod tests { let mut mailboxes = Vec::new(); let mut broadcaster = QueryBroadcaster::default(); - for id in 0..N_RECV { + for _ in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address)); - broadcaster.add(sender, LineId(id as u64)); + broadcaster.add(sender); mailboxes.push(mailbox); } @@ -664,6 +715,12 @@ mod tests { } } + impl Clone for TestEvent { + fn clone(&self) -> Self { + unreachable!() + } + } + // An object that can wake a `TestEvent`. 
#[derive(Clone)] struct TestEventWaker { @@ -705,9 +762,9 @@ mod tests { let (test_event3, waker3) = test_event::(); let mut broadcaster = QueryBroadcaster::default(); - broadcaster.add(Box::new(test_event1), LineId(1)); - broadcaster.add(Box::new(test_event2), LineId(2)); - broadcaster.add(Box::new(test_event3), LineId(3)); + broadcaster.add(Box::new(test_event1)); + broadcaster.add(Box::new(test_event2)); + broadcaster.add(Box::new(test_event3)); let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); @@ -777,8 +834,8 @@ mod tests { let (test_event2, waker2) = test_event::(); let mut broadcaster = QueryBroadcaster::default(); - broadcaster.add(Box::new(test_event1), LineId(1)); - broadcaster.add(Box::new(test_event2), LineId(2)); + broadcaster.add(Box::new(test_event1)); + broadcaster.add(Box::new(test_event2)); let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); diff --git a/asynchronix/src/ports/output/sender.rs b/asynchronix/src/ports/output/sender.rs index 88cddfa..1c9ab02 100644 --- a/asynchronix/src/ports/output/sender.rs +++ b/asynchronix/src/ports/output/sender.rs @@ -6,6 +6,7 @@ use std::mem::ManuallyDrop; use std::pin::Pin; use std::task::{Context, Poll}; +use dyn_clone::DynClone; use recycle_box::{coerce_box, RecycleBox}; use crate::channel; @@ -14,11 +15,13 @@ use crate::ports::{EventSinkWriter, InputFn, ReplierFn}; /// An event or query sender abstracting over the target model and input or /// replier method. -pub(super) trait Sender: Send { +pub(super) trait Sender: DynClone + Send { /// Asynchronously send the event or request. fn send(&mut self, arg: T) -> RecycledFuture<'_, Result>; } +dyn_clone::clone_trait_object!( Sender); + /// An object that can send events to an input port. 
pub(super) struct InputSender where @@ -72,6 +75,24 @@ where } } +impl Clone for InputSender +where + M: Model, + F: for<'a> InputFn<'a, M, T, S> + Clone, + T: Send + 'static, + S: Send + 'static, +{ + fn clone(&self) -> Self { + Self { + func: self.func.clone(), + sender: self.sender.clone(), + fut_storage: None, + _phantom_closure: PhantomData, + _phantom_closure_marker: PhantomData, + } + } +} + /// An object that can send a request to a replier port and retrieve a response. pub(super) struct ReplierSender { func: F, @@ -140,6 +161,26 @@ where } } +impl Clone for ReplierSender +where + M: Model, + F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, + T: Send + 'static, + R: Send + 'static, + S: Send, +{ + fn clone(&self) -> Self { + Self { + func: self.func.clone(), + sender: self.sender.clone(), + receiver: multishot::Receiver::new(), + fut_storage: None, + _phantom_closure: PhantomData, + _phantom_closure_marker: PhantomData, + } + } +} + /// An object that can send a payload to an event sink. pub(super) struct EventSinkSender> { writer: W, @@ -157,9 +198,10 @@ impl> EventSinkSender { } } -impl> Sender for EventSinkSender +impl Sender for EventSinkSender where T: Send + 'static, + W: EventSinkWriter, { fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> { let writer = &mut self.writer; @@ -172,6 +214,20 @@ where } } +impl Clone for EventSinkSender +where + T: Send + 'static, + W: EventSinkWriter, +{ + fn clone(&self) -> Self { + Self { + writer: self.writer.clone(), + fut_storage: None, + _phantom_event: PhantomData, + } + } +} + /// Error returned when the mailbox was closed or dropped. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub(super) struct SendError {} diff --git a/asynchronix/src/ports/sink.rs b/asynchronix/src/ports/sink.rs index d22d008..e0fc0d1 100644 --- a/asynchronix/src/ports/sink.rs +++ b/asynchronix/src/ports/sink.rs @@ -14,7 +14,7 @@ pub trait EventSink { } /// A writer handle to an event sink. 
-pub trait EventSinkWriter: Send + Sync + 'static { +pub trait EventSinkWriter: Clone + Send + Sync + 'static { /// Writes a value to the associated sink. fn write(&self, event: T); } diff --git a/asynchronix/src/ports/sink/event_buffer.rs b/asynchronix/src/ports/sink/event_buffer.rs index 35b25ee..1cc8718 100644 --- a/asynchronix/src/ports/sink/event_buffer.rs +++ b/asynchronix/src/ports/sink/event_buffer.rs @@ -133,6 +133,14 @@ impl EventSinkWriter for EventBufferWriter { } } +impl Clone for EventBufferWriter { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + impl fmt::Debug for EventBufferWriter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("EventBufferWriter").finish_non_exhaustive() diff --git a/asynchronix/src/ports/sink/event_slot.rs b/asynchronix/src/ports/sink/event_slot.rs index 62a6cef..135242f 100644 --- a/asynchronix/src/ports/sink/event_slot.rs +++ b/asynchronix/src/ports/sink/event_slot.rs @@ -10,7 +10,7 @@ struct Inner { slot: Mutex>, } -/// An `EventSink` and `EventSinkStream` that only keeps the last event. +/// An [`EventSink`] and [`EventSinkStream`] that only keeps the last event. /// /// Once the value is read, the iterator will return `None` until a new value is /// received. If the slot contains a value when a new value is received, the @@ -113,6 +113,14 @@ impl EventSinkWriter for EventSlotWriter { } } +impl Clone for EventSlotWriter { + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + } + } +} + impl fmt::Debug for EventSlotWriter { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("EventStreamWriter").finish_non_exhaustive() diff --git a/asynchronix/src/ports/source.rs b/asynchronix/src/ports/source.rs index 6850005..7c8ce3e 100644 --- a/asynchronix/src/ports/source.rs +++ b/asynchronix/src/ports/source.rs @@ -27,7 +27,6 @@ use super::ReplierFn; /// simulation monitoring endpoint instantiated during bench assembly. 
pub struct EventSource { broadcaster: Arc>>, - next_line_id: u64, } impl EventSource { @@ -48,13 +47,8 @@ impl EventSource { F: for<'a> InputFn<'a, M, T, S> + Clone, S: Send + 'static, { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; let sender = Box::new(InputSender::new(input, address.into().0)); - self.broadcaster.lock().unwrap().add(sender, line_id); - - line_id + self.broadcaster.lock().unwrap().add(sender) } /// Removes the connection specified by the `LineId` parameter. @@ -163,7 +157,6 @@ impl Default for EventSource { fn default() -> Self { Self { broadcaster: Arc::new(Mutex::new(EventBroadcaster::default())), - next_line_id: 0, } } } @@ -187,7 +180,6 @@ impl fmt::Debug for EventSource { /// instantiated during bench assembly. pub struct QuerySource { broadcaster: Arc>>, - next_line_id: u64, } impl QuerySource { @@ -208,13 +200,8 @@ impl QuerySource { F: for<'a> ReplierFn<'a, M, T, R, S> + Clone, S: Send + 'static, { - assert!(self.next_line_id != u64::MAX); - let line_id = LineId(self.next_line_id); - self.next_line_id += 1; let sender = Box::new(ReplierSender::new(replier, address.into().0)); - self.broadcaster.lock().unwrap().add(sender, line_id); - - line_id + self.broadcaster.lock().unwrap().add(sender) } /// Removes the connection specified by the `LineId` parameter. @@ -259,7 +246,6 @@ impl Default for QuerySource fn default() -> Self { Self { broadcaster: Arc::new(Mutex::new(QueryBroadcaster::default())), - next_line_id: 0, } } } diff --git a/asynchronix/src/ports/source/broadcaster.rs b/asynchronix/src/ports/source/broadcaster.rs index d3fb990..cff1d50 100644 --- a/asynchronix/src/ports/source/broadcaster.rs +++ b/asynchronix/src/ports/source/broadcaster.rs @@ -24,6 +24,8 @@ use crate::util::task_set::TaskSet; /// does, but the outputs of all sender futures are returned all at once rather /// than with an asynchronous iterator (a.k.a. async stream). 
pub(super) struct BroadcasterInner { + /// Line identifier for the next port to be connected. + next_line_id: u64, /// The list of senders with their associated line identifier. senders: Vec<(LineId, Box>)>, } @@ -35,8 +37,14 @@ impl BroadcasterInner { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. - pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.senders.push((id, sender)); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + assert!(self.next_line_id != u64::MAX); + let line_id = LineId(self.next_line_id); + self.next_line_id += 1; + + self.senders.push((line_id, sender)); + + line_id } /// Removes the first sender with the specified identifier, if any. @@ -89,6 +97,7 @@ impl BroadcasterInner { impl Default for BroadcasterInner { fn default() -> Self { Self { + next_line_id: 0, senders: Vec::new(), } } @@ -112,8 +121,8 @@ impl EventBroadcaster { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. - pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.inner.add(sender, id); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + self.inner.add(sender) } /// Removes the first sender with the specified identifier, if any. @@ -190,8 +199,8 @@ impl QueryBroadcaster { /// /// This method will panic if the total count of senders would reach /// `u32::MAX - 1`. - pub(super) fn add(&mut self, sender: Box>, id: LineId) { - self.inner.add(sender, id); + pub(super) fn add(&mut self, sender: Box>) -> LineId { + self.inner.add(sender) } /// Removes the first sender with the specified identifier, if any. 
@@ -462,12 +471,12 @@ mod tests { let mut mailboxes = Vec::new(); let mut broadcaster = EventBroadcaster::default(); - for id in 0..N_RECV { + for _ in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); let sender = Box::new(InputSender::new(Counter::inc, address)); - broadcaster.add(sender, LineId(id as u64)); + broadcaster.add(sender); mailboxes.push(mailbox); } @@ -510,12 +519,12 @@ mod tests { let mut mailboxes = Vec::new(); let mut broadcaster = QueryBroadcaster::default(); - for id in 0..N_RECV { + for _ in 0..N_RECV { let mailbox = Receiver::new(10); let address = mailbox.sender(); let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address)); - broadcaster.add(sender, LineId(id as u64)); + broadcaster.add(sender); mailboxes.push(mailbox); } @@ -629,9 +638,9 @@ mod tests { let (test_event3, waker3) = test_event::(); let mut broadcaster = QueryBroadcaster::default(); - broadcaster.add(Box::new(test_event1), LineId(1)); - broadcaster.add(Box::new(test_event2), LineId(2)); - broadcaster.add(Box::new(test_event3), LineId(3)); + broadcaster.add(Box::new(test_event1)); + broadcaster.add(Box::new(test_event2)); + broadcaster.add(Box::new(test_event3)); let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); @@ -701,8 +710,8 @@ mod tests { let (test_event2, waker2) = test_event::(); let mut broadcaster = QueryBroadcaster::default(); - broadcaster.add(Box::new(test_event1), LineId(1)); - broadcaster.add(Box::new(test_event2), LineId(2)); + broadcaster.add(Box::new(test_event1)); + broadcaster.add(Box::new(test_event2)); let mut fut = Box::pin(broadcaster.broadcast(())); let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false)); diff --git a/asynchronix/src/util.rs b/asynchronix/src/util.rs index f618e05..631479a 100644 --- a/asynchronix/src/util.rs +++ b/asynchronix/src/util.rs @@ -1,4 +1,5 @@ pub(crate) mod bit; +pub(crate) mod cached_rw_lock; pub(crate) mod 
indexed_priority_queue; pub(crate) mod priority_queue; pub(crate) mod rng; diff --git a/asynchronix/src/util/cached_rw_lock.rs b/asynchronix/src/util/cached_rw_lock.rs new file mode 100644 index 0000000..eba8bf1 --- /dev/null +++ b/asynchronix/src/util/cached_rw_lock.rs @@ -0,0 +1,111 @@ +use std::ops::{Deref, DerefMut}; +use std::sync::atomic::{AtomicUsize, Ordering}; +use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; + +/// A cached read-write lock. +/// +/// This read-write lock maintains a local cache in each clone for read +/// access. Regular writes are always synchronized and performed on the shared +/// data. Regular reads are synchronized only when the shared data has been +/// modified since the local cache was last synchronized. The local cache can +/// alternatively be used as a scratchpad without invalidating the shared data, +/// in which case all changes to the scratchpad will be lost on the next +/// synchronization. +#[derive(Clone)] +pub(crate) struct CachedRwLock { + local: T, + local_epoch: usize, + shared: Arc>, + epoch: Arc, +} + +impl CachedRwLock { + /// Creates a new cached read-write lock in an unlocked state. + pub(crate) fn new(t: T) -> Self { + let shared = t.clone(); + Self { + local: t, + local_epoch: 0, + shared: Arc::new(Mutex::new(shared)), + epoch: Arc::new(AtomicUsize::new(0)), + } + } + + /// Gives access to the local cache without synchronization. + pub(crate) fn read_unsync(&self) -> &T { + &self.local + } + + /// Synchronizes the local cache if it is behind the shared data and gives + /// access to it.
+ #[allow(dead_code)] + pub(crate) fn read(&mut self) -> LockResult<&T> { + if self.epoch.load(Ordering::Relaxed) != self.local_epoch { + match self.shared.lock() { + LockResult::Ok(shared) => { + self.local = shared.clone(); + self.local_epoch = self.epoch.load(Ordering::Relaxed) + } + LockResult::Err(_) => return LockResult::Err(PoisonError::new(&self.local)), + } + } + LockResult::Ok(&self.local) + } + + /// Gives write access to the local cache without synchronization so it can + /// be used as a scratchpad. + #[allow(dead_code)] + pub(crate) fn write_scratchpad_unsync(&mut self) -> &mut T { + &mut self.local + } + + /// Synchronizes the local cache if it is behind the shared data and gives + /// write access to it so it can be used as a scratchpad. + pub(crate) fn write_scratchpad(&mut self) -> LockResult<&mut T> { + if self.epoch.load(Ordering::Relaxed) != self.local_epoch { + match self.shared.lock() { + LockResult::Ok(shared) => { + self.local = shared.clone(); + self.local_epoch = self.epoch.load(Ordering::Relaxed) + } + LockResult::Err(_) => return LockResult::Err(PoisonError::new(&mut self.local)), + } + } + LockResult::Ok(&mut self.local) + } + + /// Acquires a write lock on the shared data. + pub(crate) fn write(&mut self) -> LockResult> { + let guard = self.shared.lock(); + let epoch = self.epoch.load(Ordering::Relaxed) + 1; + self.epoch.store(epoch, Ordering::Relaxed); + + match guard { + LockResult::Ok(shared) => LockResult::Ok(CachedRwLockWriteGuard { guard: shared }), + LockResult::Err(poison) => LockResult::Err(PoisonError::new(CachedRwLockWriteGuard { + guard: poison.into_inner(), + })), + } + } +} + +/// Write guard. +/// +/// The lock is released when the guard is dropped. 
+pub(crate) struct CachedRwLockWriteGuard<'a, T: Clone> { + guard: MutexGuard<'a, T>, +} + +impl Deref for CachedRwLockWriteGuard<'_, T> { + type Target = T; + + fn deref(&self) -> &T { + &self.guard + } +} + +impl DerefMut for CachedRwLockWriteGuard<'_, T> { + fn deref_mut(&mut self) -> &mut T { + &mut self.guard + } +} diff --git a/asynchronix/src/util/task_set.rs b/asynchronix/src/util/task_set.rs index 90f3e41..e1145e8 100644 --- a/asynchronix/src/util/task_set.rs +++ b/asynchronix/src/util/task_set.rs @@ -271,6 +271,10 @@ impl TaskSet { waker_ref(&self.tasks[idx]) } + + pub(crate) fn len(&self) -> usize { + self.task_count + } } /// Internals shared between a `TaskSet` and its associated `Task`s. From e4b108c6b73fdf2415df65a214643f7ad9f29372 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Mon, 6 May 2024 16:38:29 +0200 Subject: [PATCH 06/12] Satisfy newest clippy --- .github/workflows/ci.yml | 2 +- asynchronix/Cargo.toml | 2 +- asynchronix/build.rs | 3 +++ asynchronix/src/executor/task/cancel_token.rs | 6 +++--- asynchronix/src/executor/task/promise.rs | 4 ++-- asynchronix/src/executor/task/runnable.rs | 2 +- 6 files changed, 11 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 8c952a0..b0b9912 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -17,7 +17,7 @@ jobs: matrix: rust: - stable - - 1.75.0 + - 1.77.0 steps: - name: Checkout sources uses: actions/checkout@v3 diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index 079c113..981ce35 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -9,7 +9,7 @@ name = "asynchronix" authors = ["Serge Barral "] version = "0.2.2" edition = "2021" -rust-version = "1.75" +rust-version = "1.77.0" license = "MIT OR Apache-2.0" repository = "https://github.com/asynchronics/asynchronix" readme = "../README.md" diff --git a/asynchronix/build.rs b/asynchronix/build.rs index ce89ca8..fb7492c 100644 --- a/asynchronix/build.rs +++ 
b/asynchronix/build.rs @@ -1,4 +1,7 @@ fn main() -> Result<(), Box> { + // Prevent warnings when checking for flag `asynchronix_loom`. + println!("cargo::rustc-check-cfg=cfg(asynchronix_loom)"); + #[cfg(feature = "rpc-codegen")] let builder = tonic_build::configure() .build_client(false) diff --git a/asynchronix/src/executor/task/cancel_token.rs b/asynchronix/src/executor/task/cancel_token.rs index 2bc2b13..6d1511f 100644 --- a/asynchronix/src/executor/task/cancel_token.rs +++ b/asynchronix/src/executor/task/cancel_token.rs @@ -25,7 +25,7 @@ struct VTable { /// but not currently scheduled (no `Runnable` exist) then the future is /// dropped immediately. Otherwise, the future will be dropped at a later /// time by the scheduled `Runnable` once it runs. -unsafe fn cancel(ptr: *const ()) +unsafe fn cancel(ptr: *const ()) where F: Future + Send + 'static, F::Output: Send + 'static, @@ -123,7 +123,7 @@ where } /// Drops the token without cancelling the task. -unsafe fn drop(ptr: *const ()) +unsafe fn drop(ptr: *const ()) where F: Future + Send + 'static, F::Output: Send + 'static, @@ -180,7 +180,7 @@ impl CancelToken { /// allocator, /// - the reference count has been incremented to account for this new task /// reference. - pub(super) unsafe fn new_unchecked(task: *const Task) -> Self + pub(super) unsafe fn new_unchecked(task: *const Task) -> Self where F: Future + Send + 'static, F::Output: Send + 'static, diff --git a/asynchronix/src/executor/task/promise.rs b/asynchronix/src/executor/task/promise.rs index 7504d26..47d56f9 100644 --- a/asynchronix/src/executor/task/promise.rs +++ b/asynchronix/src/executor/task/promise.rs @@ -20,7 +20,7 @@ struct VTable { } /// Retrieves the output of the task if ready. -unsafe fn poll(ptr: *const ()) -> Stage +unsafe fn poll(ptr: *const ()) -> Stage where F: Future + Send + 'static, F::Output: Send + 'static, @@ -62,7 +62,7 @@ where } /// Drops the promise. 
-unsafe fn drop(ptr: *const ()) +unsafe fn drop(ptr: *const ()) where F: Future + Send + 'static, F::Output: Send + 'static, diff --git a/asynchronix/src/executor/task/runnable.rs b/asynchronix/src/executor/task/runnable.rs index 792af3b..d5162c6 100644 --- a/asynchronix/src/executor/task/runnable.rs +++ b/asynchronix/src/executor/task/runnable.rs @@ -22,7 +22,7 @@ struct VTable { } /// Polls the inner future. -unsafe fn run(ptr: *const ()) +unsafe fn run(ptr: *const ()) where F: Future + Send + 'static, F::Output: Send + 'static, From af3d68e76f3612d1a34017d092e139f2c0516b77 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Tue, 7 May 2024 01:37:47 +0200 Subject: [PATCH 07/12] Force the waker VTable to be uniquely instantiated From Rust 1.78, `Waker::will_wake` tests equality by comparing the VTable pointers rather than the content of the VTable. Unfortunately, this exposes some instability in the code generation which sometimes causes several VTables to be instantiated in memory for the same generic parameters. This can in turn defeat `Waker::will_wake` if e.g. `Waker::clone` and `Waker::wake_by_*` end up with different pointers. The problem is hopefully addressed by preventing inlining of the VTable generation function. A test has been added to try to detect regression, though the test may not be 100% reliable.
--- .github/workflows/loom.yml | 6 ++- asynchronix/src/executor/task.rs | 40 +++++++++++++---- asynchronix/src/executor/task/runnable.rs | 4 +- .../src/executor/task/tests/general.rs | 43 +++++++++++++++++++ 4 files changed, 81 insertions(+), 12 deletions(-) diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index e739386..016e9a0 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -10,8 +10,10 @@ on: - 'asynchronix/src/executor/task.rs' - 'asynchronix/src/executor/task/**' - 'asynchronix/src/loom_exports.rs' - - 'asynchronix/src/model/ports/broadcaster.rs' - - 'asynchronix/src/model/ports/broadcaster/**' + - 'asynchronix/src/ports/output/broadcaster.rs' + - 'asynchronix/src/ports/output/broadcaster/**' + - 'asynchronix/src/ports/source/broadcaster.rs' + - 'asynchronix/src/ports/source/broadcaster/**' - 'asynchronix/src/util/slot.rs' - 'asynchronix/src/util/sync_cell.rs' diff --git a/asynchronix/src/executor/task.rs b/asynchronix/src/executor/task.rs index 3b8d1e3..9236de9 100644 --- a/asynchronix/src/executor/task.rs +++ b/asynchronix/src/executor/task.rs @@ -125,13 +125,6 @@ where S: Fn(Runnable, T) + Send + Sync + 'static, T: Clone + Send + Sync + 'static, { - const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new( - Self::clone_waker, - Self::wake_by_val, - Self::wake_by_ref, - Self::drop_waker, - ); - /// Clones a waker. unsafe fn clone_waker(ptr: *const ()) -> RawWaker { let this = &*(ptr as *const Self); @@ -141,7 +134,7 @@ where panic!("Attack of the clones: the waker was cloned too many times"); } - RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE) + RawWaker::new(ptr, raw_waker_vtable::()) } /// Wakes the task by value. @@ -287,6 +280,37 @@ where } } +/// Returns a reference to the waker's virtual table. 
+/// +/// Unfortunately, Rust will sometimes create multiple memory instances of the +/// virtual table for the same generic parameters, which defeats +/// `Waker::will_wake` as the latter tests the pointers to the virtual tables +/// for equality. +/// +/// Forcing the function to be inlined appears to solve this problem, but we may +/// want to investigate more robust methods. Tokio has [switched][1] to a single +/// non-generic virtual table declared as `static`, which then delegates each +/// call with another virtual call. This does ensure that `Waker::will_wake` +/// will always work, but the double indirection is a bit unfortunate and its +/// cost would need to be evaluated. +/// +/// [1]: https://github.com/tokio-rs/tokio/pull/5213 +#[inline(never)] +fn raw_waker_vtable() -> &'static RawWakerVTable +where + F: Future + Send + 'static, + F::Output: Send + 'static, + S: Fn(Runnable, T) + Send + Sync + 'static, + T: Clone + Send + Sync + 'static, +{ + &RawWakerVTable::new( + Task::::clone_waker, + Task::::wake_by_val, + Task::::wake_by_ref, + Task::::drop_waker, + ) +} + /// Spawns a task. /// /// An arbitrary tag can be attached to the task, a clone of which will be diff --git a/asynchronix/src/executor/task/runnable.rs b/asynchronix/src/executor/task/runnable.rs index d5162c6..59b719a 100644 --- a/asynchronix/src/executor/task/runnable.rs +++ b/asynchronix/src/executor/task/runnable.rs @@ -11,7 +11,7 @@ use crate::loom_exports::debug_or_loom_assert; use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering}; use super::util::RunOnDrop; -use super::Task; +use super::{raw_waker_vtable, Task}; use super::{CLOSED, POLLING, REF_MASK, WAKE_MASK}; /// Virtual table for a `Runnable`. @@ -77,7 +77,7 @@ where } // Poll the task. 
- let raw_waker = RawWaker::new(ptr, &Task::::RAW_WAKER_VTABLE); + let raw_waker = RawWaker::new(ptr, raw_waker_vtable::()); let waker = ManuallyDrop::new(Waker::from_raw(raw_waker)); let cx = &mut Context::from_waker(&waker); diff --git a/asynchronix/src/executor/task/tests/general.rs b/asynchronix/src/executor/task/tests/general.rs index beee857..00c42b7 100644 --- a/asynchronix/src/executor/task/tests/general.rs +++ b/asynchronix/src/executor/task/tests/general.rs @@ -136,6 +136,28 @@ impl Drop for MonitoredFuture { } } +// A future that checks whether the waker cloned from the first call to `poll` +// tests equal with `Waker::will_wake` on the second call to `poll`. +struct WillWakeFuture { + waker: Arc>>, +} +impl Future for WillWakeFuture { + type Output = bool; + + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let waker = &mut self.waker.lock().unwrap(); + + match waker.as_ref() { + None => { + **waker = Some(cx.waker().clone()); + + Poll::Pending + } + Some(waker) => Poll::Ready(waker.will_wake(cx.waker())), + } + } +} + #[test] fn task_schedule() { test_prelude!(); @@ -623,3 +645,24 @@ fn task_drop_cycle() { assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3); } + +#[test] +fn task_will_wake() { + test_prelude!(); + + let waker = Arc::new(Mutex::new(None)); + let future = WillWakeFuture { + waker: waker.clone(), + }; + + let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ()); + runnable.run(); + + assert!(promise.poll().is_pending()); + + // Wake the future so it is scheduled another time. 
+ waker.lock().unwrap().as_ref().unwrap().wake_by_ref(); + assert!(run_scheduled_runnable()); + + assert_eq!(promise.poll(), Stage::Ready(true)); +} From 2fa159a87f14dd16cdce39b8e8dd78a674ad733c Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Tue, 7 May 2024 12:44:42 +0200 Subject: [PATCH 08/12] Fix comments --- asynchronix/src/executor/task.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/asynchronix/src/executor/task.rs b/asynchronix/src/executor/task.rs index 9236de9..3b3bd5f 100644 --- a/asynchronix/src/executor/task.rs +++ b/asynchronix/src/executor/task.rs @@ -287,12 +287,12 @@ where /// `Waker::will_wake` as the latter tests the pointers to the virtual tables /// for equality. /// -/// Forcing the function to be inlined appears to solve this problem, but we may -/// want to investigate more robust methods. Tokio has [switched][1] to a single -/// non-generic virtual table declared as `static`, which then delegates each -/// call with another virtual call. This does ensure that `Waker::will_wake` -/// will always work, but the double indirection is a bit unfortunate and its -/// cost would need to be evaluated. +/// Preventing the function from being inlined appears to solve this problem, +/// but we may want to investigate more robust methods. For unrelated reasons, +/// Tokio has switched [1] to a single non-generic virtual table declared as +/// `static` which then delegates each call to another virtual call. This does +/// ensure that `Waker::will_wake` will always work, but the double indirection +/// is a bit unfortunate and its cost would need to be evaluated. 
/// /// [1]: https://github.com/tokio-rs/tokio/pull/5213 #[inline(never)] From 02eec1b2773b65acaefd71b67d9683a48539badf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ja=C5=ADhien=20Piatlicki?= Date: Mon, 6 May 2024 15:31:50 +0200 Subject: [PATCH 09/12] Add test and improve example --- .github/workflows/loom.yml | 3 +- asynchronix/src/loom_exports.rs | 5 +- asynchronix/src/ports.rs | 46 +++++++--- asynchronix/src/util/cached_rw_lock.rs | 117 +++++++++++++++++++------ 4 files changed, 129 insertions(+), 42 deletions(-) diff --git a/.github/workflows/loom.yml b/.github/workflows/loom.yml index 016e9a0..23a7106 100644 --- a/.github/workflows/loom.yml +++ b/.github/workflows/loom.yml @@ -14,6 +14,7 @@ on: - 'asynchronix/src/ports/output/broadcaster/**' - 'asynchronix/src/ports/source/broadcaster.rs' - 'asynchronix/src/ports/source/broadcaster/**' + - 'asynchronix/src/util/cached_rw_lock.rs' - 'asynchronix/src/util/slot.rs' - 'asynchronix/src/util/sync_cell.rs' @@ -31,4 +32,4 @@ jobs: - name: Run cargo test (Loom) run: cargo test --tests --release env: - RUSTFLAGS: --cfg asynchronix_loom \ No newline at end of file + RUSTFLAGS: --cfg asynchronix_loom diff --git a/asynchronix/src/loom_exports.rs b/asynchronix/src/loom_exports.rs index d340569..df03e12 100644 --- a/asynchronix/src/loom_exports.rs +++ b/asynchronix/src/loom_exports.rs @@ -1,7 +1,8 @@ #[cfg(asynchronix_loom)] #[allow(unused_imports)] pub(crate) mod sync { - pub(crate) use loom::sync::{Arc, Mutex}; + pub(crate) use loom::sync::{Arc, LockResult, Mutex, MutexGuard}; + pub(crate) use std::sync::PoisonError; pub(crate) mod atomic { pub(crate) use loom::sync::atomic::{ @@ -12,7 +13,7 @@ pub(crate) mod sync { #[cfg(not(asynchronix_loom))] #[allow(unused_imports)] pub(crate) mod sync { - pub(crate) use std::sync::{Arc, Mutex}; + pub(crate) use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; pub(crate) mod atomic { pub(crate) use std::sync::atomic::{ diff --git a/asynchronix/src/ports.rs 
b/asynchronix/src/ports.rs index c764302..60c6821 100644 --- a/asynchronix/src/ports.rs +++ b/asynchronix/src/ports.rs @@ -19,33 +19,55 @@ //! //! #### Example //! -//! The outputs in this example are clones of each other and remain therefore -//! always connected to the same inputs. For an example usage of outputs cloning -//! in submodels assemblies, see the [`assembly example`][assembly]. +//! This example demonstrates a submodel inside a parent model. The output of +//! the submodel is a clone of the parent model output. Both outputs remain +//! therefore always connected to the same inputs. +//! +//! For a more comprehensive example demonstrating output cloning in submodels +//! assemblies, see the [`assembly example`][assembly]. //! //! [assembly]: //! https://github.com/asynchronics/asynchronix/tree/main/asynchronix/examples/assembly.rs //! //! ``` -//! use asynchronix::model::Model; +//! use asynchronix::model::{Model, SetupContext}; //! use asynchronix::ports::Output; +//! use asynchronix::simulation::Mailbox; //! -//! pub struct MyModel { -//! pub output_a: Output, -//! pub output_b: Output, +//! pub struct ChildModel { +//! pub output: Output, //! } //! -//! impl MyModel { +//! impl ChildModel { //! pub fn new() -> Self { -//! let output: Output<_> = Default::default(); //! Self { -//! output_a: output.clone(), -//! output_b: output, +//! output: Default::default(), //! } //! } //! } //! -//! impl Model for MyModel {} +//! impl Model for ChildModel {} +//! +//! pub struct ParentModel { +//! pub output: Output, +//! } +//! +//! impl ParentModel { +//! pub fn new() -> Self { +//! Self { +//! output: Default::default(), +//! } +//! } +//! } +//! +//! impl Model for ParentModel { +//! fn setup(&mut self, setup_context: &SetupContext) { +//! let mut child = ChildModel::new(); +//! let child_mbox = Mailbox::new(); +//! child.output = self.output.clone(); +//! setup_context.add_model(child, child_mbox); +//! } +//! } //! 
``` mod input; diff --git a/asynchronix/src/util/cached_rw_lock.rs b/asynchronix/src/util/cached_rw_lock.rs index eba8bf1..d2a9125 100644 --- a/asynchronix/src/util/cached_rw_lock.rs +++ b/asynchronix/src/util/cached_rw_lock.rs @@ -1,6 +1,7 @@ use std::ops::{Deref, DerefMut}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; + +use crate::loom_exports::sync::atomic::{AtomicUsize, Ordering}; +use crate::loom_exports::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; /// A cached read-write lock. /// @@ -13,10 +14,9 @@ use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError}; /// synchronization. #[derive(Clone)] pub(crate) struct CachedRwLock { - local: T, - local_epoch: usize, - shared: Arc>, - epoch: Arc, + value: T, + epoch: usize, + shared: Arc>, } impl CachedRwLock { @@ -24,61 +24,63 @@ impl CachedRwLock { pub(crate) fn new(t: T) -> Self { let shared = t.clone(); Self { - local: t, - local_epoch: 0, - shared: Arc::new(Mutex::new(shared)), - epoch: Arc::new(AtomicUsize::new(0)), + value: t, + epoch: 0, + shared: Arc::new(Shared { + value: Mutex::new(shared), + epoch: AtomicUsize::new(0), + }), } } /// Gives access to the local cache without synchronization. pub(crate) fn read_unsync(&self) -> &T { - &self.local + &self.value } /// Synchronizes the local cache if it is behind the shared data and gives /// access to it. 
#[allow(dead_code)] pub(crate) fn read(&mut self) -> LockResult<&T> { - if self.epoch.load(Ordering::Relaxed) != self.local_epoch { - match self.shared.lock() { + if self.shared.epoch.load(Ordering::Relaxed) != self.epoch { + match self.shared.value.lock() { LockResult::Ok(shared) => { - self.local = shared.clone(); - self.local_epoch = self.epoch.load(Ordering::Relaxed) + self.value = shared.clone(); + self.epoch = self.shared.epoch.load(Ordering::Relaxed) } - LockResult::Err(_) => return LockResult::Err(PoisonError::new(&self.local)), + LockResult::Err(_) => return LockResult::Err(PoisonError::new(&self.value)), } } - LockResult::Ok(&self.local) + LockResult::Ok(&self.value) } /// Gives write access to the local cache without synchronization so it can /// be used as a scratchpad. #[allow(dead_code)] pub(crate) fn write_scratchpad_unsync(&mut self) -> &mut T { - &mut self.local + &mut self.value } /// Synchronizes the local cache if it is behind the shared data and gives /// write access to it so it can be used as a scratchpad. pub(crate) fn write_scratchpad(&mut self) -> LockResult<&mut T> { - if self.epoch.load(Ordering::Relaxed) != self.local_epoch { - match self.shared.lock() { + if self.shared.epoch.load(Ordering::Relaxed) != self.epoch { + match self.shared.value.lock() { LockResult::Ok(shared) => { - self.local = shared.clone(); - self.local_epoch = self.epoch.load(Ordering::Relaxed) + self.value = shared.clone(); + self.epoch = self.shared.epoch.load(Ordering::Relaxed) } - LockResult::Err(_) => return LockResult::Err(PoisonError::new(&mut self.local)), + LockResult::Err(_) => return LockResult::Err(PoisonError::new(&mut self.value)), } } - LockResult::Ok(&mut self.local) + LockResult::Ok(&mut self.value) } /// Acquires a write lock on the shared data. 
pub(crate) fn write(&mut self) -> LockResult> { - let guard = self.shared.lock(); - let epoch = self.epoch.load(Ordering::Relaxed) + 1; - self.epoch.store(epoch, Ordering::Relaxed); + let guard = self.shared.value.lock(); + let epoch = self.shared.epoch.load(Ordering::Relaxed) + 1; + self.shared.epoch.store(epoch, Ordering::Relaxed); match guard { LockResult::Ok(shared) => LockResult::Ok(CachedRwLockWriteGuard { guard: shared }), @@ -89,6 +91,11 @@ impl CachedRwLock { } } +struct Shared { + epoch: AtomicUsize, + value: Mutex, +} + /// Write guard. /// /// The lock is released when the guard is dropped. @@ -109,3 +116,59 @@ impl DerefMut for CachedRwLockWriteGuard<'_, T> { &mut self.guard } } + +#[cfg(all(test, asynchronix_loom))] +mod tests { + use super::*; + + use loom::model::Builder; + use loom::thread; + + #[test] + fn loom_cached_rw_lock_write() { + const DEFAULT_PREEMPTION_BOUND: usize = 4; + const ITERATIONS_NUMBER: usize = 5; + + let mut builder = Builder::new(); + if builder.preemption_bound.is_none() { + builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND); + } + + builder.check(move || { + let mut writer0: CachedRwLock = CachedRwLock::new(0); + let mut writer1 = writer0.clone(); + let mut reader = writer0.clone(); + + let th_w = thread::spawn(move || { + for _ in 0..ITERATIONS_NUMBER { + let mut guard = writer0.write().unwrap(); + *guard = *guard + 1; + } + }); + + let th_r = thread::spawn(move || { + let mut value = 0; + let mut prev_value; + for _ in 0..ITERATIONS_NUMBER { + prev_value = value; + value = *reader.write_scratchpad().unwrap(); + assert!( + prev_value <= value, + "Previous value = {}, value = {}", + prev_value, + value + ); + assert_eq!(value, reader.epoch); + } + }); + + for _ in 0..ITERATIONS_NUMBER { + let mut guard = writer1.write().unwrap(); + *guard = *guard + 1; + } + + th_w.join().unwrap(); + th_r.join().unwrap(); + }); + } +} From 195bcdceba30c41d50150af6c90b6ab4dc9d26d9 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Ja=C5=ADhien=20Piatlicki?= Date: Fri, 10 May 2024 10:54:45 +0200 Subject: [PATCH 10/12] Add instance name to model contexts --- asynchronix/examples/assembly.rs | 8 +++-- asynchronix/examples/espresso_machine.rs | 6 ++-- asynchronix/examples/power_supply.rs | 8 ++--- asynchronix/examples/stepper_motor.rs | 35 ++++++++++++++++++--- asynchronix/src/lib.rs | 16 +++++----- asynchronix/src/model/context.rs | 30 ++++++++++++++++-- asynchronix/src/ports.rs | 3 +- asynchronix/src/ports/output/broadcaster.rs | 16 +++++++--- asynchronix/src/ports/source/broadcaster.rs | 16 +++++++--- asynchronix/src/simulation.rs | 3 +- asynchronix/src/simulation/sim_init.rs | 20 ++++++++++-- asynchronix/tests/model_scheduling.rs | 10 +++--- asynchronix/tests/simulation_scheduling.rs | 4 +-- 13 files changed, 131 insertions(+), 44 deletions(-) diff --git a/asynchronix/examples/assembly.rs b/asynchronix/examples/assembly.rs index e550656..8a88a6c 100644 --- a/asynchronix/examples/assembly.rs +++ b/asynchronix/examples/assembly.rs @@ -79,8 +79,8 @@ impl Model for MotorAssembly { // clones. motor.position = self.position.clone(); - setup_context.add_model(driver, driver_mbox); - setup_context.add_model(motor, motor_mbox); + setup_context.add_model(driver, driver_mbox, "driver"); + setup_context.add_model(motor, motor_mbox, "motor"); } } @@ -105,7 +105,9 @@ fn main() { let t0 = MonotonicTime::EPOCH; // Assembly and initialization. - let mut simu = SimInit::new().add_model(assembly, assembly_mbox).init(t0); + let mut simu = SimInit::new() + .add_model(assembly, assembly_mbox, "assembly") + .init(t0); // ---------- // Simulation. diff --git a/asynchronix/examples/espresso_machine.rs b/asynchronix/examples/espresso_machine.rs index bd6b1a6..fcab7c3 100644 --- a/asynchronix/examples/espresso_machine.rs +++ b/asynchronix/examples/espresso_machine.rs @@ -368,9 +368,9 @@ fn main() { // Assembly and initialization. 
let mut simu = SimInit::new() - .add_model(controller, controller_mbox) - .add_model(pump, pump_mbox) - .add_model(tank, tank_mbox) + .add_model(controller, controller_mbox, "controller") + .add_model(pump, pump_mbox, "pump") + .add_model(tank, tank_mbox, "tank") .init(t0); // ---------- diff --git a/asynchronix/examples/power_supply.rs b/asynchronix/examples/power_supply.rs index bce7b9c..ab694bf 100644 --- a/asynchronix/examples/power_supply.rs +++ b/asynchronix/examples/power_supply.rs @@ -140,10 +140,10 @@ fn main() { // Assembly and initialization. let mut simu = SimInit::new() - .add_model(psu, psu_mbox) - .add_model(load1, load1_mbox) - .add_model(load2, load2_mbox) - .add_model(load3, load3_mbox) + .add_model(psu, psu_mbox, "psu") + .add_model(load1, load1_mbox, "load1") + .add_model(load2, load2_mbox, "load2") + .add_model(load3, load3_mbox, "load3") .init(t0); // ---------- diff --git a/asynchronix/examples/stepper_motor.rs b/asynchronix/examples/stepper_motor.rs index c733af5..5b2130a 100644 --- a/asynchronix/examples/stepper_motor.rs +++ b/asynchronix/examples/stepper_motor.rs @@ -53,8 +53,15 @@ impl Motor { /// For the sake of simplicity, we do as if the rotor rotates /// instantaneously. If the current is too weak to overcome the load or when /// attempting to move to an opposite phase, the position remains unchanged. - pub async fn current_in(&mut self, current: (f64, f64)) { + pub async fn current_in(&mut self, current: (f64, f64), context: &Context) { assert!(!current.0.is_nan() && !current.1.is_nan()); + println!( + "Model instance {} at time {}: setting currents: {:.2} and {:.2}", + context.name(), + context.time(), + current.0, + current.1 + ); let (target_phase, abs_current) = match (current.0 != 0.0, current.1 != 0.0) { (false, false) => return, @@ -78,9 +85,16 @@ impl Motor { } /// Torque applied by the load [N·m] -- input port. 
- pub fn load(&mut self, torque: f64) { + pub fn load(&mut self, torque: f64, context: &Context) { assert!(torque >= 0.0); + println!( + "Model instance {} at time {}: setting load: {:.2}", + context.name(), + context.time(), + torque + ); + self.torque = torque; } } @@ -124,6 +138,13 @@ impl Driver { /// Sets the pulse rate (sign = direction) [Hz] -- input port. pub async fn pulse_rate(&mut self, pps: f64, context: &Context) { + println!( + "Model instance {} at time {}: setting pps: {:.2}", + context.name(), + context.time(), + pps + ); + let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS); if pps == self.pps { return; @@ -148,6 +169,12 @@ impl Driver { _: (), context: &'a Context, ) -> impl Future + Send + 'a { + println!( + "Model instance {} at time {}: sending pulse", + context.name(), + context.time() + ); + async move { let current_out = match self.next_phase { 0 => (self.current, 0.0), @@ -205,8 +232,8 @@ fn main() { // Assembly and initialization. let mut simu = SimInit::new() - .add_model(driver, driver_mbox) - .add_model(motor, motor_mbox) + .add_model(driver, driver_mbox, "driver") + .add_model(motor, motor_mbox, "motor") .init(t0); // ---------- diff --git a/asynchronix/src/lib.rs b/asynchronix/src/lib.rs index c9c7c76..b8a2954 100644 --- a/asynchronix/src/lib.rs +++ b/asynchronix/src/lib.rs @@ -231,10 +231,10 @@ //! // Pick an arbitrary simulation start time and build the simulation. //! let t0 = MonotonicTime::EPOCH; //! let mut simu = SimInit::new() -//! .add_model(multiplier1, multiplier1_mbox) -//! .add_model(multiplier2, multiplier2_mbox) -//! .add_model(delay1, delay1_mbox) -//! .add_model(delay2, delay2_mbox) +//! .add_model(multiplier1, multiplier1_mbox, "multiplier1") +//! .add_model(multiplier2, multiplier2_mbox, "multiplier2") +//! .add_model(delay1, delay1_mbox, "delay1") +//! .add_model(delay2, delay2_mbox, "delay2") //! .init(t0); //! ``` //! @@ -319,10 +319,10 @@ //! 
# let input_address = multiplier1_mbox.address(); //! # let t0 = MonotonicTime::EPOCH; //! # let mut simu = SimInit::new() -//! # .add_model(multiplier1, multiplier1_mbox) -//! # .add_model(multiplier2, multiplier2_mbox) -//! # .add_model(delay1, delay1_mbox) -//! # .add_model(delay2, delay2_mbox) +//! # .add_model(multiplier1, multiplier1_mbox, "multiplier1") +//! # .add_model(multiplier2, multiplier2_mbox, "multiplier2") +//! # .add_model(delay1, delay1_mbox, "delay1") +//! # .add_model(delay2, delay2_mbox, "delay2") //! # .init(t0); //! // Send a value to the first multiplier. //! simu.process_event(Multiplier::input, 21.0, &input_address); diff --git a/asynchronix/src/model/context.rs b/asynchronix/src/model/context.rs index 24e0c94..2336196 100644 --- a/asynchronix/src/model/context.rs +++ b/asynchronix/src/model/context.rs @@ -81,6 +81,7 @@ use super::Model; // The self-scheduling caveat seems related to this issue: // https://github.com/rust-lang/rust/issues/78649 pub struct Context { + name: String, sender: Sender, scheduler_queue: Arc>, time: SyncCellReader, @@ -89,17 +90,24 @@ pub struct Context { impl Context { /// Creates a new local context. pub(crate) fn new( + name: String, sender: Sender, scheduler_queue: Arc>, time: SyncCellReader, ) -> Self { Self { + name, sender, scheduler_queue, time, } } + /// Returns the model instance name. + pub fn name(&self) -> &str { + &self.name + } + /// Returns the current simulation time. 
/// /// # Examples @@ -440,11 +448,13 @@ impl fmt::Debug for Context { /// let b = SubmodelB::default(); /// let a_mbox = Mailbox::new(); /// let b_mbox = Mailbox::new(); +/// let a_name = setup_context.name().to_string() + "::a"; +/// let b_name = setup_context.name().to_string() + "::b"; /// /// a.out.connect(SubmodelB::input, &b_mbox); /// -/// setup_context.add_model(a, a_mbox); -/// setup_context.add_model(b, b_mbox); +/// setup_context.add_model(a, a_mbox, a_name); +/// setup_context.add_model(b, b_mbox, b_name); /// } /// } /// @@ -472,11 +482,25 @@ impl<'a, M: Model> SetupContext<'a, M> { } } + /// Returns the model instance name. + pub fn name(&self) -> &str { + &self.context.name + } + /// Adds a new model and its mailbox to the simulation bench. - pub fn add_model(&self, model: N, mailbox: Mailbox) { + /// + /// The `name` argument needs not be unique (it can be an empty string) and + /// is used for convenience for model instance identification (e.g. for + /// logging purposes). + pub fn add_model(&self, model: N, mailbox: Mailbox, name: impl Into) { + let mut submodel_name = name.into(); + if !self.context.name().is_empty() && !submodel_name.is_empty() { + submodel_name = self.context.name().to_string() + "." + &submodel_name; + } simulation::add_model( model, mailbox, + submodel_name, self.context.scheduler_queue.clone(), self.context.time.clone(), self.executor, diff --git a/asynchronix/src/ports.rs b/asynchronix/src/ports.rs index 60c6821..17362ec 100644 --- a/asynchronix/src/ports.rs +++ b/asynchronix/src/ports.rs @@ -65,7 +65,8 @@ //! let mut child = ChildModel::new(); //! let child_mbox = Mailbox::new(); //! child.output = self.output.clone(); -//! setup_context.add_model(child, child_mbox); +//! let child_name = setup_context.name().to_string() + "::child"; +//! setup_context.add_model(child, child_mbox, child_name); //! } //! } //! 
``` diff --git a/asynchronix/src/ports/output/broadcaster.rs b/asynchronix/src/ports/output/broadcaster.rs index cb39f52..b960bf8 100644 --- a/asynchronix/src/ports/output/broadcaster.rs +++ b/asynchronix/src/ports/output/broadcaster.rs @@ -614,8 +614,12 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_context = - Context::new(dummy_address, dummy_priority_queue, dummy_time); + let dummy_context = Context::new( + String::new(), + dummy_address, + dummy_priority_queue, + dummy_time, + ); block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); } }) @@ -665,8 +669,12 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_context = - Context::new(dummy_address, dummy_priority_queue, dummy_time); + let dummy_context = Context::new( + String::new(), + dummy_address, + dummy_priority_queue, + dummy_time, + ); block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); thread::sleep(std::time::Duration::from_millis(100)); } diff --git a/asynchronix/src/ports/source/broadcaster.rs b/asynchronix/src/ports/source/broadcaster.rs index cff1d50..95a07aa 100644 --- a/asynchronix/src/ports/source/broadcaster.rs +++ b/asynchronix/src/ports/source/broadcaster.rs @@ -497,8 +497,12 @@ mod tests { let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_context = - Context::new(dummy_address, dummy_priority_queue, dummy_time); + let dummy_context = Context::new( + String::new(), + dummy_address, + dummy_priority_queue, + dummy_time, + ); block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); } }) @@ -548,8 +552,12 @@ mod tests { let dummy_priority_queue = 
Arc::new(Mutex::new(PriorityQueue::new())); let dummy_time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader(); - let dummy_context = - Context::new(dummy_address, dummy_priority_queue, dummy_time); + let dummy_context = Context::new( + String::new(), + dummy_address, + dummy_priority_queue, + dummy_time, + ); block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap(); thread::sleep(std::time::Duration::from_millis(100)); } diff --git a/asynchronix/src/simulation.rs b/asynchronix/src/simulation.rs index 1d8d5f4..6be841e 100644 --- a/asynchronix/src/simulation.rs +++ b/asynchronix/src/simulation.rs @@ -666,13 +666,14 @@ impl Error for QueryError {} pub(crate) fn add_model( mut model: M, mailbox: Mailbox, + name: String, scheduler_queue: Arc>, time: SyncCellReader, executor: &Executor, ) { let sender = mailbox.0.sender(); - let context = Context::new(sender, scheduler_queue, time); + let context = Context::new(name, sender, scheduler_queue, time); let setup_context = SetupContext::new(&mailbox, &context, executor); model.setup(&setup_context); diff --git a/asynchronix/src/simulation/sim_init.rs b/asynchronix/src/simulation/sim_init.rs index ae22589..a8527ca 100644 --- a/asynchronix/src/simulation/sim_init.rs +++ b/asynchronix/src/simulation/sim_init.rs @@ -41,11 +41,27 @@ impl SimInit { } /// Adds a model and its mailbox to the simulation bench. - pub fn add_model(self, model: M, mailbox: Mailbox) -> Self { + /// + /// The `name` argument needs not be unique (it can be the empty string) and + /// is used for convenience for the model instance identification (e.g. for + /// logging purposes). 
+ pub fn add_model( + self, + model: M, + mailbox: Mailbox, + name: impl Into, + ) -> Self { let scheduler_queue = self.scheduler_queue.clone(); let time = self.time.reader(); - add_model(model, mailbox, scheduler_queue, time, &self.executor); + add_model( + model, + mailbox, + name.into(), + scheduler_queue, + time, + &self.executor, + ); self } diff --git a/asynchronix/tests/model_scheduling.rs b/asynchronix/tests/model_scheduling.rs index 3f4afce..2a96408 100644 --- a/asynchronix/tests/model_scheduling.rs +++ b/asynchronix/tests/model_scheduling.rs @@ -33,7 +33,7 @@ fn model_schedule_event() { let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; - let mut simu = SimInit::new().add_model(model, mbox).init(t0); + let mut simu = SimInit::new().add_model(model, mbox, "").init(t0); simu.process_event(TestModel::trigger, (), addr); simu.step(); @@ -78,7 +78,7 @@ fn model_cancel_future_keyed_event() { let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; - let mut simu = SimInit::new().add_model(model, mbox).init(t0); + let mut simu = SimInit::new().add_model(model, mbox, "").init(t0); simu.process_event(TestModel::trigger, (), addr); simu.step(); @@ -124,7 +124,7 @@ fn model_cancel_same_time_keyed_event() { let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; - let mut simu = SimInit::new().add_model(model, mbox).init(t0); + let mut simu = SimInit::new().add_model(model, mbox, "").init(t0); simu.process_event(TestModel::trigger, (), addr); simu.step(); @@ -166,7 +166,7 @@ fn model_schedule_periodic_event() { let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; - let mut simu = SimInit::new().add_model(model, mbox).init(t0); + let mut simu = SimInit::new().add_model(model, mbox, "").init(t0); simu.process_event(TestModel::trigger, (), addr); @@ -216,7 +216,7 @@ fn model_cancel_periodic_event() { let addr = mbox.address(); let t0 = MonotonicTime::EPOCH; - let mut simu = SimInit::new().add_model(model, mbox).init(t0); + let mut simu = 
SimInit::new().add_model(model, mbox, "").init(t0); simu.process_event(TestModel::trigger, (), addr); diff --git a/asynchronix/tests/simulation_scheduling.rs b/asynchronix/tests/simulation_scheduling.rs index 6919091..3076931 100644 --- a/asynchronix/tests/simulation_scheduling.rs +++ b/asynchronix/tests/simulation_scheduling.rs @@ -38,7 +38,7 @@ fn passthrough_bench( model.output.connect_sink(&out_stream); let addr = mbox.address(); - let simu = SimInit::new().add_model(model, mbox).init(t0); + let simu = SimInit::new().add_model(model, mbox, "").init(t0); (simu, addr, out_stream) } @@ -246,7 +246,7 @@ fn timestamp_bench( let addr = mbox.address(); let simu = SimInit::new() - .add_model(model, mbox) + .add_model(model, mbox, "") .set_clock(clock) .init(t0); From 77e6e569ffbc4c016d180d44f567111c53e8edaa Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Tue, 7 May 2024 17:30:11 +0200 Subject: [PATCH 11/12] Add same-thread executor support --- .github/workflows/ci.yml | 42 +- asynchronix/src/dev_hooks.rs | 2 +- asynchronix/src/executor.rs | 678 +++++------------- asynchronix/src/executor/mt_executor.rs | 576 +++++++++++++++ .../executor/{ => mt_executor}/injector.rs | 0 .../{ => mt_executor}/pool_manager.rs | 0 asynchronix/src/executor/st_executor.rs | 244 +++++++ asynchronix/src/executor/tests.rs | 140 ---- asynchronix/src/executor/worker.rs | 25 - asynchronix/src/macros/scoped_thread_local.rs | 32 +- asynchronix/src/rpc/generic_server.rs | 4 +- asynchronix/src/simulation/sim_init.rs | 19 +- 12 files changed, 1066 insertions(+), 696 deletions(-) create mode 100644 asynchronix/src/executor/mt_executor.rs rename asynchronix/src/executor/{ => mt_executor}/injector.rs (100%) rename asynchronix/src/executor/{ => mt_executor}/pool_manager.rs (100%) create mode 100644 asynchronix/src/executor/st_executor.rs delete mode 100644 asynchronix/src/executor/tests.rs delete mode 100644 asynchronix/src/executor/worker.rs diff --git a/.github/workflows/ci.yml 
b/.github/workflows/ci.yml index b0b9912..1821e5a 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -3,7 +3,7 @@ name: CI on: pull_request: push: - branches: [ main, dev ] + branches: [main, dev] env: RUSTFLAGS: -Dwarnings @@ -42,7 +42,7 @@ jobs: - name: Run cargo test run: cargo test --features="rpc grpc-server" - + loom-dry-run: name: Loom dry run runs-on: ubuntu-latest @@ -70,23 +70,53 @@ jobs: with: components: miri - - name: Run cargo miri tests + - name: Run cargo miri tests (single-threaded executor) + run: cargo miri test --tests --lib --features="rpc grpc-server" + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 + + - name: Run cargo miri tests (multi-threaded executor) run: cargo miri test --tests --lib --features="rpc grpc-server" env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 - - name: Run cargo miri example1 + - name: Run cargo miri example1 (single-threaded executor) + run: cargo miri run --example espresso_machine + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 + + - name: Run cargo miri example1 (multi-threaded executor) run: cargo miri run --example espresso_machine env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 - - name: Run cargo miri example2 + - name: Run cargo miri example2 (single-threaded executor) + run: cargo miri run --example power_supply + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 + + - name: Run cargo miri example2 (multi-threaded executor) run: cargo miri run --example power_supply env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 - - name: Run cargo miri example3 + - name: Run cargo miri example3 (single-threaded executor) run: cargo miri run --example stepper_motor + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 + + - name: Run cargo miri 
example3 (multi-threaded executor) + run: cargo miri run --example stepper_motor + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 + + - name: Run cargo miri example4 (single-threaded executor) + run: cargo miri run --example assembly + env: + MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 + + - name: Run cargo miri example4 (multi-threaded executor) + run: cargo miri run --example assembly env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 diff --git a/asynchronix/src/dev_hooks.rs b/asynchronix/src/dev_hooks.rs index 96d948c..f79102f 100644 --- a/asynchronix/src/dev_hooks.rs +++ b/asynchronix/src/dev_hooks.rs @@ -15,7 +15,7 @@ impl Executor { /// /// The maximum number of threads is set with the `pool_size` parameter. pub fn new(pool_size: usize) -> Self { - Self(executor::Executor::new(pool_size)) + Self(executor::Executor::new_multi_threaded(pool_size)) } /// Spawns a task which output will never be retrieved. diff --git a/asynchronix/src/executor.rs b/asynchronix/src/executor.rs index b33a603..3d5a8de 100644 --- a/asynchronix/src/executor.rs +++ b/asynchronix/src/executor.rs @@ -1,98 +1,30 @@ -//! Multi-threaded `async` executor. -//! -//! The executor is exclusively designed for message-passing computational -//! tasks. As such, it does not include an I/O reactor and does not consider -//! fairness as a goal in itself. While it does use fair local queues inasmuch -//! as these tend to perform better in message-passing applications, it uses an -//! unfair injection queue and a LIFO slot without attempt to mitigate the -//! effect of badly behaving code (e.g. futures that spin-lock by yielding to -//! the executor; there is for this reason no support for something like tokio's -//! `yield_now`). -//! -//! Another way in which it differs from other `async` executors is that it -//! treats deadlocking as a normal occurrence. This is because in a -//! 
discrete-time simulator, the simulation of a system at a given time step -//! will make as much progress as possible until it technically reaches a -//! deadlock. Only then does the simulator advance the simulated time to that of -//! the next "event" extracted from a time-sorted priority queue. -//! -//! The design of the executor is largely influenced by the tokio and Go -//! schedulers, both of which are optimized for message-passing applications. In -//! particular, it uses fast, fixed-size thread-local work-stealing queues with -//! a non-stealable LIFO slot in combination with an injector queue, which -//! injector queue is used both to schedule new tasks and to absorb temporary -//! overflow in the local queues. -//! -//! The design of the injector queue is kept very simple compared to tokio, by -//! taking advantage of the fact that the injector is not required to be either -//! LIFO or FIFO. Moving tasks between a local queue and the injector is fast -//! because tasks are moved in batch and are stored contiguously in memory. -//! -//! Another difference with tokio is that, at the moment, the complete subset of -//! active worker threads is stored in a single atomic variable. This makes it -//! possible to rapidly identify free worker threads for stealing operations, -//! with the downside that the maximum number of worker threads is currently -//! limited to `usize::BITS`. This is not expected to constitute a limitation in -//! practice since system simulation is not typically embarrassingly parallel. -//! -//! Probably the largest difference with tokio is the task system, which has -//! better throughput due to less need for synchronization. This mainly results -//! from the use of an atomic notification counter rather than an atomic -//! notification flag, thus alleviating the need to reset the notification flag -//! before polling a future. +//! `async` executor trait. 
-use std::fmt; -use std::future::Future; -use std::panic::{self, AssertUnwindSafe}; -use std::sync::atomic::{AtomicUsize, Ordering}; -use std::sync::{Arc, Mutex}; -use std::thread::{self, JoinHandle}; -use std::time::{Duration, Instant}; - -use crossbeam_utils::sync::{Parker, Unparker}; -use slab::Slab; - -mod injector; -mod pool_manager; +mod mt_executor; +mod st_executor; mod task; -mod worker; -#[cfg(all(test, not(asynchronix_loom)))] -mod tests; +use std::future::Future; +use std::sync::atomic::AtomicUsize; -use crate::macros::scoped_thread_local::scoped_thread_local; -use crate::util::rng::Rng; - -use self::pool_manager::PoolManager; -use self::task::{CancelToken, Promise, Runnable}; -use self::worker::Worker; - -const BUCKET_SIZE: usize = 128; -const QUEUE_SIZE: usize = BUCKET_SIZE * 2; +use task::Promise; +/// Unique identifier for executor instances. static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0); -type Bucket = injector::Bucket; -type Injector = injector::Injector; -type LocalQueue = st3::fifo::Worker; -type Stealer = st3::fifo::Stealer; - -scoped_thread_local!(static LOCAL_WORKER: Worker); -scoped_thread_local!(static ACTIVE_TASKS: Mutex>); - -/// A multi-threaded `async` executor. -pub(crate) struct Executor { - /// Shared executor data. - context: Arc, - /// List of tasks that have not completed yet. - active_tasks: Arc>>, - /// Parker for the main executor thread. - parker: Parker, - /// Handles to the worker threads. - worker_handles: Vec>, +/// A single-threaded or multi-threaded `async` executor. +#[derive(Debug)] +pub(crate) enum Executor { + StExecutor(st_executor::Executor), + MtExecutor(mt_executor::Executor), } impl Executor { + /// Creates an executor that runs futures on the current thread. + pub(crate) fn new_single_threaded() -> Self { + Self::StExecutor(st_executor::Executor::new()) + } + /// Creates an executor that runs futures on a thread pool. /// /// The maximum number of threads is set with the `num_threads` parameter. 
@@ -101,78 +33,11 @@ impl Executor { /// /// This will panic if the specified number of threads is zero or is more /// than `usize::BITS`. - pub(crate) fn new(num_threads: usize) -> Self { - let parker = Parker::new(); - let unparker = parker.unparker().clone(); - - let (local_queues_and_parkers, stealers_and_unparkers): (Vec<_>, Vec<_>) = (0..num_threads) - .map(|_| { - let parker = Parker::new(); - let unparker = parker.unparker().clone(); - let local_queue = LocalQueue::new(QUEUE_SIZE); - let stealer = local_queue.stealer(); - - ((local_queue, parker), (stealer, unparker)) - }) - .unzip(); - - // Each executor instance has a unique ID inherited by tasks to ensure - // that tasks are scheduled on their parent executor. - let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed); - assert!( - executor_id <= usize::MAX / 2, - "too many executors have been instantiated" - ); - - let context = Arc::new(ExecutorContext::new( - executor_id, - unparker, - stealers_and_unparkers.into_iter(), - )); - let active_tasks = Arc::new(Mutex::new(Slab::new())); - - // All workers must be marked as active _before_ spawning the threads to - // make sure that the count of active workers does not fall to zero - // before all workers are blocked on the signal barrier. - context.pool_manager.set_all_workers_active(); - - // Spawn all worker threads. - let worker_handles: Vec<_> = local_queues_and_parkers - .into_iter() - .enumerate() - .map(|(id, (local_queue, worker_parker))| { - let thread_builder = thread::Builder::new().name(format!("Worker #{}", id)); - - thread_builder - .spawn({ - let context = context.clone(); - let active_tasks = active_tasks.clone(); - move || { - let worker = Worker::new(local_queue, context); - ACTIVE_TASKS.set(&active_tasks, || { - LOCAL_WORKER - .set(&worker, || run_local_worker(&worker, id, worker_parker)) - }); - } - }) - .unwrap() - }) - .collect(); - - // Wait until all workers are blocked on the signal barrier. 
- parker.park(); - assert!(context.pool_manager.pool_is_idle()); - - Self { - context, - active_tasks, - parker, - worker_handles, - } + pub(crate) fn new_multi_threaded(num_threads: usize) -> Self { + Self::MtExecutor(mt_executor::Executor::new(num_threads)) } - /// Spawns a task and returns a promise that can be polled to retrieve the - /// task's output. + /// Spawns a task which output will never be retrieved. /// /// Note that spawned tasks are not executed until [`run()`](Executor::run) /// is called. @@ -182,28 +47,14 @@ impl Executor { T: Future + Send + 'static, T::Output: Send + 'static, { - // Book a slot to store the task cancellation token. - let mut active_tasks = self.active_tasks.lock().unwrap(); - let task_entry = active_tasks.vacant_entry(); - - // Wrap the future so that it removes its cancel token from the - // executor's list when dropped. - let future = CancellableFuture::new(future, task_entry.key()); - - let (promise, runnable, cancel_token) = - task::spawn(future, schedule_task, self.context.executor_id); - - task_entry.insert(cancel_token); - self.context.injector.insert_task(runnable); - - promise + match self { + Self::StExecutor(executor) => executor.spawn(future), + Self::MtExecutor(executor) => executor.spawn(future), + } } /// Spawns a task which output will never be retrieved. /// - /// This is mostly useful to avoid undue reference counting for futures that - /// return a `()` type. - /// /// Note that spawned tasks are not executed until [`run()`](Executor::run) /// is called. pub(crate) fn spawn_and_forget(&self, future: T) @@ -211,354 +62,171 @@ impl Executor { T: Future + Send + 'static, T::Output: Send + 'static, { - // Book a slot to store the task cancellation token. - let mut active_tasks = self.active_tasks.lock().unwrap(); - let task_entry = active_tasks.vacant_entry(); - - // Wrap the future so that it removes its cancel token from the - // executor's list when dropped. 
- let future = CancellableFuture::new(future, task_entry.key()); - - let (runnable, cancel_token) = - task::spawn_and_forget(future, schedule_task, self.context.executor_id); - - task_entry.insert(cancel_token); - self.context.injector.insert_task(runnable); + match self { + Self::StExecutor(executor) => executor.spawn_and_forget(future), + Self::MtExecutor(executor) => executor.spawn_and_forget(future), + } } /// Execute spawned tasks, blocking until all futures have completed or /// until the executor reaches a deadlock. pub(crate) fn run(&mut self) { - self.context.pool_manager.activate_worker(); + match self { + Self::StExecutor(executor) => executor.run(), + Self::MtExecutor(executor) => executor.run(), + } + } +} - loop { - if let Some(worker_panic) = self.context.pool_manager.take_panic() { - panic::resume_unwind(worker_panic); +#[cfg(all(test, not(asynchronix_loom)))] +mod tests { + use std::sync::atomic::Ordering; + use std::sync::Arc; + + use futures_channel::{mpsc, oneshot}; + use futures_util::StreamExt; + + use super::*; + + /// An object that runs an arbitrary closure when dropped. + struct RunOnDrop { + drop_fn: Option, + } + impl RunOnDrop { + /// Creates a new `RunOnDrop`. + fn new(drop_fn: F) -> Self { + Self { + drop_fn: Some(drop_fn), } - if self.context.pool_manager.pool_is_idle() { - return; - } - - self.parker.park(); } } -} - -impl Drop for Executor { - fn drop(&mut self) { - // Force all threads to return. - self.context.pool_manager.trigger_termination(); - for handle in self.worker_handles.drain(0..) { - handle.join().unwrap(); - } - - // Drop all tasks that have not completed. - // - // A local worker must be set because some tasks may schedule other - // tasks when dropped, which requires that a local worker be available. - let worker = Worker::new(LocalQueue::new(QUEUE_SIZE), self.context.clone()); - LOCAL_WORKER.set(&worker, || { - // Cancel all pending futures. 
- // - // `ACTIVE_TASKS` is explicitly unset to prevent - // `CancellableFuture::drop()` from trying to remove its own token - // from the list of active tasks as this would result in a reentrant - // lock. This is mainly to stay on the safe side: `ACTIVE_TASKS` - // should not be set on this thread anyway, unless for some reason - // the executor runs inside another executor. - ACTIVE_TASKS.unset(|| { - let mut tasks = self.active_tasks.lock().unwrap(); - for task in tasks.drain() { - task.cancel(); - } - - // Some of the dropped tasks may have scheduled other tasks that - // were not yet cancelled, preventing them from being dropped - // upon cancellation. This is OK: the scheduled tasks will be - // dropped when the local and injector queues are dropped, and - // they cannot re-schedule one another since all tasks were - // cancelled. - }); - }); - } -} - -impl fmt::Debug for Executor { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("Executor").finish_non_exhaustive() - } -} - -/// Shared executor context. -/// -/// This contains all executor resources that can be shared between threads. -struct ExecutorContext { - /// Injector queue. - injector: Injector, - /// Unique executor ID inherited by all tasks spawned on this executor instance. - executor_id: usize, - /// Unparker for the main executor thread. - executor_unparker: Unparker, - /// Manager for all worker threads. - pool_manager: PoolManager, -} - -impl ExecutorContext { - /// Creates a new shared executor context. 
- pub(super) fn new( - executor_id: usize, - executor_unparker: Unparker, - stealers_and_unparkers: impl Iterator, - ) -> Self { - let (stealers, worker_unparkers): (Vec<_>, Vec<_>) = - stealers_and_unparkers.into_iter().unzip(); - let worker_unparkers = worker_unparkers.into_boxed_slice(); - - Self { - injector: Injector::new(), - executor_id, - executor_unparker, - pool_manager: PoolManager::new( - worker_unparkers.len(), - stealers.into_boxed_slice(), - worker_unparkers, - ), + impl Drop for RunOnDrop { + fn drop(&mut self) { + self.drop_fn.take().map(|f| f()); } } -} -/// A `Future` wrapper that removes its cancellation token from the list of -/// active tasks when dropped. -struct CancellableFuture { - inner: T, - cancellation_key: usize, -} + fn executor_deadlock(mut executor: Executor) { + let (_sender1, receiver1) = oneshot::channel::<()>(); + let (_sender2, receiver2) = oneshot::channel::<()>(); -impl CancellableFuture { - /// Creates a new `CancellableFuture`. - fn new(fut: T, cancellation_key: usize) -> Self { - Self { - inner: fut, - cancellation_key, - } - } -} + let launch_count = Arc::new(AtomicUsize::new(0)); + let completion_count = Arc::new(AtomicUsize::new(0)); -impl Future for CancellableFuture { - type Output = T::Output; + executor.spawn_and_forget({ + let launch_count = launch_count.clone(); + let completion_count = completion_count.clone(); - #[inline(always)] - fn poll( - self: std::pin::Pin<&mut Self>, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll { - unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) } - } -} - -impl Drop for CancellableFuture { - fn drop(&mut self) { - // Remove the task from the list of active tasks if the future is - // dropped on a worker thread. Otherwise do nothing and let the - // executor's drop handler do the cleanup. - let _ = ACTIVE_TASKS.map(|active_tasks| { - // Don't unwrap on `lock()` because this function can be called from - // a destructor and should not panic. 
In the worse case, the cancel - // token will be left in the list of active tasks, which does - // prevents eager task deallocation but does not cause any issue - // otherwise. - if let Ok(mut active_tasks) = active_tasks.lock() { - let _cancel_token = active_tasks.try_remove(self.cancellation_key); + async move { + launch_count.fetch_add(1, Ordering::Relaxed); + let _ = receiver2.await; + completion_count.fetch_add(1, Ordering::Relaxed); } }); - } -} - -/// Schedules a `Runnable` from within a worker thread. -/// -/// # Panics -/// -/// This function will panic if called from a non-worker thread or if called -/// from the worker thread of another executor instance than the one the task -/// for this `Runnable` was spawned on. -fn schedule_task(task: Runnable, executor_id: usize) { - LOCAL_WORKER - .map(|worker| { - let pool_manager = &worker.executor_context.pool_manager; - let injector = &worker.executor_context.injector; - let local_queue = &worker.local_queue; - let fast_slot = &worker.fast_slot; - - // Check that this task was indeed spawned on this executor. - assert_eq!( - executor_id, worker.executor_context.executor_id, - "Tasks must be awaken on the same executor they are spawned on" - ); - - // Store the task in the fast slot and retrieve the one that was - // formerly stored, if any. - let prev_task = match fast_slot.replace(Some(task)) { - // If there already was a task in the slot, proceed so it can be - // moved to a task queue. - Some(t) => t, - // Otherwise return immediately: this task cannot be stolen so - // there is no point in activating a sibling worker. - None => return, - }; - - // Push the previous task to the local queue if possible or on the - // injector queue otherwise. - if let Err(prev_task) = local_queue.push(prev_task) { - // The local queue is full. Try to move half of it to the - // injector queue; if this fails, just push one task to the - // injector queue. 
- if let Ok(drain) = local_queue.drain(|_| Bucket::capacity()) { - injector.push_bucket(Bucket::from_iter(drain)); - local_queue.push(prev_task).unwrap(); - } else { - injector.insert_task(prev_task); - } - } - - // A task has been pushed to the local or injector queue: try to - // activate another worker if no worker is currently searching for a - // task. - if pool_manager.searching_worker_count() == 0 { - pool_manager.activate_worker_relaxed(); - } - }) - .expect("Tasks may not be awaken outside executor threads"); -} - -/// Processes all incoming tasks on a worker thread until the `Terminate` signal -/// is received or until it panics. -/// -/// Panics caught in this thread are relayed to the main executor thread. -fn run_local_worker(worker: &Worker, id: usize, parker: Parker) { - let pool_manager = &worker.executor_context.pool_manager; - let injector = &worker.executor_context.injector; - let executor_unparker = &worker.executor_context.executor_unparker; - let local_queue = &worker.local_queue; - let fast_slot = &worker.fast_slot; - - let result = panic::catch_unwind(AssertUnwindSafe(|| { - // Set how long to spin when searching for a task. - const MAX_SEARCH_DURATION: Duration = Duration::from_nanos(1000); - - // Seed a thread RNG with the worker ID. - let rng = Rng::new(id as u64); - - loop { - // Signal barrier: park until notified to continue or terminate. - - // Try to deactivate the worker. - if pool_manager.try_set_worker_inactive(id) { - parker.park(); - // No need to call `begin_worker_search()`: this was done by the - // thread that unparked the worker. - } else if injector.is_empty() { - // This worker could not be deactivated because it was the last - // active worker. In such case, the call to - // `try_set_worker_inactive` establishes a synchronization with - // all threads that pushed tasks to the injector queue but could - // not activate a new worker, which is why some tasks may now be - // visible in the injector queue. 
- pool_manager.set_all_workers_inactive(); - executor_unparker.unpark(); - parker.park(); - // No need to call `begin_worker_search()`: this was done by the - // thread that unparked the worker. - } else { - pool_manager.begin_worker_search(); - } - - if pool_manager.termination_is_triggered() { - return; - } - - let mut search_start = Instant::now(); - - // Process the tasks one by one. - loop { - // Check the injector queue first. - if let Some(bucket) = injector.pop_bucket() { - let bucket_iter = bucket.into_iter(); - - // There is a _very_ remote possibility that, even though - // the local queue is empty, it has temporarily too little - // spare capacity for the bucket. This could happen if a - // concurrent steal operation was preempted for all the time - // it took to pop and process the remaining tasks and it - // hasn't released the stolen capacity yet. - // - // Unfortunately, we cannot just skip checking the injector - // queue altogether when there isn't enough spare capacity - // in the local queue because this could lead to a race: - // suppose that (1) this thread has earlier pushed tasks - // onto the injector queue, and (2) the stealer has - // processed all stolen tasks before this thread sees the - // capacity restored and at the same time (3) the stealer - // does not yet see the tasks this thread pushed to the - // injector queue; in such scenario, both this thread and - // the stealer thread may park and leave unprocessed tasks - // in the injector queue. - // - // This is the only instance where spinning is used, as the - // probability of this happening is close to zero and the - // complexity of a signaling mechanism (condvar & friends) - // wouldn't carry its weight. - while local_queue.spare_capacity() < bucket_iter.len() {} - - // Since empty buckets are never pushed onto the injector - // queue, we should now have at least one task to process. - local_queue.extend(bucket_iter); - } else { - // The injector queue is empty. 
Try to steal from active - // siblings. - let mut stealers = pool_manager.shuffled_stealers(Some(id), &rng); - if stealers.all(|stealer| { - stealer - .steal_and_pop(local_queue, |n| n - n / 2) - .map(|(task, _)| { - let prev_task = fast_slot.replace(Some(task)); - assert!(prev_task.is_none()); - }) - .is_err() - }) { - // Give up if unsuccessful for too long. - if (Instant::now() - search_start) > MAX_SEARCH_DURATION { - pool_manager.end_worker_search(); - break; - } - - // Re-try. - continue; - } - } - - // Signal the end of the search so that another worker can be - // activated when a new task is scheduled. - pool_manager.end_worker_search(); - - // Pop tasks from the fast slot or the local queue. - while let Some(task) = fast_slot.take().or_else(|| local_queue.pop()) { - if pool_manager.termination_is_triggered() { - return; - } - task.run(); - } - - // Resume the search for tasks. - pool_manager.begin_worker_search(); - search_start = Instant::now(); - } - } - })); - - // Propagate the panic, if any. - if let Err(panic) = result { - pool_manager.register_panic(panic); - pool_manager.trigger_termination(); - executor_unparker.unpark(); + executor.spawn_and_forget({ + let launch_count = launch_count.clone(); + let completion_count = completion_count.clone(); + + async move { + launch_count.fetch_add(1, Ordering::Relaxed); + let _ = receiver1.await; + completion_count.fetch_add(1, Ordering::Relaxed); + } + }); + + executor.run(); + + // Check that the executor returns on deadlock, i.e. none of the task has + // completed. + assert_eq!(launch_count.load(Ordering::Relaxed), 2); + assert_eq!(completion_count.load(Ordering::Relaxed), 0); + + // Drop the executor and thus the receiver tasks before the senders, + // failing which the senders may signal that the channel has been + // dropped and wake the tasks outside the executor. 
+ drop(executor); + } + + fn executor_drop_cycle(mut executor: Executor) { + let (sender1, mut receiver1) = mpsc::channel(2); + let (sender2, mut receiver2) = mpsc::channel(2); + let (sender3, mut receiver3) = mpsc::channel(2); + + let drop_count = Arc::new(AtomicUsize::new(0)); + + // Spawn 3 tasks that wake one another when dropped. + executor.spawn_and_forget({ + let mut sender2 = sender2.clone(); + let mut sender3 = sender3.clone(); + let drop_count = drop_count.clone(); + + async move { + let _guard = RunOnDrop::new(move || { + let _ = sender2.try_send(()); + let _ = sender3.try_send(()); + drop_count.fetch_add(1, Ordering::Relaxed); + }); + let _ = receiver1.next().await; + } + }); + executor.spawn_and_forget({ + let mut sender1 = sender1.clone(); + let mut sender3 = sender3.clone(); + let drop_count = drop_count.clone(); + + async move { + let _guard = RunOnDrop::new(move || { + let _ = sender1.try_send(()); + let _ = sender3.try_send(()); + drop_count.fetch_add(1, Ordering::Relaxed); + }); + let _ = receiver2.next().await; + } + }); + executor.spawn_and_forget({ + let mut sender1 = sender1.clone(); + let mut sender2 = sender2.clone(); + let drop_count = drop_count.clone(); + + async move { + let _guard = RunOnDrop::new(move || { + let _ = sender1.try_send(()); + let _ = sender2.try_send(()); + drop_count.fetch_add(1, Ordering::Relaxed); + }); + let _ = receiver3.next().await; + } + }); + + executor.run(); + + // Make sure that all tasks are eventually dropped even though each task + // wakes the others when dropped. 
+ drop(executor); + assert_eq!(drop_count.load(Ordering::Relaxed), 3); + } + + #[test] + fn executor_deadlock_st() { + executor_deadlock(Executor::new_single_threaded()); + } + + #[test] + fn executor_deadlock_mt() { + executor_deadlock(Executor::new_multi_threaded(3)); + } + + #[test] + fn executor_deadlock_mt_one_worker() { + executor_deadlock(Executor::new_multi_threaded(1)); + } + #[test] + fn executor_drop_cycle_st() { + executor_drop_cycle(Executor::new_single_threaded()); + } + + #[test] + fn executor_drop_cycle_mt() { + executor_drop_cycle(Executor::new_multi_threaded(3)); } } diff --git a/asynchronix/src/executor/mt_executor.rs b/asynchronix/src/executor/mt_executor.rs new file mode 100644 index 0000000..5859cdf --- /dev/null +++ b/asynchronix/src/executor/mt_executor.rs @@ -0,0 +1,576 @@ +//! Multi-threaded `async` executor. +//! +//! The executor is exclusively designed for message-passing computational +//! tasks. As such, it does not include an I/O reactor and does not consider +//! fairness as a goal in itself. While it does use fair local queues inasmuch +//! as these tend to perform better in message-passing applications, it uses an +//! unfair injection queue and a LIFO slot without attempt to mitigate the +//! effect of badly behaving code (e.g. futures that spin-lock by yielding to +//! the executor; there is for this reason no support for something like tokio's +//! `yield_now`). +//! +//! Another way in which it differs from other `async` executors is that it +//! treats deadlocking as a normal occurrence. This is because in a +//! discrete-time simulator, the simulation of a system at a given time step +//! will make as much progress as possible until it technically reaches a +//! deadlock. Only then does the simulator advance the simulated time to that of +//! the next "event" extracted from a time-sorted priority queue. +//! +//! The design of the executor is largely influenced by the tokio and Go +//! 
schedulers, both of which are optimized for message-passing applications. In +//! particular, it uses fast, fixed-size thread-local work-stealing queues with +//! a non-stealable LIFO slot in combination with an injector queue, which +//! injector queue is used both to schedule new tasks and to absorb temporary +//! overflow in the local queues. +//! +//! The design of the injector queue is kept very simple compared to tokio, by +//! taking advantage of the fact that the injector is not required to be either +//! LIFO or FIFO. Moving tasks between a local queue and the injector is fast +//! because tasks are moved in batch and are stored contiguously in memory. +//! +//! Another difference with tokio is that, at the moment, the complete subset of +//! active worker threads is stored in a single atomic variable. This makes it +//! possible to rapidly identify free worker threads for stealing operations, +//! with the downside that the maximum number of worker threads is currently +//! limited to `usize::BITS`. This is not expected to constitute a limitation in +//! practice since system simulation is not typically embarrassingly parallel. +//! +//! Probably the largest difference with tokio is the task system, which has +//! better throughput due to less need for synchronization. This mainly results +//! from the use of an atomic notification counter rather than an atomic +//! notification flag, thus alleviating the need to reset the notification flag +//! before polling a future. 
+ +mod injector; +mod pool_manager; + +use std::cell::Cell; +use std::fmt; +use std::future::Future; +use std::panic::{self, AssertUnwindSafe}; +use std::sync::atomic::Ordering; +use std::sync::{Arc, Mutex}; +use std::thread::{self, JoinHandle}; +use std::time::{Duration, Instant}; + +use crossbeam_utils::sync::{Parker, Unparker}; +use slab::Slab; + +use crate::macros::scoped_thread_local::scoped_thread_local; +use crate::util::rng::Rng; + +use super::task::{self, CancelToken, Promise, Runnable}; +use super::NEXT_EXECUTOR_ID; +use pool_manager::PoolManager; + +const BUCKET_SIZE: usize = 128; +const QUEUE_SIZE: usize = BUCKET_SIZE * 2; + +type Bucket = injector::Bucket; +type Injector = injector::Injector; +type LocalQueue = st3::fifo::Worker; +type Stealer = st3::fifo::Stealer; + +scoped_thread_local!(static LOCAL_WORKER: Worker); +scoped_thread_local!(static ACTIVE_TASKS: Mutex>); + +/// A multi-threaded `async` executor. +pub(crate) struct Executor { + /// Shared executor data. + context: Arc, + /// List of tasks that have not completed yet. + active_tasks: Arc>>, + /// Parker for the main executor thread. + parker: Parker, + /// Handles to the worker threads. + worker_handles: Vec>, +} + +impl Executor { + /// Creates an executor that runs futures on a thread pool. + /// + /// The maximum number of threads is set with the `num_threads` parameter. + /// + /// # Panics + /// + /// This will panic if the specified number of threads is zero or is more + /// than `usize::BITS`. 
+ pub(crate) fn new(num_threads: usize) -> Self { + let parker = Parker::new(); + let unparker = parker.unparker().clone(); + + let (local_queues_and_parkers, stealers_and_unparkers): (Vec<_>, Vec<_>) = (0..num_threads) + .map(|_| { + let parker = Parker::new(); + let unparker = parker.unparker().clone(); + let local_queue = LocalQueue::new(QUEUE_SIZE); + let stealer = local_queue.stealer(); + + ((local_queue, parker), (stealer, unparker)) + }) + .unzip(); + + // Each executor instance has a unique ID inherited by tasks to ensure + // that tasks are scheduled on their parent executor. + let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed); + assert!( + executor_id <= usize::MAX / 2, + "too many executors have been instantiated" + ); + + let context = Arc::new(ExecutorContext::new( + executor_id, + unparker, + stealers_and_unparkers.into_iter(), + )); + let active_tasks = Arc::new(Mutex::new(Slab::new())); + + // All workers must be marked as active _before_ spawning the threads to + // make sure that the count of active workers does not fall to zero + // before all workers are blocked on the signal barrier. + context.pool_manager.set_all_workers_active(); + + // Spawn all worker threads. + let worker_handles: Vec<_> = local_queues_and_parkers + .into_iter() + .enumerate() + .map(|(id, (local_queue, worker_parker))| { + let thread_builder = thread::Builder::new().name(format!("Worker #{}", id)); + + thread_builder + .spawn({ + let context = context.clone(); + let active_tasks = active_tasks.clone(); + move || { + let worker = Worker::new(local_queue, context); + ACTIVE_TASKS.set(&active_tasks, || { + LOCAL_WORKER + .set(&worker, || run_local_worker(&worker, id, worker_parker)) + }); + } + }) + .unwrap() + }) + .collect(); + + // Wait until all workers are blocked on the signal barrier. 
+ parker.park(); + assert!(context.pool_manager.pool_is_idle()); + + Self { + context, + active_tasks, + parker, + worker_handles, + } + } + + /// Spawns a task and returns a promise that can be polled to retrieve the + /// task's output. + /// + /// Note that spawned tasks are not executed until [`run()`](Executor::run) + /// is called. + pub(crate) fn spawn(&self, future: T) -> Promise + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + // Book a slot to store the task cancellation token. + let mut active_tasks = self.active_tasks.lock().unwrap(); + let task_entry = active_tasks.vacant_entry(); + + // Wrap the future so that it removes its cancel token from the + // executor's list when dropped. + let future = CancellableFuture::new(future, task_entry.key()); + + let (promise, runnable, cancel_token) = + task::spawn(future, schedule_task, self.context.executor_id); + + task_entry.insert(cancel_token); + self.context.injector.insert_task(runnable); + + promise + } + + /// Spawns a task which output will never be retrieved. + /// + /// This is mostly useful to avoid undue reference counting for futures that + /// return a `()` type. + /// + /// Note that spawned tasks are not executed until [`run()`](Executor::run) + /// is called. + pub(crate) fn spawn_and_forget(&self, future: T) + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + // Book a slot to store the task cancellation token. + let mut active_tasks = self.active_tasks.lock().unwrap(); + let task_entry = active_tasks.vacant_entry(); + + // Wrap the future so that it removes its cancel token from the + // executor's list when dropped. 
+ let future = CancellableFuture::new(future, task_entry.key()); + + let (runnable, cancel_token) = + task::spawn_and_forget(future, schedule_task, self.context.executor_id); + + task_entry.insert(cancel_token); + self.context.injector.insert_task(runnable); + } + + /// Execute spawned tasks, blocking until all futures have completed or + /// until the executor reaches a deadlock. + pub(crate) fn run(&mut self) { + self.context.pool_manager.activate_worker(); + + loop { + if let Some(worker_panic) = self.context.pool_manager.take_panic() { + panic::resume_unwind(worker_panic); + } + if self.context.pool_manager.pool_is_idle() { + return; + } + + self.parker.park(); + } + } +} + +impl Drop for Executor { + fn drop(&mut self) { + // Force all threads to return. + self.context.pool_manager.trigger_termination(); + for handle in self.worker_handles.drain(0..) { + handle.join().unwrap(); + } + + // Drop all tasks that have not completed. + // + // A local worker must be set because some tasks may schedule other + // tasks when dropped, which requires that a local worker be available. + let worker = Worker::new(LocalQueue::new(QUEUE_SIZE), self.context.clone()); + LOCAL_WORKER.set(&worker, || { + // Cancel all pending futures. + // + // `ACTIVE_TASKS` is explicitly unset to prevent + // `CancellableFuture::drop()` from trying to remove its own token + // from the list of active tasks as this would result in a reentrant + // lock. This is mainly to stay on the safe side: `ACTIVE_TASKS` + // should not be set on this thread anyway, unless for some reason + // the executor runs inside another executor. + ACTIVE_TASKS.unset(|| { + let mut tasks = self.active_tasks.lock().unwrap(); + for task in tasks.drain() { + task.cancel(); + } + + // Some of the dropped tasks may have scheduled other tasks that + // were not yet cancelled, preventing them from being dropped + // upon cancellation. 
This is OK: the scheduled tasks will be + // dropped when the local and injector queues are dropped, and + // they cannot re-schedule one another since all tasks were + // cancelled. + }); + }); + } +} + +impl fmt::Debug for Executor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Executor").finish_non_exhaustive() + } +} + +/// Shared executor context. +/// +/// This contains all executor resources that can be shared between threads. +struct ExecutorContext { + /// Injector queue. + injector: Injector, + /// Unique executor identifier inherited by all tasks spawned on this + /// executor instance. + executor_id: usize, + /// Unparker for the main executor thread. + executor_unparker: Unparker, + /// Manager for all worker threads. + pool_manager: PoolManager, +} + +impl ExecutorContext { + /// Creates a new shared executor context. + pub(super) fn new( + executor_id: usize, + executor_unparker: Unparker, + stealers_and_unparkers: impl Iterator, + ) -> Self { + let (stealers, worker_unparkers): (Vec<_>, Vec<_>) = + stealers_and_unparkers.into_iter().unzip(); + let worker_unparkers = worker_unparkers.into_boxed_slice(); + + Self { + injector: Injector::new(), + executor_id, + executor_unparker, + pool_manager: PoolManager::new( + worker_unparkers.len(), + stealers.into_boxed_slice(), + worker_unparkers, + ), + } + } +} + +/// A `Future` wrapper that removes its cancellation token from the list of +/// active tasks when dropped. +struct CancellableFuture { + inner: T, + cancellation_key: usize, +} + +impl CancellableFuture { + /// Creates a new `CancellableFuture`. 
+ fn new(fut: T, cancellation_key: usize) -> Self { + Self { + inner: fut, + cancellation_key, + } + } +} + +impl Future for CancellableFuture { + type Output = T::Output; + + #[inline(always)] + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) } + } +} + +impl Drop for CancellableFuture { + fn drop(&mut self) { + // Remove the task from the list of active tasks if the future is + // dropped on a worker thread. Otherwise do nothing and let the + // executor's drop handler do the cleanup. + let _ = ACTIVE_TASKS.map(|active_tasks| { + // Don't unwrap on `lock()` because this function can be called from + // a destructor and should not panic. In the worse case, the cancel + // token will be left in the list of active tasks, which does + // prevents eager task deallocation but does not cause any issue + // otherwise. + if let Ok(mut active_tasks) = active_tasks.lock() { + let _cancel_token = active_tasks.try_remove(self.cancellation_key); + } + }); + } +} + +/// A local worker with access to global executor resources. +pub(crate) struct Worker { + local_queue: LocalQueue, + fast_slot: Cell>, + executor_context: Arc, +} + +impl Worker { + /// Creates a new worker. + fn new(local_queue: LocalQueue, executor_context: Arc) -> Self { + Self { + local_queue, + fast_slot: Cell::new(None), + executor_context, + } + } +} + +/// Schedules a `Runnable` from within a worker thread. +/// +/// # Panics +/// +/// This function will panic if called from a non-worker thread or if called +/// from the worker thread of another executor instance than the one the task +/// for this `Runnable` was spawned on. 
+fn schedule_task(task: Runnable, executor_id: usize) { + LOCAL_WORKER + .map(|worker| { + let pool_manager = &worker.executor_context.pool_manager; + let injector = &worker.executor_context.injector; + let local_queue = &worker.local_queue; + let fast_slot = &worker.fast_slot; + + // Check that this task was indeed spawned on this executor. + assert_eq!( + executor_id, worker.executor_context.executor_id, + "Tasks must be awaken on the same executor they are spawned on" + ); + + // Store the task in the fast slot and retrieve the one that was + // formerly stored, if any. + let prev_task = match fast_slot.replace(Some(task)) { + // If there already was a task in the slot, proceed so it can be + // moved to a task queue. + Some(t) => t, + // Otherwise return immediately: this task cannot be stolen so + // there is no point in activating a sibling worker. + None => return, + }; + + // Push the previous task to the local queue if possible or on the + // injector queue otherwise. + if let Err(prev_task) = local_queue.push(prev_task) { + // The local queue is full. Try to move half of it to the + // injector queue; if this fails, just push one task to the + // injector queue. + if let Ok(drain) = local_queue.drain(|_| Bucket::capacity()) { + injector.push_bucket(Bucket::from_iter(drain)); + local_queue.push(prev_task).unwrap(); + } else { + injector.insert_task(prev_task); + } + } + + // A task has been pushed to the local or injector queue: try to + // activate another worker if no worker is currently searching for a + // task. + if pool_manager.searching_worker_count() == 0 { + pool_manager.activate_worker_relaxed(); + } + }) + .expect("Tasks may not be awaken outside executor threads"); +} + +/// Processes all incoming tasks on a worker thread until the `Terminate` signal +/// is received or until it panics. +/// +/// Panics caught in this thread are relayed to the main executor thread. 
+fn run_local_worker(worker: &Worker, id: usize, parker: Parker) { + let pool_manager = &worker.executor_context.pool_manager; + let injector = &worker.executor_context.injector; + let executor_unparker = &worker.executor_context.executor_unparker; + let local_queue = &worker.local_queue; + let fast_slot = &worker.fast_slot; + + let result = panic::catch_unwind(AssertUnwindSafe(|| { + // Set how long to spin when searching for a task. + const MAX_SEARCH_DURATION: Duration = Duration::from_nanos(1000); + + // Seed a thread RNG with the worker ID. + let rng = Rng::new(id as u64); + + loop { + // Signal barrier: park until notified to continue or terminate. + + // Try to deactivate the worker. + if pool_manager.try_set_worker_inactive(id) { + parker.park(); + // No need to call `begin_worker_search()`: this was done by the + // thread that unparked the worker. + } else if injector.is_empty() { + // This worker could not be deactivated because it was the last + // active worker. In such case, the call to + // `try_set_worker_inactive` establishes a synchronization with + // all threads that pushed tasks to the injector queue but could + // not activate a new worker, which is why some tasks may now be + // visible in the injector queue. + pool_manager.set_all_workers_inactive(); + executor_unparker.unpark(); + parker.park(); + // No need to call `begin_worker_search()`: this was done by the + // thread that unparked the worker. + } else { + pool_manager.begin_worker_search(); + } + + if pool_manager.termination_is_triggered() { + return; + } + + let mut search_start = Instant::now(); + + // Process the tasks one by one. + loop { + // Check the injector queue first. + if let Some(bucket) = injector.pop_bucket() { + let bucket_iter = bucket.into_iter(); + + // There is a _very_ remote possibility that, even though + // the local queue is empty, it has temporarily too little + // spare capacity for the bucket. 
This could happen if a + // concurrent steal operation was preempted for all the time + // it took to pop and process the remaining tasks and it + // hasn't released the stolen capacity yet. + // + // Unfortunately, we cannot just skip checking the injector + // queue altogether when there isn't enough spare capacity + // in the local queue because this could lead to a race: + // suppose that (1) this thread has earlier pushed tasks + // onto the injector queue, and (2) the stealer has + // processed all stolen tasks before this thread sees the + // capacity restored and at the same time (3) the stealer + // does not yet see the tasks this thread pushed to the + // injector queue; in such scenario, both this thread and + // the stealer thread may park and leave unprocessed tasks + // in the injector queue. + // + // This is the only instance where spinning is used, as the + // probability of this happening is close to zero and the + // complexity of a signaling mechanism (condvar & friends) + // wouldn't carry its weight. + while local_queue.spare_capacity() < bucket_iter.len() {} + + // Since empty buckets are never pushed onto the injector + // queue, we should now have at least one task to process. + local_queue.extend(bucket_iter); + } else { + // The injector queue is empty. Try to steal from active + // siblings. + let mut stealers = pool_manager.shuffled_stealers(Some(id), &rng); + if stealers.all(|stealer| { + stealer + .steal_and_pop(local_queue, |n| n - n / 2) + .map(|(task, _)| { + let prev_task = fast_slot.replace(Some(task)); + assert!(prev_task.is_none()); + }) + .is_err() + }) { + // Give up if unsuccessful for too long. + if (Instant::now() - search_start) > MAX_SEARCH_DURATION { + pool_manager.end_worker_search(); + break; + } + + // Re-try. + continue; + } + } + + // Signal the end of the search so that another worker can be + // activated when a new task is scheduled. 
+ pool_manager.end_worker_search(); + + // Pop tasks from the fast slot or the local queue. + while let Some(task) = fast_slot.take().or_else(|| local_queue.pop()) { + if pool_manager.termination_is_triggered() { + return; + } + task.run(); + } + + // Resume the search for tasks. + pool_manager.begin_worker_search(); + search_start = Instant::now(); + } + } + })); + + // Propagate the panic, if any. + if let Err(panic) = result { + pool_manager.register_panic(panic); + pool_manager.trigger_termination(); + executor_unparker.unpark(); + } +} diff --git a/asynchronix/src/executor/injector.rs b/asynchronix/src/executor/mt_executor/injector.rs similarity index 100% rename from asynchronix/src/executor/injector.rs rename to asynchronix/src/executor/mt_executor/injector.rs diff --git a/asynchronix/src/executor/pool_manager.rs b/asynchronix/src/executor/mt_executor/pool_manager.rs similarity index 100% rename from asynchronix/src/executor/pool_manager.rs rename to asynchronix/src/executor/mt_executor/pool_manager.rs diff --git a/asynchronix/src/executor/st_executor.rs b/asynchronix/src/executor/st_executor.rs new file mode 100644 index 0000000..ced8c9c --- /dev/null +++ b/asynchronix/src/executor/st_executor.rs @@ -0,0 +1,244 @@ +use std::cell::RefCell; +use std::fmt; +use std::future::Future; +use std::sync::atomic::Ordering; + +use slab::Slab; + +use super::task::{self, CancelToken, Promise, Runnable}; +use super::NEXT_EXECUTOR_ID; + +use crate::macros::scoped_thread_local::scoped_thread_local; + +const QUEUE_MIN_CAPACITY: usize = 32; + +scoped_thread_local!(static EXECUTOR_CONTEXT: ExecutorContext); +scoped_thread_local!(static ACTIVE_TASKS: RefCell>); + +/// A single-threaded `async` executor. +pub(crate) struct Executor { + /// Shared executor data. + context: ExecutorContext, + /// List of tasks that have not completed yet. + active_tasks: RefCell>, +} + +impl Executor { + /// Creates an executor that runs futures on the current thread. 
+ pub(crate) fn new() -> Self { + // Each executor instance has a unique ID inherited by tasks to ensure + // that tasks are scheduled on their parent executor. + let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed); + assert!( + executor_id <= usize::MAX / 2, + "too many executors have been instantiated" + ); + + let context = ExecutorContext::new(executor_id); + let active_tasks = RefCell::new(Slab::new()); + + Self { + context, + active_tasks, + } + } + + /// Spawns a task and returns a promise that can be polled to retrieve the + /// task's output. + /// + /// Note that spawned tasks are not executed until [`run()`](Executor::run) + /// is called. + pub(crate) fn spawn(&self, future: T) -> Promise + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + // Book a slot to store the task cancellation token. + let mut active_tasks = self.active_tasks.borrow_mut(); + let task_entry = active_tasks.vacant_entry(); + + // Wrap the future so that it removes its cancel token from the + // executor's list when dropped. + let future = CancellableFuture::new(future, task_entry.key()); + + let (promise, runnable, cancel_token) = + task::spawn(future, schedule_task, self.context.executor_id); + + task_entry.insert(cancel_token); + let mut queue = self.context.queue.borrow_mut(); + queue.push(runnable); + + promise + } + + /// Spawns a task which output will never be retrieved. + /// + /// This is mostly useful to avoid undue reference counting for futures that + /// return a `()` type. + /// + /// Note that spawned tasks are not executed until [`run()`](Executor::run) + /// is called. + pub(crate) fn spawn_and_forget(&self, future: T) + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + // Book a slot to store the task cancellation token. 
+ let mut active_tasks = self.active_tasks.borrow_mut(); + let task_entry = active_tasks.vacant_entry(); + + // Wrap the future so that it removes its cancel token from the + // executor's list when dropped. + let future = CancellableFuture::new(future, task_entry.key()); + + let (runnable, cancel_token) = + task::spawn_and_forget(future, schedule_task, self.context.executor_id); + + task_entry.insert(cancel_token); + let mut queue = self.context.queue.borrow_mut(); + queue.push(runnable); + } + + /// Execute spawned tasks, blocking until all futures have completed or + /// until the executor reaches a deadlock. + pub(crate) fn run(&mut self) { + ACTIVE_TASKS.set(&self.active_tasks, || { + EXECUTOR_CONTEXT.set(&self.context, || loop { + let task = match self.context.queue.borrow_mut().pop() { + Some(task) => task, + None => break, + }; + + task.run(); + }) + }); + } +} + +impl Drop for Executor { + fn drop(&mut self) { + // Drop all tasks that have not completed. + // + // The executor context must be set because some tasks may schedule + // other tasks when dropped, which requires that the work queue be + // available. + EXECUTOR_CONTEXT.set(&self.context, || { + // Cancel all pending futures. + // + // `ACTIVE_TASKS` is explicitly unset to prevent + // `CancellableFuture::drop()` from trying to remove its own token + // from the list of active tasks as this would result in a nested + // call to `borrow_mut` and thus a panic. This is mainly to stay on + // the safe side: `ACTIVE_TASKS` should not be set anyway, unless + // for some reason the executor runs inside another executor. + ACTIVE_TASKS.unset(|| { + let mut tasks = self.active_tasks.borrow_mut(); + for task in tasks.drain() { + task.cancel(); + } + + // Some of the dropped tasks may have scheduled other tasks that + // were not yet cancelled, preventing them from being dropped + // upon cancellation. 
This is OK: the scheduled tasks will be + // dropped when the work queue is dropped, and they cannot + // re-schedule one another since all tasks were cancelled. + }); + }); + } +} + +impl fmt::Debug for Executor { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("Executor").finish_non_exhaustive() + } +} + +/// Shared executor context. +/// +/// This contains all executor resources that can be shared between threads. +struct ExecutorContext { + /// Work queue. + queue: RefCell>, + /// Unique executor identifier inherited by all tasks spawned on this + /// executor instance. + executor_id: usize, +} + +impl ExecutorContext { + /// Creates a new shared executor context. + fn new(executor_id: usize) -> Self { + Self { + queue: RefCell::new(Vec::with_capacity(QUEUE_MIN_CAPACITY)), + executor_id, + } + } +} + +/// A `Future` wrapper that removes its cancellation token from the list of +/// active tasks when dropped. +struct CancellableFuture { + inner: T, + cancellation_key: usize, +} + +impl CancellableFuture { + /// Creates a new `CancellableFuture`. + fn new(fut: T, cancellation_key: usize) -> Self { + Self { + inner: fut, + cancellation_key, + } + } +} + +impl Future for CancellableFuture { + type Output = T::Output; + + #[inline(always)] + fn poll( + self: std::pin::Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll { + unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) } + } +} + +impl Drop for CancellableFuture { + fn drop(&mut self) { + // Remove the task from the list of active tasks while the executor is + // running (meaning that `ACTIVE_TASK` is set). Otherwise do nothing and + // let the executor's drop handler do the cleanup. + let _ = ACTIVE_TASKS.map(|active_tasks| { + // Don't use `borrow_mut()` because this function can be called from + // a destructor and should not panic. 
In the worse case, the cancel + // token will be left in the list of active tasks, which does + // prevents eager task deallocation but does not cause any issue + // otherwise. + if let Ok(mut active_tasks) = active_tasks.try_borrow_mut() { + let _cancel_token = active_tasks.try_remove(self.cancellation_key); + } + }); + } +} + +/// Schedules a `Runnable` from within a worker thread. +/// +/// # Panics +/// +/// This function will panic if called from called outside from the executor +/// work thread or from another executor instance than the one the task for this +/// `Runnable` was spawned on. +fn schedule_task(task: Runnable, executor_id: usize) { + EXECUTOR_CONTEXT + .map(|context| { + // Check that this task was indeed spawned on this executor. + assert_eq!( + executor_id, context.executor_id, + "Tasks must be awaken on the same executor they are spawned on" + ); + + let mut queue = context.queue.borrow_mut(); + queue.push(task); + }) + .expect("Tasks may not be awaken outside executor threads"); +} diff --git a/asynchronix/src/executor/tests.rs b/asynchronix/src/executor/tests.rs deleted file mode 100644 index 7f63b46..0000000 --- a/asynchronix/src/executor/tests.rs +++ /dev/null @@ -1,140 +0,0 @@ -use futures_channel::{mpsc, oneshot}; -use futures_util::StreamExt; - -use super::*; - -/// An object that runs an arbitrary closure when dropped. -struct RunOnDrop { - drop_fn: Option, -} -impl RunOnDrop { - /// Creates a new `RunOnDrop`. 
- fn new(drop_fn: F) -> Self { - Self { - drop_fn: Some(drop_fn), - } - } -} -impl Drop for RunOnDrop { - fn drop(&mut self) { - self.drop_fn.take().map(|f| f()); - } -} - -#[test] -fn executor_deadlock() { - const NUM_THREADS: usize = 3; - - let (_sender1, receiver1) = oneshot::channel::<()>(); - let (_sender2, receiver2) = oneshot::channel::<()>(); - - let mut executor = Executor::new(NUM_THREADS); - static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0); - static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0); - - executor.spawn_and_forget(async move { - LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed); - let _ = receiver2.await; - COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed); - }); - executor.spawn_and_forget(async move { - LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed); - let _ = receiver1.await; - COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed); - }); - - executor.run(); - // Check that the executor returns on deadlock, i.e. none of the task has - // completed. - assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2); - assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0); -} - -#[test] -fn executor_deadlock_st() { - const NUM_THREADS: usize = 1; - - let (_sender1, receiver1) = oneshot::channel::<()>(); - let (_sender2, receiver2) = oneshot::channel::<()>(); - - let mut executor = Executor::new(NUM_THREADS); - static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0); - static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0); - - executor.spawn_and_forget(async move { - LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed); - let _ = receiver2.await; - COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed); - }); - executor.spawn_and_forget(async move { - LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed); - let _ = receiver1.await; - COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed); - }); - - executor.run(); - // Check that the executor returnes on deadlock, i.e. none of the task has - // completed. 
- assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2); - assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0); -} - -#[test] -fn executor_drop_cycle() { - const NUM_THREADS: usize = 3; - - let (sender1, mut receiver1) = mpsc::channel(2); - let (sender2, mut receiver2) = mpsc::channel(2); - let (sender3, mut receiver3) = mpsc::channel(2); - - let mut executor = Executor::new(NUM_THREADS); - static DROP_COUNT: AtomicUsize = AtomicUsize::new(0); - - // Spawn 3 tasks that wake one another when dropped. - executor.spawn_and_forget({ - let mut sender2 = sender2.clone(); - let mut sender3 = sender3.clone(); - - async move { - let _guard = RunOnDrop::new(move || { - let _ = sender2.try_send(()); - let _ = sender3.try_send(()); - DROP_COUNT.fetch_add(1, Ordering::Relaxed); - }); - let _ = receiver1.next().await; - } - }); - executor.spawn_and_forget({ - let mut sender1 = sender1.clone(); - let mut sender3 = sender3.clone(); - - async move { - let _guard = RunOnDrop::new(move || { - let _ = sender1.try_send(()); - let _ = sender3.try_send(()); - DROP_COUNT.fetch_add(1, Ordering::Relaxed); - }); - let _ = receiver2.next().await; - } - }); - executor.spawn_and_forget({ - let mut sender1 = sender1.clone(); - let mut sender2 = sender2.clone(); - - async move { - let _guard = RunOnDrop::new(move || { - let _ = sender1.try_send(()); - let _ = sender2.try_send(()); - DROP_COUNT.fetch_add(1, Ordering::Relaxed); - }); - let _ = receiver3.next().await; - } - }); - - executor.run(); - - // Make sure that all tasks are eventually dropped even though each task - // wakes the others when dropped. 
- drop(executor); - assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3); -} diff --git a/asynchronix/src/executor/worker.rs b/asynchronix/src/executor/worker.rs deleted file mode 100644 index b815276..0000000 --- a/asynchronix/src/executor/worker.rs +++ /dev/null @@ -1,25 +0,0 @@ -use std::cell::Cell; -use std::sync::Arc; - -use super::task::Runnable; - -use super::ExecutorContext; -use super::LocalQueue; - -/// A local worker with access to global executor resources. -pub(crate) struct Worker { - pub(super) local_queue: LocalQueue, - pub(super) fast_slot: Cell>, - pub(super) executor_context: Arc, -} - -impl Worker { - /// Creates a new worker. - pub(super) fn new(local_queue: LocalQueue, executor_context: Arc) -> Self { - Self { - local_queue, - fast_slot: Cell::new(None), - executor_context, - } - } -} diff --git a/asynchronix/src/macros/scoped_thread_local.rs b/asynchronix/src/macros/scoped_thread_local.rs index b60b287..587a8f8 100644 --- a/asynchronix/src/macros/scoped_thread_local.rs +++ b/asynchronix/src/macros/scoped_thread_local.rs @@ -7,19 +7,18 @@ use std::ptr; /// Declare a new thread-local storage scoped key of type `ScopedKey`. /// /// This is based on the `scoped-tls` crate, with slight modifications, such as -/// the use of the newly available `const` qualifier for TLS. +/// the addition of a `ScopedLocalKey::unset` method and the use of a `map` +/// method that returns `Option::None` when the value is not set, rather than +/// panicking as `with` would. macro_rules! 
scoped_thread_local { ($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => ( $(#[$attrs])* $vis static $name: $crate::macros::scoped_thread_local::ScopedLocalKey<$ty> - = $crate::macros::scoped_thread_local::ScopedLocalKey { - inner: { - thread_local!(static FOO: ::std::cell::Cell<*const ()> = const { - std::cell::Cell::new(::std::ptr::null()) - }); - &FOO - }, - _marker: ::std::marker::PhantomData, + = unsafe { + ::std::thread_local!(static FOO: ::std::cell::Cell<*const ()> = const { + ::std::cell::Cell::new(::std::ptr::null()) + }); + $crate::macros::scoped_thread_local::ScopedLocalKey::new(&FOO) }; ) } @@ -28,13 +27,24 @@ pub(crate) use scoped_thread_local; /// Type representing a thread local storage key corresponding to a reference /// to the type parameter `T`. pub(crate) struct ScopedLocalKey { - pub(crate) inner: &'static LocalKey>, - pub(crate) _marker: marker::PhantomData, + inner: &'static LocalKey>, + _marker: marker::PhantomData, } unsafe impl Sync for ScopedLocalKey {} impl ScopedLocalKey { + #[doc(hidden)] + /// # Safety + /// + /// Should only be called through the public macro. + pub(crate) const unsafe fn new(inner: &'static LocalKey>) -> Self { + Self { + inner, + _marker: marker::PhantomData, + } + } + /// Inserts a value into this scoped thread local storage slot for the /// duration of a closure. pub(crate) fn set(&'static self, t: &T, f: F) -> R diff --git a/asynchronix/src/rpc/generic_server.rs b/asynchronix/src/rpc/generic_server.rs index 6f54a93..70032c4 100644 --- a/asynchronix/src/rpc/generic_server.rs +++ b/asynchronix/src/rpc/generic_server.rs @@ -14,8 +14,8 @@ use super::codegen::simulation::*; /// Transport-independent server implementation. /// -/// This implementation implements the protobuf services without any -/// transport-specific management. +/// This implements the protobuf services without any transport-specific +/// management. 
pub(crate) struct GenericServer { sim_gen: F, sim_context: Option<(Simulation, EndpointRegistry, KeyRegistry)>, diff --git a/asynchronix/src/simulation/sim_init.rs b/asynchronix/src/simulation/sim_init.rs index a8527ca..f22e1fc 100644 --- a/asynchronix/src/simulation/sim_init.rs +++ b/asynchronix/src/simulation/sim_init.rs @@ -25,15 +25,22 @@ impl SimInit { Self::with_num_threads(num_cpus::get()) } - /// Creates a builder for a multithreaded simulation running on the - /// specified number of threads. + /// Creates a builder for a simulation running on the specified number of + /// threads. + /// + /// Note that the number of worker threads is automatically constrained to + /// be between 1 and `usize::BITS` (inclusive). pub fn with_num_threads(num_threads: usize) -> Self { - // The current executor's implementation caps the number of thread to 64 - // on 64-bit systems and 32 on 32-bit systems. - let num_threads = num_threads.min(usize::BITS as usize); + let num_threads = num_threads.clamp(1, usize::BITS as usize); + + let executor = if num_threads == 1 { + Executor::new_single_threaded() + } else { + Executor::new_multi_threaded(num_threads) + }; Self { - executor: Executor::new(num_threads), + executor, scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())), time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)), clock: Box::new(NoClock::new()), From 59d2af51ba93d2cebdf9139fc9c1e3815d9ed6c8 Mon Sep 17 00:00:00 2001 From: Serge Barral Date: Mon, 27 May 2024 23:21:26 +0200 Subject: [PATCH 12/12] Expose the Protobuf simulation service to WASM/JS --- .github/workflows/ci.yml | 29 +++-- asynchronix/Cargo.toml | 12 +- asynchronix/build.rs | 7 +- asynchronix/src/rpc.rs | 7 +- .../src/rpc/api/custom_transport.proto | 50 -------- asynchronix/src/rpc/api/simulation.proto | 17 +++ asynchronix/src/rpc/codegen.rs | 3 +- .../src/rpc/codegen/custom_transport.rs | 111 ------------------ asynchronix/src/rpc/codegen/simulation.rs | 38 ++++++ 
asynchronix/src/rpc/grpc.rs | 36 +++--- ...eneric_server.rs => simulation_service.rs} | 102 +++++++++------- asynchronix/src/rpc/wasm.rs | 82 +++++++++++++ 12 files changed, 252 insertions(+), 242 deletions(-) delete mode 100644 asynchronix/src/rpc/api/custom_transport.proto delete mode 100644 asynchronix/src/rpc/codegen/custom_transport.rs rename asynchronix/src/rpc/{generic_server.rs => simulation_service.rs} (89%) create mode 100644 asynchronix/src/rpc/wasm.rs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 1821e5a..b8660b9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -28,7 +28,22 @@ jobs: toolchain: ${{ matrix.rust }} - name: Run cargo check - run: cargo check --features="rpc grpc-server" + run: cargo check --features="rpc grpc-service" + + build-wasm: + name: Build wasm32 + runs-on: ubuntu-latest + steps: + - name: Checkout sources + uses: actions/checkout@v3 + + - name: Install toolchain + uses: dtolnay/rust-toolchain@stable + with: + targets: wasm32-unknown-unknown + + - name: Run cargo build (wasm) + run: cargo build --target wasm32-unknown-unknown --features="rpc" test: name: Test suite @@ -41,7 +56,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run cargo test - run: cargo test --features="rpc grpc-server" + run: cargo test --features="rpc grpc-service" loom-dry-run: name: Loom dry run @@ -54,7 +69,7 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Dry-run cargo test (Loom) - run: cargo test --no-run --tests --features="rpc grpc-server" + run: cargo test --no-run --tests --features="rpc grpc-service" env: RUSTFLAGS: --cfg asynchronix_loom @@ -71,12 +86,12 @@ jobs: components: miri - name: Run cargo miri tests (single-threaded executor) - run: cargo miri test --tests --lib --features="rpc grpc-server" + run: cargo miri test --tests --lib --features="rpc grpc-service" env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1 - name: Run cargo miri tests (multi-threaded 
executor) - run: cargo miri test --tests --lib --features="rpc grpc-server" + run: cargo miri test --tests --lib --features="rpc grpc-service" env: MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4 @@ -134,7 +149,7 @@ jobs: run: cargo fmt --all -- --check - name: Run cargo clippy - run: cargo clippy --features="rpc grpc-server" + run: cargo clippy --features="rpc grpc-service" docs: name: Docs @@ -147,4 +162,4 @@ jobs: uses: dtolnay/rust-toolchain@stable - name: Run cargo doc - run: cargo doc --no-deps --features="rpc grpc-server" --document-private-items + run: cargo doc --no-deps --features="rpc grpc-service" --document-private-items diff --git a/asynchronix/Cargo.toml b/asynchronix/Cargo.toml index 981ce35..2b978dd 100644 --- a/asynchronix/Cargo.toml +++ b/asynchronix/Cargo.toml @@ -26,8 +26,10 @@ autotests = false rpc = ["dep:rmp-serde", "dep:serde", "dep:tonic", "dep:prost", "dep:prost-types", "dep:bytes"] # This feature forces protobuf/gRPC code (re-)generation. rpc-codegen = ["dep:tonic-build"] -# gRPC server. -grpc-server = ["rpc", "dep:tokio"] +# gRPC service. +grpc-service = ["rpc", "dep:tokio" , "tonic/transport"] +# wasm service. +wasm-service = ["rpc", "dep:wasm-bindgen"] # API-unstable public exports meant for external test/benchmarking; development only. dev-hooks = [] # Logging of performance-related statistics; development only. @@ -58,10 +60,12 @@ prost-types = { version = "0.12", optional = true } rmp-serde = { version = "1.1", optional = true } serde = { version = "1", optional = true } -# gRPC dependencies. +# gRPC service dependencies. tokio = { version = "1.0", features=["net"], optional = true } -tonic = { version = "0.11", optional = true } +tonic = { version = "0.11", default-features = false, features=["codegen", "prost"], optional = true } +# WASM service dependencies. 
+wasm-bindgen = { version = "0.2", optional = true } [target.'cfg(asynchronix_loom)'.dependencies] loom = "0.5" diff --git a/asynchronix/build.rs b/asynchronix/build.rs index fb7492c..d2bb66b 100644 --- a/asynchronix/build.rs +++ b/asynchronix/build.rs @@ -7,14 +7,11 @@ fn main() -> Result<(), Box> { .build_client(false) .out_dir("src/rpc/codegen/"); - #[cfg(all(feature = "rpc-codegen", not(feature = "grpc-server")))] + #[cfg(all(feature = "rpc-codegen", not(feature = "grpc-service")))] let builder = builder.build_server(false); #[cfg(feature = "rpc-codegen")] - builder.compile( - &["simulation.proto", "custom_transport.proto"], - &["src/rpc/api/"], - )?; + builder.compile(&["simulation.proto"], &["src/rpc/api/"])?; Ok(()) } diff --git a/asynchronix/src/rpc.rs b/asynchronix/src/rpc.rs index d67e3d0..9506bfa 100644 --- a/asynchronix/src/rpc.rs +++ b/asynchronix/src/rpc.rs @@ -2,9 +2,12 @@ mod codegen; mod endpoint_registry; -mod generic_server; -#[cfg(feature = "grpc-server")] +#[cfg(feature = "grpc-service")] pub mod grpc; mod key_registry; +mod simulation_service; +#[cfg(feature = "wasm-service")] +pub mod wasm; pub use endpoint_registry::EndpointRegistry; +pub use simulation_service::SimulationService; diff --git a/asynchronix/src/rpc/api/custom_transport.proto b/asynchronix/src/rpc/api/custom_transport.proto deleted file mode 100644 index 46aefb4..0000000 --- a/asynchronix/src/rpc/api/custom_transport.proto +++ /dev/null @@ -1,50 +0,0 @@ -// Additional types for transport implementations which, unlike gRPC, do not -// support auto-generation from the `Simulation` service description. - -syntax = "proto3"; -package custom_transport; - -import "simulation.proto"; - -enum ServerErrorCode { - UNKNOWN_REQUEST = 0; - EMPTY_REQUEST = 1; -} - -message ServerError { - ServerErrorCode code = 1; - string message = 2; -} - -message AnyRequest { - oneof request { // Expects exactly 1 variant. 
- simulation.InitRequest init_request = 1; - simulation.TimeRequest time_request = 2; - simulation.StepRequest step_request = 3; - simulation.StepUntilRequest step_until_request = 4; - simulation.ScheduleEventRequest schedule_event_request = 5; - simulation.CancelEventRequest cancel_event_request = 6; - simulation.ProcessEventRequest process_event_request = 7; - simulation.ProcessQueryRequest process_query_request = 8; - simulation.ReadEventsRequest read_events_request = 9; - simulation.OpenSinkRequest open_sink_request = 10; - simulation.CloseSinkRequest close_sink_request = 11; - } -} - -message AnyReply { - oneof reply { // Contains exactly 1 variant. - simulation.InitReply init_reply = 1; - simulation.TimeReply time_reply = 2; - simulation.StepReply step_reply = 3; - simulation.StepUntilReply step_until_reply = 4; - simulation.ScheduleEventReply schedule_event_reply = 5; - simulation.CancelEventReply cancel_event_reply = 6; - simulation.ProcessEventReply process_event_reply = 7; - simulation.ProcessQueryReply process_query_reply = 8; - simulation.ReadEventsReply read_events_reply = 9; - simulation.OpenSinkReply open_sink_reply = 10; - simulation.CloseSinkReply close_sink_reply = 11; - ServerError error = 100; - } -} diff --git a/asynchronix/src/rpc/api/simulation.proto b/asynchronix/src/rpc/api/simulation.proto index b12d593..8aa9f68 100644 --- a/asynchronix/src/rpc/api/simulation.proto +++ b/asynchronix/src/rpc/api/simulation.proto @@ -146,6 +146,23 @@ message CloseSinkReply { } } +// A convenience message type for custom transport implementation. +message AnyRequest { + oneof request { // Expects exactly 1 variant. 
+ InitRequest init_request = 1; + TimeRequest time_request = 2; + StepRequest step_request = 3; + StepUntilRequest step_until_request = 4; + ScheduleEventRequest schedule_event_request = 5; + CancelEventRequest cancel_event_request = 6; + ProcessEventRequest process_event_request = 7; + ProcessQueryRequest process_query_request = 8; + ReadEventsRequest read_events_request = 9; + OpenSinkRequest open_sink_request = 10; + CloseSinkRequest close_sink_request = 11; + } +} + service Simulation { rpc Init(InitRequest) returns (InitReply); rpc Time(TimeRequest) returns (TimeReply); diff --git a/asynchronix/src/rpc/codegen.rs b/asynchronix/src/rpc/codegen.rs index 3221cbc..c98125f 100644 --- a/asynchronix/src/rpc/codegen.rs +++ b/asynchronix/src/rpc/codegen.rs @@ -1,7 +1,6 @@ #![allow(unreachable_pub)] #![allow(clippy::enum_variant_names)] +#![allow(missing_docs)] -#[rustfmt::skip] -pub(crate) mod custom_transport; #[rustfmt::skip] pub(crate) mod simulation; diff --git a/asynchronix/src/rpc/codegen/custom_transport.rs b/asynchronix/src/rpc/codegen/custom_transport.rs deleted file mode 100644 index 61eac9d..0000000 --- a/asynchronix/src/rpc/codegen/custom_transport.rs +++ /dev/null @@ -1,111 +0,0 @@ -// This file is @generated by prost-build. -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct ServerError { - #[prost(enumeration = "ServerErrorCode", tag = "1")] - pub code: i32, - #[prost(string, tag = "2")] - pub message: ::prost::alloc::string::String, -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AnyRequest { - /// Expects exactly 1 variant. - #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] - pub request: ::core::option::Option, -} -/// Nested message and enum types in `AnyRequest`. -pub mod any_request { - /// Expects exactly 1 variant. 
- #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Request { - #[prost(message, tag = "1")] - InitRequest(super::super::simulation::InitRequest), - #[prost(message, tag = "2")] - TimeRequest(super::super::simulation::TimeRequest), - #[prost(message, tag = "3")] - StepRequest(super::super::simulation::StepRequest), - #[prost(message, tag = "4")] - StepUntilRequest(super::super::simulation::StepUntilRequest), - #[prost(message, tag = "5")] - ScheduleEventRequest(super::super::simulation::ScheduleEventRequest), - #[prost(message, tag = "6")] - CancelEventRequest(super::super::simulation::CancelEventRequest), - #[prost(message, tag = "7")] - ProcessEventRequest(super::super::simulation::ProcessEventRequest), - #[prost(message, tag = "8")] - ProcessQueryRequest(super::super::simulation::ProcessQueryRequest), - #[prost(message, tag = "9")] - ReadEventsRequest(super::super::simulation::ReadEventsRequest), - #[prost(message, tag = "10")] - OpenSinkRequest(super::super::simulation::OpenSinkRequest), - #[prost(message, tag = "11")] - CloseSinkRequest(super::super::simulation::CloseSinkRequest), - } -} -#[allow(clippy::derive_partial_eq_without_eq)] -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AnyReply { - /// Contains exactly 1 variant. - #[prost(oneof = "any_reply::Reply", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 100")] - pub reply: ::core::option::Option, -} -/// Nested message and enum types in `AnyReply`. -pub mod any_reply { - /// Contains exactly 1 variant. 
- #[allow(clippy::derive_partial_eq_without_eq)] - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum Reply { - #[prost(message, tag = "1")] - InitReply(super::super::simulation::InitReply), - #[prost(message, tag = "2")] - TimeReply(super::super::simulation::TimeReply), - #[prost(message, tag = "3")] - StepReply(super::super::simulation::StepReply), - #[prost(message, tag = "4")] - StepUntilReply(super::super::simulation::StepUntilReply), - #[prost(message, tag = "5")] - ScheduleEventReply(super::super::simulation::ScheduleEventReply), - #[prost(message, tag = "6")] - CancelEventReply(super::super::simulation::CancelEventReply), - #[prost(message, tag = "7")] - ProcessEventReply(super::super::simulation::ProcessEventReply), - #[prost(message, tag = "8")] - ProcessQueryReply(super::super::simulation::ProcessQueryReply), - #[prost(message, tag = "9")] - ReadEventsReply(super::super::simulation::ReadEventsReply), - #[prost(message, tag = "10")] - OpenSinkReply(super::super::simulation::OpenSinkReply), - #[prost(message, tag = "11")] - CloseSinkReply(super::super::simulation::CloseSinkReply), - #[prost(message, tag = "100")] - Error(super::ServerError), - } -} -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] -#[repr(i32)] -pub enum ServerErrorCode { - UnknownRequest = 0, - EmptyRequest = 1, -} -impl ServerErrorCode { - /// String value of the enum field names used in the ProtoBuf definition. - /// - /// The values are not transformed in any way and thus are considered stable - /// (if the ProtoBuf definition does not change) and safe for programmatic use. - pub fn as_str_name(&self) -> &'static str { - match self { - ServerErrorCode::UnknownRequest => "UNKNOWN_REQUEST", - ServerErrorCode::EmptyRequest => "EMPTY_REQUEST", - } - } - /// Creates an enum from field names used in the ProtoBuf definition. 
- pub fn from_str_name(value: &str) -> ::core::option::Option { - match value { - "UNKNOWN_REQUEST" => Some(Self::UnknownRequest), - "EMPTY_REQUEST" => Some(Self::EmptyRequest), - _ => None, - } - } -} diff --git a/asynchronix/src/rpc/codegen/simulation.rs b/asynchronix/src/rpc/codegen/simulation.rs index aefb660..672aed1 100644 --- a/asynchronix/src/rpc/codegen/simulation.rs +++ b/asynchronix/src/rpc/codegen/simulation.rs @@ -332,6 +332,44 @@ pub mod close_sink_reply { Error(super::Error), } } +/// A convenience message type for custom transport implementation. +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AnyRequest { + /// Expects exactly 1 variant. + #[prost(oneof = "any_request::Request", tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11")] + pub request: ::core::option::Option, +} +/// Nested message and enum types in `AnyRequest`. +pub mod any_request { + /// Expects exactly 1 variant. + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Request { + #[prost(message, tag = "1")] + InitRequest(super::InitRequest), + #[prost(message, tag = "2")] + TimeRequest(super::TimeRequest), + #[prost(message, tag = "3")] + StepRequest(super::StepRequest), + #[prost(message, tag = "4")] + StepUntilRequest(super::StepUntilRequest), + #[prost(message, tag = "5")] + ScheduleEventRequest(super::ScheduleEventRequest), + #[prost(message, tag = "6")] + CancelEventRequest(super::CancelEventRequest), + #[prost(message, tag = "7")] + ProcessEventRequest(super::ProcessEventRequest), + #[prost(message, tag = "8")] + ProcessQueryRequest(super::ProcessQueryRequest), + #[prost(message, tag = "9")] + ReadEventsRequest(super::ReadEventsRequest), + #[prost(message, tag = "10")] + OpenSinkRequest(super::OpenSinkRequest), + #[prost(message, tag = "11")] + CloseSinkRequest(super::CloseSinkRequest), + } +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, 
::prost::Enumeration)] #[repr(i32)] pub enum ErrorCode { diff --git a/asynchronix/src/rpc/grpc.rs b/asynchronix/src/rpc/grpc.rs index 94809e9..02b4bf5 100644 --- a/asynchronix/src/rpc/grpc.rs +++ b/asynchronix/src/rpc/grpc.rs @@ -1,4 +1,4 @@ -//! GRPC simulation server. +//! gRPC simulation service. use std::net::SocketAddr; use std::sync::Mutex; @@ -10,12 +10,12 @@ use crate::rpc::EndpointRegistry; use crate::simulation::SimInit; use super::codegen::simulation::*; -use super::generic_server::GenericServer; +use super::simulation_service::SimulationService; -/// Runs a GRPC simulation server. +/// Runs a gRPC simulation server. /// /// The first argument is a closure that is called every time the simulation is -/// started by the remote client. It must create a new `SimInit` object +/// (re)started by the remote client. It must create a new `SimInit` object /// complemented by a registry that exposes the public event and query /// interface. pub fn run(sim_gen: F, addr: SocketAddr) -> Result<(), Box> @@ -27,7 +27,7 @@ where .enable_io() .build()?; - let sim_manager = GrpcServer::new(sim_gen); + let sim_manager = GrpcSimulationService::new(sim_gen); rt.block_on(async move { Server::builder() @@ -39,33 +39,27 @@ where }) } -struct GrpcServer -where - F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, -{ - inner: Mutex>, +struct GrpcSimulationService { + inner: Mutex, } -impl GrpcServer -where - F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, -{ - fn new(sim_gen: F) -> Self { +impl GrpcSimulationService { + fn new(sim_gen: F) -> Self + where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, + { Self { - inner: Mutex::new(GenericServer::new(sim_gen)), + inner: Mutex::new(SimulationService::new(sim_gen)), } } - fn inner(&self) -> MutexGuard<'_, GenericServer> { + fn inner(&self) -> MutexGuard<'_, SimulationService> { self.inner.lock().unwrap() } } #[tonic::async_trait] -impl simulation_server::Simulation for GrpcServer -where - F: 
FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, -{ +impl simulation_server::Simulation for GrpcSimulationService { async fn init(&self, request: Request) -> Result, Status> { let request = request.into_inner(); diff --git a/asynchronix/src/rpc/generic_server.rs b/asynchronix/src/rpc/simulation_service.rs similarity index 89% rename from asynchronix/src/rpc/generic_server.rs rename to asynchronix/src/rpc/simulation_service.rs index 70032c4..9c53001 100644 --- a/asynchronix/src/rpc/generic_server.rs +++ b/asynchronix/src/rpc/simulation_service.rs @@ -1,3 +1,5 @@ +use std::error; +use std::fmt; use std::time::Duration; use bytes::Buf; @@ -9,86 +11,87 @@ use crate::rpc::key_registry::{KeyRegistry, KeyRegistryId}; use crate::rpc::EndpointRegistry; use crate::simulation::{SimInit, Simulation}; -use super::codegen::custom_transport::*; use super::codegen::simulation::*; -/// Transport-independent server implementation. +/// Protobuf-based simulation manager. /// -/// This implements the protobuf services without any transport-specific -/// management. -pub(crate) struct GenericServer { - sim_gen: F, +/// A `SimulationService` enables the management of the lifecycle of a +/// simulation, including creating a +/// [`Simulation`](crate::simulation::Simulation), invoking its methods and +/// instantiating a new simulation. +/// +/// Its methods map the various RPC service methods defined in +/// `simulation.proto`. +pub struct SimulationService { + sim_gen: Box (SimInit, EndpointRegistry) + Send + 'static>, sim_context: Option<(Simulation, EndpointRegistry, KeyRegistry)>, } -impl GenericServer -where - F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, -{ - /// Creates a new `GenericServer` without any active simulation. - pub(crate) fn new(sim_gen: F) -> Self { +impl SimulationService { + /// Creates a new `SimulationService` without any active simulation. 
+ /// + /// The argument is a closure that is called every time the simulation is + /// (re)started by the remote client. It must create a new `SimInit` object + /// complemented by a registry that exposes the public event and query + /// interface. + pub fn new(sim_gen: F) -> Self + where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, + { Self { - sim_gen, + sim_gen: Box::new(sim_gen), sim_context: None, } } - /// Processes an encoded `AnyRequest` message and returns an encoded - /// `AnyReply`. - #[allow(dead_code)] - pub(crate) fn service_request(&mut self, request_buf: B) -> Vec + /// Processes an encoded `AnyRequest` message and returns an encoded reply. + pub fn process_request(&mut self, request_buf: B) -> Result, InvalidRequest> where B: Buf, { - let reply = match AnyRequest::decode(request_buf) { + match AnyRequest::decode(request_buf) { Ok(AnyRequest { request: Some(req) }) => match req { any_request::Request::InitRequest(request) => { - any_reply::Reply::InitReply(self.init(request)) + Ok(self.init(request).encode_to_vec()) } any_request::Request::TimeRequest(request) => { - any_reply::Reply::TimeReply(self.time(request)) + Ok(self.time(request).encode_to_vec()) } any_request::Request::StepRequest(request) => { - any_reply::Reply::StepReply(self.step(request)) + Ok(self.step(request).encode_to_vec()) } any_request::Request::StepUntilRequest(request) => { - any_reply::Reply::StepUntilReply(self.step_until(request)) + Ok(self.step_until(request).encode_to_vec()) } any_request::Request::ScheduleEventRequest(request) => { - any_reply::Reply::ScheduleEventReply(self.schedule_event(request)) + Ok(self.schedule_event(request).encode_to_vec()) } any_request::Request::CancelEventRequest(request) => { - any_reply::Reply::CancelEventReply(self.cancel_event(request)) + Ok(self.cancel_event(request).encode_to_vec()) } any_request::Request::ProcessEventRequest(request) => { - any_reply::Reply::ProcessEventReply(self.process_event(request)) + 
Ok(self.process_event(request).encode_to_vec()) } any_request::Request::ProcessQueryRequest(request) => { - any_reply::Reply::ProcessQueryReply(self.process_query(request)) + Ok(self.process_query(request).encode_to_vec()) } any_request::Request::ReadEventsRequest(request) => { - any_reply::Reply::ReadEventsReply(self.read_events(request)) + Ok(self.read_events(request).encode_to_vec()) } any_request::Request::OpenSinkRequest(request) => { - any_reply::Reply::OpenSinkReply(self.open_sink(request)) + Ok(self.open_sink(request).encode_to_vec()) } any_request::Request::CloseSinkRequest(request) => { - any_reply::Reply::CloseSinkReply(self.close_sink(request)) + Ok(self.close_sink(request).encode_to_vec()) } }, - Ok(AnyRequest { request: None }) => any_reply::Reply::Error(ServerError { - code: ServerErrorCode::EmptyRequest as i32, - message: "the message did not contain any request".to_string(), + Ok(AnyRequest { request: None }) => Err(InvalidRequest { + description: "the message did not contain any request".to_string(), }), - Err(err) => any_reply::Reply::Error(ServerError { - code: ServerErrorCode::UnknownRequest as i32, - message: format!("bad request: {}", err), + Err(err) => Err(InvalidRequest { + description: format!("bad request: {}", err), }), - }; - - let reply = AnyReply { reply: Some(reply) }; - - reply.encode_to_vec() + } } /// Initialize a simulation with the provided time. @@ -606,6 +609,25 @@ where } } +impl fmt::Debug for SimulationService { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("SimulationService").finish_non_exhaustive() + } +} + +#[derive(Clone, Debug)] +pub struct InvalidRequest { + description: String, +} + +impl fmt::Display for InvalidRequest { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str(&self.description) + } +} + +impl error::Error for InvalidRequest {} + /// Attempts a cast from a `MonotonicTime` to a protobuf `Timestamp`. 
/// /// This will fail if the time is outside the protobuf-specified range for diff --git a/asynchronix/src/rpc/wasm.rs b/asynchronix/src/rpc/wasm.rs new file mode 100644 index 0000000..5526e6e --- /dev/null +++ b/asynchronix/src/rpc/wasm.rs @@ -0,0 +1,82 @@ +//! WASM simulation service. +//! +//! This module provides [`WasmSimulationService`], a thin wrapper over a +//! [`SimulationService`] that can be used from JavaScript. +//! +//! Although it is readily possible to use a +//! [`Simulation`](crate::simulation::Simulation) object from WASM, +//! [`WasmSimulationService`] goes further by exposing the complete simulation +//! API to JavaScript through protobuf. +//! +//! Keep in mind that WASM only supports single-threaded execution and therefore +//! any simulation bench compiled to WASM should instantiate simulations with +//! either [`SimInit::new()`](crate::simulation::SimInit::new) or +//! [`SimInit::with_num_threads(1)`](crate::simulation::SimInit::with_num_threads), +//! failing which the simulation will panic upon initialization. +//! +//! [`WasmSimulationService`] is exported to the JavaScript namespace as +//! `SimulationService`, and [`WasmSimulationService::process_request`] as +//! `SimulationService.processRequest`. + +use wasm_bindgen::prelude::*; + +use super::{EndpointRegistry, SimulationService}; +use crate::simulation::SimInit; + +/// A simulation service that can be used from JavaScript. +/// +/// This would typically be used by implementing a `run` function in Rust and +/// exporting it to WASM: +/// +/// ```no_run +/// #[wasm_bindgen] +/// pub fn run() -> WasmSimulationService { +/// WasmSimulationService::new(my_custom_bench_generator) +/// } +/// ``` +/// +/// which can then be used on the JS side to create a `SimulationService` as a +/// JS object, e.g. with: +/// +/// ```js +/// const simu = run(); +/// +/// // ...build a protobuf request and encode it as a `Uint8Array`... 
+/// +/// const reply = simu.processRequest(myRequest); +/// +/// // ...decode the protobuf reply... +/// ``` +#[wasm_bindgen(js_name = SimulationService)] +#[derive(Debug)] +pub struct WasmSimulationService(SimulationService); + +#[wasm_bindgen(js_class = SimulationService)] +impl WasmSimulationService { + /// Processes a protobuf-encoded `AnyRequest` message and returns a + /// protobuf-encoded reply. + /// + /// For the Protocol Buffer definitions, see the `simulation.proto` file. + #[wasm_bindgen(js_name = processRequest)] + pub fn process_request(&mut self, request: &[u8]) -> Result, JsError> { + self.0 + .process_request(request) + .map(|reply| reply.into_boxed_slice()) + .map_err(|e| JsError::new(&e.to_string())) + } +} + +impl WasmSimulationService { + /// Creates a new `SimulationService` without any active simulation. + /// + /// The argument is a closure that is called every time the simulation is + /// (re)started by the remote client. It must create a new `SimInit` object + /// complemented by a registry that exposes the public event and query + /// interface. + pub fn new(sim_gen: F) -> Self + where + F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static, + { + Self(SimulationService::new(sim_gen)) + } +}