forked from ROMEO/nexosim
Merge pull request #24 from asynchronics/dev
Merge private dev branch into main
This commit is contained in:
commit
a7e691c002
69
.github/workflows/ci.yml
vendored
69
.github/workflows/ci.yml
vendored
@ -3,7 +3,7 @@ name: CI
|
|||||||
on:
|
on:
|
||||||
pull_request:
|
pull_request:
|
||||||
push:
|
push:
|
||||||
branches: [ main ]
|
branches: [main, dev]
|
||||||
|
|
||||||
env:
|
env:
|
||||||
RUSTFLAGS: -Dwarnings
|
RUSTFLAGS: -Dwarnings
|
||||||
@ -17,7 +17,7 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
rust:
|
rust:
|
||||||
- stable
|
- stable
|
||||||
- 1.64.0
|
- 1.77.0
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout sources
|
- name: Checkout sources
|
||||||
uses: actions/checkout@v3
|
uses: actions/checkout@v3
|
||||||
@ -28,7 +28,22 @@ jobs:
|
|||||||
toolchain: ${{ matrix.rust }}
|
toolchain: ${{ matrix.rust }}
|
||||||
|
|
||||||
- name: Run cargo check
|
- name: Run cargo check
|
||||||
run: cargo check --all-features
|
run: cargo check --features="rpc grpc-service"
|
||||||
|
|
||||||
|
build-wasm:
|
||||||
|
name: Build wasm32
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout sources
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Install toolchain
|
||||||
|
uses: dtolnay/rust-toolchain@stable
|
||||||
|
with:
|
||||||
|
targets: wasm32-unknown-unknown
|
||||||
|
|
||||||
|
- name: Run cargo build (wasm)
|
||||||
|
run: cargo build --target wasm32-unknown-unknown --features="rpc"
|
||||||
|
|
||||||
test:
|
test:
|
||||||
name: Test suite
|
name: Test suite
|
||||||
@ -41,7 +56,7 @@ jobs:
|
|||||||
uses: dtolnay/rust-toolchain@stable
|
uses: dtolnay/rust-toolchain@stable
|
||||||
|
|
||||||
- name: Run cargo test
|
- name: Run cargo test
|
||||||
run: cargo test
|
run: cargo test --features="rpc grpc-service"
|
||||||
|
|
||||||
loom-dry-run:
|
loom-dry-run:
|
||||||
name: Loom dry run
|
name: Loom dry run
|
||||||
@ -54,7 +69,7 @@ jobs:
|
|||||||
uses: dtolnay/rust-toolchain@stable
|
uses: dtolnay/rust-toolchain@stable
|
||||||
|
|
||||||
- name: Dry-run cargo test (Loom)
|
- name: Dry-run cargo test (Loom)
|
||||||
run: cargo test --no-run --tests
|
run: cargo test --no-run --tests --features="rpc grpc-service"
|
||||||
env:
|
env:
|
||||||
RUSTFLAGS: --cfg asynchronix_loom
|
RUSTFLAGS: --cfg asynchronix_loom
|
||||||
|
|
||||||
@ -70,23 +85,53 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
components: miri
|
components: miri
|
||||||
|
|
||||||
- name: Run cargo miri tests
|
- name: Run cargo miri tests (single-threaded executor)
|
||||||
run: cargo miri test --tests --lib
|
run: cargo miri test --tests --lib --features="rpc grpc-service"
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
|
||||||
|
|
||||||
|
- name: Run cargo miri tests (multi-threaded executor)
|
||||||
|
run: cargo miri test --tests --lib --features="rpc grpc-service"
|
||||||
env:
|
env:
|
||||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||||
|
|
||||||
- name: Run cargo miri example1
|
- name: Run cargo miri example1 (single-threaded executor)
|
||||||
|
run: cargo miri run --example espresso_machine
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
|
||||||
|
|
||||||
|
- name: Run cargo miri example1 (multi-threaded executor)
|
||||||
run: cargo miri run --example espresso_machine
|
run: cargo miri run --example espresso_machine
|
||||||
env:
|
env:
|
||||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||||
|
|
||||||
- name: Run cargo miri example2
|
- name: Run cargo miri example2 (single-threaded executor)
|
||||||
|
run: cargo miri run --example power_supply
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
|
||||||
|
|
||||||
|
- name: Run cargo miri example2 (multi-threaded executor)
|
||||||
run: cargo miri run --example power_supply
|
run: cargo miri run --example power_supply
|
||||||
env:
|
env:
|
||||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||||
|
|
||||||
- name: Run cargo miri example3
|
- name: Run cargo miri example3 (single-threaded executor)
|
||||||
run: cargo miri run --example stepper_motor
|
run: cargo miri run --example stepper_motor
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
|
||||||
|
|
||||||
|
- name: Run cargo miri example3 (multi-threaded executor)
|
||||||
|
run: cargo miri run --example stepper_motor
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||||
|
|
||||||
|
- name: Run cargo miri example4 (single-threaded executor)
|
||||||
|
run: cargo miri run --example assembly
|
||||||
|
env:
|
||||||
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
|
||||||
|
|
||||||
|
- name: Run cargo miri example4 (multi-threaded executor)
|
||||||
|
run: cargo miri run --example assembly
|
||||||
env:
|
env:
|
||||||
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
|
||||||
|
|
||||||
@ -104,7 +149,7 @@ jobs:
|
|||||||
run: cargo fmt --all -- --check
|
run: cargo fmt --all -- --check
|
||||||
|
|
||||||
- name: Run cargo clippy
|
- name: Run cargo clippy
|
||||||
run: cargo clippy
|
run: cargo clippy --features="rpc grpc-service"
|
||||||
|
|
||||||
docs:
|
docs:
|
||||||
name: Docs
|
name: Docs
|
||||||
@ -117,4 +162,4 @@ jobs:
|
|||||||
uses: dtolnay/rust-toolchain@stable
|
uses: dtolnay/rust-toolchain@stable
|
||||||
|
|
||||||
- name: Run cargo doc
|
- name: Run cargo doc
|
||||||
run: cargo doc --no-deps --document-private-items
|
run: cargo doc --no-deps --features="rpc grpc-service" --document-private-items
|
||||||
|
8
.github/workflows/loom.yml
vendored
8
.github/workflows/loom.yml
vendored
@ -10,10 +10,12 @@ on:
|
|||||||
- 'asynchronix/src/executor/task.rs'
|
- 'asynchronix/src/executor/task.rs'
|
||||||
- 'asynchronix/src/executor/task/**'
|
- 'asynchronix/src/executor/task/**'
|
||||||
- 'asynchronix/src/loom_exports.rs'
|
- 'asynchronix/src/loom_exports.rs'
|
||||||
- 'asynchronix/src/model/ports/broadcaster.rs'
|
- 'asynchronix/src/ports/output/broadcaster.rs'
|
||||||
- 'asynchronix/src/model/ports/broadcaster/**'
|
- 'asynchronix/src/ports/output/broadcaster/**'
|
||||||
|
- 'asynchronix/src/ports/source/broadcaster.rs'
|
||||||
|
- 'asynchronix/src/ports/source/broadcaster/**'
|
||||||
|
- 'asynchronix/src/util/cached_rw_lock.rs'
|
||||||
- 'asynchronix/src/util/slot.rs'
|
- 'asynchronix/src/util/slot.rs'
|
||||||
- 'asynchronix/src/util/spsc_queue.rs'
|
|
||||||
- 'asynchronix/src/util/sync_cell.rs'
|
- 'asynchronix/src/util/sync_cell.rs'
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
|
@ -9,7 +9,7 @@ name = "asynchronix"
|
|||||||
authors = ["Serge Barral <serge.barral@asynchronics.com>"]
|
authors = ["Serge Barral <serge.barral@asynchronics.com>"]
|
||||||
version = "0.2.2"
|
version = "0.2.2"
|
||||||
edition = "2021"
|
edition = "2021"
|
||||||
rust-version = "1.64"
|
rust-version = "1.77.0"
|
||||||
license = "MIT OR Apache-2.0"
|
license = "MIT OR Apache-2.0"
|
||||||
repository = "https://github.com/asynchronics/asynchronix"
|
repository = "https://github.com/asynchronics/asynchronix"
|
||||||
readme = "../README.md"
|
readme = "../README.md"
|
||||||
@ -20,17 +20,29 @@ categories = ["simulation", "aerospace", "science"]
|
|||||||
keywords = ["simulation", "discrete-event", "systems", "cyberphysical", "real-time"]
|
keywords = ["simulation", "discrete-event", "systems", "cyberphysical", "real-time"]
|
||||||
autotests = false
|
autotests = false
|
||||||
|
|
||||||
|
|
||||||
[features]
|
[features]
|
||||||
serde = ["dep:serde"]
|
# Remote procedure call API.
|
||||||
|
rpc = ["dep:rmp-serde", "dep:serde", "dep:tonic", "dep:prost", "dep:prost-types", "dep:bytes"]
|
||||||
|
# This feature forces protobuf/gRPC code (re-)generation.
|
||||||
|
rpc-codegen = ["dep:tonic-build"]
|
||||||
|
# gRPC service.
|
||||||
|
grpc-service = ["rpc", "dep:tokio" , "tonic/transport"]
|
||||||
|
# wasm service.
|
||||||
|
wasm-service = ["rpc", "dep:wasm-bindgen"]
|
||||||
# API-unstable public exports meant for external test/benchmarking; development only.
|
# API-unstable public exports meant for external test/benchmarking; development only.
|
||||||
dev-hooks = []
|
dev-hooks = []
|
||||||
# Logging of performance-related statistics; development only.
|
# Logging of performance-related statistics; development only.
|
||||||
dev-logs = []
|
dev-logs = []
|
||||||
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
# Mandatory dependencies.
|
||||||
async-event = "0.1"
|
async-event = "0.1"
|
||||||
crossbeam-utils = "0.8"
|
crossbeam-utils = "0.8"
|
||||||
diatomic-waker = "0.1"
|
diatomic-waker = "0.1"
|
||||||
|
dyn-clone = "1.0"
|
||||||
|
futures-channel = "0.3"
|
||||||
futures-task = "0.3"
|
futures-task = "0.3"
|
||||||
multishot = "0.3.2"
|
multishot = "0.3.2"
|
||||||
num_cpus = "1.13"
|
num_cpus = "1.13"
|
||||||
@ -39,21 +51,36 @@ recycle-box = "0.2"
|
|||||||
slab = "0.4"
|
slab = "0.4"
|
||||||
spin_sleep = "1"
|
spin_sleep = "1"
|
||||||
st3 = "0.4"
|
st3 = "0.4"
|
||||||
|
tai-time = "0.3"
|
||||||
|
|
||||||
[dependencies.serde]
|
# Common RPC dependencies.
|
||||||
version = "1"
|
bytes = { version = "1", default-features = false, optional = true }
|
||||||
optional = true
|
prost = { version = "0.12", optional = true }
|
||||||
features = ["derive"]
|
prost-types = { version = "0.12", optional = true }
|
||||||
|
rmp-serde = { version = "1.1", optional = true }
|
||||||
|
serde = { version = "1", optional = true }
|
||||||
|
|
||||||
|
# gRPC service dependencies.
|
||||||
|
tokio = { version = "1.0", features=["net"], optional = true }
|
||||||
|
tonic = { version = "0.11", default-features = false, features=["codegen", "prost"], optional = true }
|
||||||
|
|
||||||
|
# WASM service dependencies.
|
||||||
|
wasm-bindgen = { version = "0.2", optional = true }
|
||||||
|
|
||||||
[target.'cfg(asynchronix_loom)'.dependencies]
|
[target.'cfg(asynchronix_loom)'.dependencies]
|
||||||
loom = "0.5"
|
loom = "0.5"
|
||||||
waker-fn = "1.1"
|
waker-fn = "1.1"
|
||||||
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
futures-util = "0.3"
|
futures-util = "0.3"
|
||||||
futures-channel = "0.3"
|
|
||||||
futures-executor = "0.3"
|
futures-executor = "0.3"
|
||||||
|
|
||||||
|
|
||||||
|
[build-dependencies]
|
||||||
|
tonic-build = { version = "0.11", optional = true }
|
||||||
|
|
||||||
|
|
||||||
[[test]]
|
[[test]]
|
||||||
name = "integration"
|
name = "integration"
|
||||||
path = "tests/tests.rs"
|
path = "tests/tests.rs"
|
||||||
|
17
asynchronix/build.rs
Normal file
17
asynchronix/build.rs
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
// Prevent warnings when checking for flag `asynchronix_loom`.
|
||||||
|
println!("cargo::rustc-check-cfg=cfg(asynchronix_loom)");
|
||||||
|
|
||||||
|
#[cfg(feature = "rpc-codegen")]
|
||||||
|
let builder = tonic_build::configure()
|
||||||
|
.build_client(false)
|
||||||
|
.out_dir("src/rpc/codegen/");
|
||||||
|
|
||||||
|
#[cfg(all(feature = "rpc-codegen", not(feature = "grpc-service")))]
|
||||||
|
let builder = builder.build_server(false);
|
||||||
|
|
||||||
|
#[cfg(feature = "rpc-codegen")]
|
||||||
|
builder.compile(&["simulation.proto"], &["src/rpc/api/"])?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
155
asynchronix/examples/assembly.rs
Normal file
155
asynchronix/examples/assembly.rs
Normal file
@ -0,0 +1,155 @@
|
|||||||
|
//! Example: an assembly consisting of a current-controlled stepper motor and
|
||||||
|
//! its driver.
|
||||||
|
//!
|
||||||
|
//! This example demonstrates in particular:
|
||||||
|
//!
|
||||||
|
//! * submodels,
|
||||||
|
//! * outputs cloning,
|
||||||
|
//! * self-scheduling methods,
|
||||||
|
//! * model setup,
|
||||||
|
//! * model initialization,
|
||||||
|
//! * simulation monitoring with event streams.
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! ┌──────────────────────────────────────────────┐
|
||||||
|
//! │ Assembly │
|
||||||
|
//! │ ┌──────────┐ ┌──────────┐ │
|
||||||
|
//! PPS │ │ │ coil currents │ │ │position
|
||||||
|
//! Pulse rate ●───────▶│──▶│ Driver ├───────────────▶│ Motor ├──▶│─────────▶
|
||||||
|
//! (±freq)│ │ │ (IA, IB) │ │ │(0:199)
|
||||||
|
//! │ └──────────┘ └──────────┘ │
|
||||||
|
//! └──────────────────────────────────────────────┘
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use asynchronix::model::{Model, SetupContext};
|
||||||
|
use asynchronix::ports::{EventBuffer, Output};
|
||||||
|
use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
|
mod stepper_motor;
|
||||||
|
|
||||||
|
pub use stepper_motor::{Driver, Motor};
|
||||||
|
|
||||||
|
pub struct MotorAssembly {
|
||||||
|
pub position: Output<u16>,
|
||||||
|
init_pos: u16,
|
||||||
|
load: Output<f64>,
|
||||||
|
pps: Output<f64>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl MotorAssembly {
|
||||||
|
pub fn new(init_pos: u16) -> Self {
|
||||||
|
Self {
|
||||||
|
position: Default::default(),
|
||||||
|
init_pos,
|
||||||
|
load: Default::default(),
|
||||||
|
pps: Default::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Sets the pulse rate (sign = direction) [Hz] -- input port.
|
||||||
|
pub async fn pulse_rate(&mut self, pps: f64) {
|
||||||
|
self.pps.send(pps).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Torque applied by the load [N·m] -- input port.
|
||||||
|
pub async fn load(&mut self, torque: f64) {
|
||||||
|
self.load.send(torque).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Model for MotorAssembly {
|
||||||
|
fn setup(&mut self, setup_context: &SetupContext<Self>) {
|
||||||
|
let mut motor = Motor::new(self.init_pos);
|
||||||
|
let mut driver = Driver::new(1.0);
|
||||||
|
|
||||||
|
// Mailboxes.
|
||||||
|
let motor_mbox = Mailbox::new();
|
||||||
|
let driver_mbox = Mailbox::new();
|
||||||
|
|
||||||
|
// Connections.
|
||||||
|
self.pps.connect(Driver::pulse_rate, &driver_mbox);
|
||||||
|
self.load.connect(Motor::load, &motor_mbox);
|
||||||
|
driver.current_out.connect(Motor::current_in, &motor_mbox);
|
||||||
|
// Note: it is important to clone `position` from the parent to the
|
||||||
|
// submodel so that all connections made by the user to the parent model
|
||||||
|
// are preserved. Connections added after cloning are reflected in all
|
||||||
|
// clones.
|
||||||
|
motor.position = self.position.clone();
|
||||||
|
|
||||||
|
setup_context.add_model(driver, driver_mbox, "driver");
|
||||||
|
setup_context.add_model(motor, motor_mbox, "motor");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn main() {
|
||||||
|
// ---------------
|
||||||
|
// Bench assembly.
|
||||||
|
// ---------------
|
||||||
|
|
||||||
|
// Models.
|
||||||
|
let init_pos = 123;
|
||||||
|
let mut assembly = MotorAssembly::new(init_pos);
|
||||||
|
|
||||||
|
// Mailboxes.
|
||||||
|
let assembly_mbox = Mailbox::new();
|
||||||
|
let assembly_addr = assembly_mbox.address();
|
||||||
|
|
||||||
|
// Model handles for simulation.
|
||||||
|
let mut position = EventBuffer::new();
|
||||||
|
assembly.position.connect_sink(&position);
|
||||||
|
|
||||||
|
// Start time (arbitrary since models do not depend on absolute time).
|
||||||
|
let t0 = MonotonicTime::EPOCH;
|
||||||
|
|
||||||
|
// Assembly and initialization.
|
||||||
|
let mut simu = SimInit::new()
|
||||||
|
.add_model(assembly, assembly_mbox, "assembly")
|
||||||
|
.init(t0);
|
||||||
|
|
||||||
|
// ----------
|
||||||
|
// Simulation.
|
||||||
|
// ----------
|
||||||
|
|
||||||
|
// Check initial conditions.
|
||||||
|
let mut t = t0;
|
||||||
|
assert_eq!(simu.time(), t);
|
||||||
|
assert_eq!(position.next(), Some(init_pos));
|
||||||
|
assert!(position.next().is_none());
|
||||||
|
|
||||||
|
// Start the motor in 2s with a PPS of 10Hz.
|
||||||
|
simu.schedule_event(
|
||||||
|
Duration::from_secs(2),
|
||||||
|
MotorAssembly::pulse_rate,
|
||||||
|
10.0,
|
||||||
|
&assembly_addr,
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Advance simulation time to two next events.
|
||||||
|
simu.step();
|
||||||
|
t += Duration::new(2, 0);
|
||||||
|
assert_eq!(simu.time(), t);
|
||||||
|
simu.step();
|
||||||
|
t += Duration::new(0, 100_000_000);
|
||||||
|
assert_eq!(simu.time(), t);
|
||||||
|
|
||||||
|
// Whichever the starting position, after two phase increments from the
|
||||||
|
// driver the rotor should have synchronized with the driver, with a
|
||||||
|
// position given by this beautiful formula.
|
||||||
|
let mut pos = (((init_pos + 1) / 4) * 4 + 1) % Motor::STEPS_PER_REV;
|
||||||
|
assert_eq!(position.by_ref().last().unwrap(), pos);
|
||||||
|
|
||||||
|
// Advance simulation time by 0.9s, which with a 10Hz PPS should correspond to
|
||||||
|
// 9 position increments.
|
||||||
|
simu.step_by(Duration::new(0, 900_000_000));
|
||||||
|
t += Duration::new(0, 900_000_000);
|
||||||
|
assert_eq!(simu.time(), t);
|
||||||
|
for _ in 0..9 {
|
||||||
|
pos = (pos + 1) % Motor::STEPS_PER_REV;
|
||||||
|
assert_eq!(position.next(), Some(pos));
|
||||||
|
}
|
||||||
|
assert!(position.next().is_none());
|
||||||
|
}
|
@ -31,13 +31,12 @@
|
|||||||
//! (-)
|
//! (-)
|
||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use std::future::Future;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use asynchronix::model::{InitializedModel, Model, Output};
|
use asynchronix::model::{Context, InitializedModel, Model};
|
||||||
use asynchronix::simulation::{Mailbox, SimInit};
|
use asynchronix::ports::{EventSlot, Output};
|
||||||
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
|
use asynchronix::simulation::{ActionKey, Mailbox, SimInit};
|
||||||
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
/// Water pump.
|
/// Water pump.
|
||||||
pub struct Pump {
|
pub struct Pump {
|
||||||
@ -81,7 +80,7 @@ pub struct Controller {
|
|||||||
water_sense: WaterSenseState,
|
water_sense: WaterSenseState,
|
||||||
/// Event key, which if present indicates that the machine is currently
|
/// Event key, which if present indicates that the machine is currently
|
||||||
/// brewing -- internal state.
|
/// brewing -- internal state.
|
||||||
stop_brew_key: Option<EventKey>,
|
stop_brew_key: Option<ActionKey>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Controller {
|
impl Controller {
|
||||||
@ -121,7 +120,7 @@ impl Controller {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Starts brewing or cancels the current brew -- input port.
|
/// Starts brewing or cancels the current brew -- input port.
|
||||||
pub async fn brew_cmd(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
pub async fn brew_cmd(&mut self, _: (), context: &Context<Self>) {
|
||||||
// If a brew was ongoing, sending the brew command is interpreted as a
|
// If a brew was ongoing, sending the brew command is interpreted as a
|
||||||
// request to cancel it.
|
// request to cancel it.
|
||||||
if let Some(key) = self.stop_brew_key.take() {
|
if let Some(key) = self.stop_brew_key.take() {
|
||||||
@ -140,7 +139,7 @@ impl Controller {
|
|||||||
|
|
||||||
// Schedule the `stop_brew()` method and turn on the pump.
|
// Schedule the `stop_brew()` method and turn on the pump.
|
||||||
self.stop_brew_key = Some(
|
self.stop_brew_key = Some(
|
||||||
scheduler
|
context
|
||||||
.schedule_keyed_event(self.brew_time, Self::stop_brew, ())
|
.schedule_keyed_event(self.brew_time, Self::stop_brew, ())
|
||||||
.unwrap(),
|
.unwrap(),
|
||||||
);
|
);
|
||||||
@ -189,7 +188,7 @@ impl Tank {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Water volume added [m³] -- input port.
|
/// Water volume added [m³] -- input port.
|
||||||
pub async fn fill(&mut self, added_volume: f64, scheduler: &Scheduler<Self>) {
|
pub async fn fill(&mut self, added_volume: f64, context: &Context<Self>) {
|
||||||
// Ignore zero and negative values. We could also impose a maximum based
|
// Ignore zero and negative values. We could also impose a maximum based
|
||||||
// on tank capacity.
|
// on tank capacity.
|
||||||
if added_volume <= 0.0 {
|
if added_volume <= 0.0 {
|
||||||
@ -207,11 +206,11 @@ impl Tank {
|
|||||||
state.set_empty_key.cancel();
|
state.set_empty_key.cancel();
|
||||||
|
|
||||||
// Update the volume, saturating at 0 in case of rounding errors.
|
// Update the volume, saturating at 0 in case of rounding errors.
|
||||||
let time = scheduler.time();
|
let time = context.time();
|
||||||
let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64();
|
let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64();
|
||||||
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
||||||
|
|
||||||
self.schedule_empty(state.flow_rate, time, scheduler).await;
|
self.schedule_empty(state.flow_rate, time, context).await;
|
||||||
|
|
||||||
// There is no need to broadcast the state of the water sense since
|
// There is no need to broadcast the state of the water sense since
|
||||||
// it could not be previously `Empty` (otherwise the dynamic state
|
// it could not be previously `Empty` (otherwise the dynamic state
|
||||||
@ -229,10 +228,10 @@ impl Tank {
|
|||||||
/// # Panics
|
/// # Panics
|
||||||
///
|
///
|
||||||
/// This method will panic if the flow rate is negative.
|
/// This method will panic if the flow rate is negative.
|
||||||
pub async fn set_flow_rate(&mut self, flow_rate: f64, scheduler: &Scheduler<Self>) {
|
pub async fn set_flow_rate(&mut self, flow_rate: f64, context: &Context<Self>) {
|
||||||
assert!(flow_rate >= 0.0);
|
assert!(flow_rate >= 0.0);
|
||||||
|
|
||||||
let time = scheduler.time();
|
let time = context.time();
|
||||||
|
|
||||||
// If the flow rate was non-zero up to now, update the volume.
|
// If the flow rate was non-zero up to now, update the volume.
|
||||||
if let Some(state) = self.dynamic_state.take() {
|
if let Some(state) = self.dynamic_state.take() {
|
||||||
@ -244,7 +243,7 @@ impl Tank {
|
|||||||
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
|
||||||
}
|
}
|
||||||
|
|
||||||
self.schedule_empty(flow_rate, time, scheduler).await;
|
self.schedule_empty(flow_rate, time, context).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Schedules a callback for when the tank becomes empty.
|
/// Schedules a callback for when the tank becomes empty.
|
||||||
@ -257,7 +256,7 @@ impl Tank {
|
|||||||
&mut self,
|
&mut self,
|
||||||
flow_rate: f64,
|
flow_rate: f64,
|
||||||
time: MonotonicTime,
|
time: MonotonicTime,
|
||||||
scheduler: &Scheduler<Self>,
|
context: &Context<Self>,
|
||||||
) {
|
) {
|
||||||
// Determine when the tank will be empty at the current flow rate.
|
// Determine when the tank will be empty at the current flow rate.
|
||||||
let duration_until_empty = if self.volume == 0.0 {
|
let duration_until_empty = if self.volume == 0.0 {
|
||||||
@ -274,7 +273,7 @@ impl Tank {
|
|||||||
let duration_until_empty = Duration::from_secs_f64(duration_until_empty);
|
let duration_until_empty = Duration::from_secs_f64(duration_until_empty);
|
||||||
|
|
||||||
// Schedule the next update.
|
// Schedule the next update.
|
||||||
match scheduler.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) {
|
match context.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) {
|
||||||
Ok(set_empty_key) => {
|
Ok(set_empty_key) => {
|
||||||
let state = TankDynamicState {
|
let state = TankDynamicState {
|
||||||
last_volume_update: time,
|
last_volume_update: time,
|
||||||
@ -301,21 +300,16 @@ impl Tank {
|
|||||||
|
|
||||||
impl Model for Tank {
|
impl Model for Tank {
|
||||||
/// Broadcasts the initial state of the water sense.
|
/// Broadcasts the initial state of the water sense.
|
||||||
fn init(
|
async fn init(mut self, _: &Context<Self>) -> InitializedModel<Self> {
|
||||||
mut self,
|
self.water_sense
|
||||||
_scheduler: &Scheduler<Self>,
|
.send(if self.volume == 0.0 {
|
||||||
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
|
WaterSenseState::Empty
|
||||||
Box::pin(async move {
|
} else {
|
||||||
self.water_sense
|
WaterSenseState::NotEmpty
|
||||||
.send(if self.volume == 0.0 {
|
})
|
||||||
WaterSenseState::Empty
|
.await;
|
||||||
} else {
|
|
||||||
WaterSenseState::NotEmpty
|
|
||||||
})
|
|
||||||
.await;
|
|
||||||
|
|
||||||
self.into()
|
self.into()
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -323,7 +317,7 @@ impl Model for Tank {
|
|||||||
/// is non-zero.
|
/// is non-zero.
|
||||||
struct TankDynamicState {
|
struct TankDynamicState {
|
||||||
last_volume_update: MonotonicTime,
|
last_volume_update: MonotonicTime,
|
||||||
set_empty_key: EventKey,
|
set_empty_key: ActionKey,
|
||||||
flow_rate: f64,
|
flow_rate: f64,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -364,7 +358,8 @@ fn main() {
|
|||||||
pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox);
|
pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox);
|
||||||
|
|
||||||
// Model handles for simulation.
|
// Model handles for simulation.
|
||||||
let mut flow_rate = pump.flow_rate.connect_slot().0;
|
let mut flow_rate = EventSlot::new();
|
||||||
|
pump.flow_rate.connect_sink(&flow_rate);
|
||||||
let controller_addr = controller_mbox.address();
|
let controller_addr = controller_mbox.address();
|
||||||
let tank_addr = tank_mbox.address();
|
let tank_addr = tank_mbox.address();
|
||||||
|
|
||||||
@ -373,9 +368,9 @@ fn main() {
|
|||||||
|
|
||||||
// Assembly and initialization.
|
// Assembly and initialization.
|
||||||
let mut simu = SimInit::new()
|
let mut simu = SimInit::new()
|
||||||
.add_model(controller, controller_mbox)
|
.add_model(controller, controller_mbox, "controller")
|
||||||
.add_model(pump, pump_mbox)
|
.add_model(pump, pump_mbox, "pump")
|
||||||
.add_model(tank, tank_mbox)
|
.add_model(tank, tank_mbox, "tank")
|
||||||
.init(t0);
|
.init(t0);
|
||||||
|
|
||||||
// ----------
|
// ----------
|
||||||
@ -387,48 +382,48 @@ fn main() {
|
|||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
|
|
||||||
// Brew one espresso shot with the default brew time.
|
// Brew one espresso shot with the default brew time.
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
|
||||||
|
|
||||||
simu.step();
|
simu.step();
|
||||||
t += Controller::DEFAULT_BREW_TIME;
|
t += Controller::DEFAULT_BREW_TIME;
|
||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
assert_eq!(flow_rate.take(), Some(0.0));
|
assert_eq!(flow_rate.next(), Some(0.0));
|
||||||
|
|
||||||
// Drink too much coffee.
|
// Drink too much coffee.
|
||||||
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64();
|
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64();
|
||||||
let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who cares about floating-point rounding errors?
|
let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who cares about floating-point rounding errors?
|
||||||
for _ in 0..(shots_per_tank - 1) {
|
for _ in 0..(shots_per_tank - 1) {
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
|
||||||
simu.step();
|
simu.step();
|
||||||
t += Controller::DEFAULT_BREW_TIME;
|
t += Controller::DEFAULT_BREW_TIME;
|
||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
assert_eq!(flow_rate.take(), Some(0.0));
|
assert_eq!(flow_rate.next(), Some(0.0));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check that the tank becomes empty before the completion of the next shot.
|
// Check that the tank becomes empty before the completion of the next shot.
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME);
|
assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME);
|
||||||
t = simu.time();
|
t = simu.time();
|
||||||
assert_eq!(flow_rate.take(), Some(0.0));
|
assert_eq!(flow_rate.next(), Some(0.0));
|
||||||
|
|
||||||
// Try to brew another shot while the tank is still empty.
|
// Try to brew another shot while the tank is still empty.
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
assert!(flow_rate.take().is_none());
|
assert!(flow_rate.next().is_none());
|
||||||
|
|
||||||
// Change the brew time and fill up the tank.
|
// Change the brew time and fill up the tank.
|
||||||
let brew_time = Duration::new(30, 0);
|
let brew_time = Duration::new(30, 0);
|
||||||
simu.send_event(Controller::brew_time, brew_time, &controller_addr);
|
simu.process_event(Controller::brew_time, brew_time, &controller_addr);
|
||||||
simu.send_event(Tank::fill, 1.0e-3, tank_addr);
|
simu.process_event(Tank::fill, 1.0e-3, tank_addr);
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
|
||||||
|
|
||||||
simu.step();
|
simu.step();
|
||||||
t += brew_time;
|
t += brew_time;
|
||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
assert_eq!(flow_rate.take(), Some(0.0));
|
assert_eq!(flow_rate.next(), Some(0.0));
|
||||||
|
|
||||||
// Interrupt the brew after 15s by pressing again the brew button.
|
// Interrupt the brew after 15s by pressing again the brew button.
|
||||||
simu.schedule_event(
|
simu.schedule_event(
|
||||||
@ -438,11 +433,11 @@ fn main() {
|
|||||||
&controller_addr,
|
&controller_addr,
|
||||||
)
|
)
|
||||||
.unwrap();
|
.unwrap();
|
||||||
simu.send_event(Controller::brew_cmd, (), &controller_addr);
|
simu.process_event(Controller::brew_cmd, (), &controller_addr);
|
||||||
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
|
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
|
||||||
|
|
||||||
simu.step();
|
simu.step();
|
||||||
t += Duration::from_secs(15);
|
t += Duration::from_secs(15);
|
||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
assert_eq!(flow_rate.take(), Some(0.0));
|
assert_eq!(flow_rate.next(), Some(0.0));
|
||||||
}
|
}
|
||||||
|
@ -26,7 +26,8 @@
|
|||||||
//! │ ├───────────────────────────────▶ Total power
|
//! │ ├───────────────────────────────▶ Total power
|
||||||
//! └──────────┘
|
//! └──────────┘
|
||||||
//! ```
|
//! ```
|
||||||
use asynchronix::model::{Model, Output, Requestor};
|
use asynchronix::model::Model;
|
||||||
|
use asynchronix::ports::{EventSlot, Output, Requestor};
|
||||||
use asynchronix::simulation::{Mailbox, SimInit};
|
use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
use asynchronix::time::MonotonicTime;
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
@ -124,10 +125,14 @@ fn main() {
|
|||||||
psu.pwr_out.connect(Load::pwr_in, &load3_mbox);
|
psu.pwr_out.connect(Load::pwr_in, &load3_mbox);
|
||||||
|
|
||||||
// Model handles for simulation.
|
// Model handles for simulation.
|
||||||
let mut psu_power = psu.power.connect_slot().0;
|
let mut psu_power = EventSlot::new();
|
||||||
let mut load1_power = load1.power.connect_slot().0;
|
let mut load1_power = EventSlot::new();
|
||||||
let mut load2_power = load2.power.connect_slot().0;
|
let mut load2_power = EventSlot::new();
|
||||||
let mut load3_power = load3.power.connect_slot().0;
|
let mut load3_power = EventSlot::new();
|
||||||
|
psu.power.connect_sink(&psu_power);
|
||||||
|
load1.power.connect_sink(&load1_power);
|
||||||
|
load2.power.connect_sink(&load2_power);
|
||||||
|
load3.power.connect_sink(&load3_power);
|
||||||
let psu_addr = psu_mbox.address();
|
let psu_addr = psu_mbox.address();
|
||||||
|
|
||||||
// Start time (arbitrary since models do not depend on absolute time).
|
// Start time (arbitrary since models do not depend on absolute time).
|
||||||
@ -135,10 +140,10 @@ fn main() {
|
|||||||
|
|
||||||
// Assembly and initialization.
|
// Assembly and initialization.
|
||||||
let mut simu = SimInit::new()
|
let mut simu = SimInit::new()
|
||||||
.add_model(psu, psu_mbox)
|
.add_model(psu, psu_mbox, "psu")
|
||||||
.add_model(load1, load1_mbox)
|
.add_model(load1, load1_mbox, "load1")
|
||||||
.add_model(load2, load2_mbox)
|
.add_model(load2, load2_mbox, "load2")
|
||||||
.add_model(load3, load3_mbox)
|
.add_model(load3, load3_mbox, "load3")
|
||||||
.init(t0);
|
.init(t0);
|
||||||
|
|
||||||
// ----------
|
// ----------
|
||||||
@ -153,14 +158,14 @@ fn main() {
|
|||||||
|
|
||||||
// Vary the supply voltage, check the load and power supply consumptions.
|
// Vary the supply voltage, check the load and power supply consumptions.
|
||||||
for voltage in [10.0, 15.0, 20.0] {
|
for voltage in [10.0, 15.0, 20.0] {
|
||||||
simu.send_event(PowerSupply::voltage_setting, voltage, &psu_addr);
|
simu.process_event(PowerSupply::voltage_setting, voltage, &psu_addr);
|
||||||
|
|
||||||
let v_square = voltage * voltage;
|
let v_square = voltage * voltage;
|
||||||
assert!(same_power(load1_power.take().unwrap(), v_square / r1));
|
assert!(same_power(load1_power.next().unwrap(), v_square / r1));
|
||||||
assert!(same_power(load2_power.take().unwrap(), v_square / r2));
|
assert!(same_power(load2_power.next().unwrap(), v_square / r2));
|
||||||
assert!(same_power(load3_power.take().unwrap(), v_square / r3));
|
assert!(same_power(load3_power.next().unwrap(), v_square / r3));
|
||||||
assert!(same_power(
|
assert!(same_power(
|
||||||
psu_power.take().unwrap(),
|
psu_power.next().unwrap(),
|
||||||
v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3)
|
v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3)
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
@ -15,12 +15,12 @@
|
|||||||
//! ```
|
//! ```
|
||||||
|
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::pin::Pin;
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use asynchronix::model::{InitializedModel, Model, Output};
|
use asynchronix::model::{Context, InitializedModel, Model};
|
||||||
|
use asynchronix::ports::{EventBuffer, Output};
|
||||||
use asynchronix::simulation::{Mailbox, SimInit};
|
use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
use asynchronix::time::{MonotonicTime, Scheduler};
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
/// Stepper motor.
|
/// Stepper motor.
|
||||||
pub struct Motor {
|
pub struct Motor {
|
||||||
@ -40,7 +40,7 @@ impl Motor {
|
|||||||
pub const TORQUE_CONSTANT: f64 = 1.0;
|
pub const TORQUE_CONSTANT: f64 = 1.0;
|
||||||
|
|
||||||
/// Creates a motor with the specified initial position.
|
/// Creates a motor with the specified initial position.
|
||||||
fn new(position: u16) -> Self {
|
pub fn new(position: u16) -> Self {
|
||||||
Self {
|
Self {
|
||||||
position: Default::default(),
|
position: Default::default(),
|
||||||
pos: position % Self::STEPS_PER_REV,
|
pos: position % Self::STEPS_PER_REV,
|
||||||
@ -53,8 +53,15 @@ impl Motor {
|
|||||||
/// For the sake of simplicity, we do as if the rotor rotates
|
/// For the sake of simplicity, we do as if the rotor rotates
|
||||||
/// instantaneously. If the current is too weak to overcome the load or when
|
/// instantaneously. If the current is too weak to overcome the load or when
|
||||||
/// attempting to move to an opposite phase, the position remains unchanged.
|
/// attempting to move to an opposite phase, the position remains unchanged.
|
||||||
pub async fn current_in(&mut self, current: (f64, f64)) {
|
pub async fn current_in(&mut self, current: (f64, f64), context: &Context<Self>) {
|
||||||
assert!(!current.0.is_nan() && !current.1.is_nan());
|
assert!(!current.0.is_nan() && !current.1.is_nan());
|
||||||
|
println!(
|
||||||
|
"Model instance {} at time {}: setting currents: {:.2} and {:.2}",
|
||||||
|
context.name(),
|
||||||
|
context.time(),
|
||||||
|
current.0,
|
||||||
|
current.1
|
||||||
|
);
|
||||||
|
|
||||||
let (target_phase, abs_current) = match (current.0 != 0.0, current.1 != 0.0) {
|
let (target_phase, abs_current) = match (current.0 != 0.0, current.1 != 0.0) {
|
||||||
(false, false) => return,
|
(false, false) => return,
|
||||||
@ -78,24 +85,25 @@ impl Motor {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Torque applied by the load [N·m] -- input port.
|
/// Torque applied by the load [N·m] -- input port.
|
||||||
pub fn load(&mut self, torque: f64) {
|
pub fn load(&mut self, torque: f64, context: &Context<Self>) {
|
||||||
assert!(torque >= 0.0);
|
assert!(torque >= 0.0);
|
||||||
|
|
||||||
|
println!(
|
||||||
|
"Model instance {} at time {}: setting load: {:.2}",
|
||||||
|
context.name(),
|
||||||
|
context.time(),
|
||||||
|
torque
|
||||||
|
);
|
||||||
|
|
||||||
self.torque = torque;
|
self.torque = torque;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Model for Motor {
|
impl Model for Motor {
|
||||||
/// Broadcasts the initial position of the motor.
|
/// Broadcasts the initial position of the motor.
|
||||||
fn init(
|
async fn init(mut self, _: &Context<Self>) -> InitializedModel<Self> {
|
||||||
mut self,
|
self.position.send(self.pos).await;
|
||||||
_scheduler: &Scheduler<Self>,
|
self.into()
|
||||||
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
self.position.send(self.pos).await;
|
|
||||||
|
|
||||||
self.into()
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -129,7 +137,14 @@ impl Driver {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Sets the pulse rate (sign = direction) [Hz] -- input port.
|
/// Sets the pulse rate (sign = direction) [Hz] -- input port.
|
||||||
pub async fn pulse_rate(&mut self, pps: f64, scheduler: &Scheduler<Self>) {
|
pub async fn pulse_rate(&mut self, pps: f64, context: &Context<Self>) {
|
||||||
|
println!(
|
||||||
|
"Model instance {} at time {}: setting pps: {:.2}",
|
||||||
|
context.name(),
|
||||||
|
context.time(),
|
||||||
|
pps
|
||||||
|
);
|
||||||
|
|
||||||
let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS);
|
let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS);
|
||||||
if pps == self.pps {
|
if pps == self.pps {
|
||||||
return;
|
return;
|
||||||
@ -141,7 +156,7 @@ impl Driver {
|
|||||||
// Trigger the rotation if the motor is currently idle. Otherwise the
|
// Trigger the rotation if the motor is currently idle. Otherwise the
|
||||||
// new value will be accounted for at the next pulse.
|
// new value will be accounted for at the next pulse.
|
||||||
if is_idle {
|
if is_idle {
|
||||||
self.send_pulse((), scheduler).await;
|
self.send_pulse((), context).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -152,8 +167,14 @@ impl Driver {
|
|||||||
fn send_pulse<'a>(
|
fn send_pulse<'a>(
|
||||||
&'a mut self,
|
&'a mut self,
|
||||||
_: (),
|
_: (),
|
||||||
scheduler: &'a Scheduler<Self>,
|
context: &'a Context<Self>,
|
||||||
) -> impl Future<Output = ()> + Send + 'a {
|
) -> impl Future<Output = ()> + Send + 'a {
|
||||||
|
println!(
|
||||||
|
"Model instance {} at time {}: sending pulse",
|
||||||
|
context.name(),
|
||||||
|
context.time()
|
||||||
|
);
|
||||||
|
|
||||||
async move {
|
async move {
|
||||||
let current_out = match self.next_phase {
|
let current_out = match self.next_phase {
|
||||||
0 => (self.current, 0.0),
|
0 => (self.current, 0.0),
|
||||||
@ -173,7 +194,7 @@ impl Driver {
|
|||||||
let pulse_duration = Duration::from_secs_f64(1.0 / self.pps.abs());
|
let pulse_duration = Duration::from_secs_f64(1.0 / self.pps.abs());
|
||||||
|
|
||||||
// Schedule the next pulse.
|
// Schedule the next pulse.
|
||||||
scheduler
|
context
|
||||||
.schedule_event(pulse_duration, Self::send_pulse, ())
|
.schedule_event(pulse_duration, Self::send_pulse, ())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
@ -182,6 +203,7 @@ impl Driver {
|
|||||||
|
|
||||||
impl Model for Driver {}
|
impl Model for Driver {}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
fn main() {
|
fn main() {
|
||||||
// ---------------
|
// ---------------
|
||||||
// Bench assembly.
|
// Bench assembly.
|
||||||
@ -200,7 +222,8 @@ fn main() {
|
|||||||
driver.current_out.connect(Motor::current_in, &motor_mbox);
|
driver.current_out.connect(Motor::current_in, &motor_mbox);
|
||||||
|
|
||||||
// Model handles for simulation.
|
// Model handles for simulation.
|
||||||
let mut position = motor.position.connect_stream().0;
|
let mut position = EventBuffer::new();
|
||||||
|
motor.position.connect_sink(&position);
|
||||||
let motor_addr = motor_mbox.address();
|
let motor_addr = motor_mbox.address();
|
||||||
let driver_addr = driver_mbox.address();
|
let driver_addr = driver_mbox.address();
|
||||||
|
|
||||||
@ -209,8 +232,8 @@ fn main() {
|
|||||||
|
|
||||||
// Assembly and initialization.
|
// Assembly and initialization.
|
||||||
let mut simu = SimInit::new()
|
let mut simu = SimInit::new()
|
||||||
.add_model(driver, driver_mbox)
|
.add_model(driver, driver_mbox, "driver")
|
||||||
.add_model(motor, motor_mbox)
|
.add_model(motor, motor_mbox, "motor")
|
||||||
.init(t0);
|
.init(t0);
|
||||||
|
|
||||||
// ----------
|
// ----------
|
||||||
@ -258,7 +281,7 @@ fn main() {
|
|||||||
assert!(position.next().is_none());
|
assert!(position.next().is_none());
|
||||||
|
|
||||||
// Increase the load beyond the torque limit for a 1A driver current.
|
// Increase the load beyond the torque limit for a 1A driver current.
|
||||||
simu.send_event(Motor::load, 2.0, &motor_addr);
|
simu.process_event(Motor::load, 2.0, &motor_addr);
|
||||||
|
|
||||||
// Advance simulation time and check that the motor is blocked.
|
// Advance simulation time and check that the motor is blocked.
|
||||||
simu.step();
|
simu.step();
|
||||||
@ -274,7 +297,7 @@ fn main() {
|
|||||||
|
|
||||||
// Decrease the load below the torque limit for a 1A driver current and
|
// Decrease the load below the torque limit for a 1A driver current and
|
||||||
// advance simulation time.
|
// advance simulation time.
|
||||||
simu.send_event(Motor::load, 0.5, &motor_addr);
|
simu.process_event(Motor::load, 0.5, &motor_addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
t += Duration::new(0, 100_000_000);
|
t += Duration::new(0, 100_000_000);
|
||||||
|
|
||||||
@ -298,7 +321,7 @@ fn main() {
|
|||||||
|
|
||||||
// Now make the motor rotate in the opposite direction. Note that this
|
// Now make the motor rotate in the opposite direction. Note that this
|
||||||
// driver only accounts for a new PPS at the next pulse.
|
// driver only accounts for a new PPS at the next pulse.
|
||||||
simu.send_event(Driver::pulse_rate, -10.0, &driver_addr);
|
simu.process_event(Driver::pulse_rate, -10.0, &driver_addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
t += Duration::new(0, 100_000_000);
|
t += Duration::new(0, 100_000_000);
|
||||||
assert_eq!(simu.time(), t);
|
assert_eq!(simu.time(), t);
|
||||||
|
@ -8,7 +8,6 @@ use std::error;
|
|||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::num::NonZeroUsize;
|
|
||||||
use std::sync::atomic::{self, AtomicUsize, Ordering};
|
use std::sync::atomic::{self, AtomicUsize, Ordering};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
@ -19,8 +18,7 @@ use recycle_box::RecycleBox;
|
|||||||
use queue::{PopError, PushError, Queue};
|
use queue::{PopError, PushError, Queue};
|
||||||
use recycle_box::coerce_box;
|
use recycle_box::coerce_box;
|
||||||
|
|
||||||
use crate::model::Model;
|
use crate::model::{Context, Model};
|
||||||
use crate::time::Scheduler;
|
|
||||||
|
|
||||||
/// Data shared between the receiver and the senders.
|
/// Data shared between the receiver and the senders.
|
||||||
struct Inner<M> {
|
struct Inner<M> {
|
||||||
@ -46,7 +44,7 @@ impl<M: 'static> Inner<M> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A receiver which can asynchronously execute `async` message that take an
|
/// A receiver which can asynchronously execute `async` message that take an
|
||||||
/// argument of type `&mut M` and an optional `&Scheduler<M>` argument.
|
/// argument of type `&mut M` and an optional `&Context<M>` argument.
|
||||||
pub(crate) struct Receiver<M> {
|
pub(crate) struct Receiver<M> {
|
||||||
/// Shared data.
|
/// Shared data.
|
||||||
inner: Arc<Inner<M>>,
|
inner: Arc<Inner<M>>,
|
||||||
@ -91,7 +89,7 @@ impl<M: Model> Receiver<M> {
|
|||||||
pub(crate) async fn recv(
|
pub(crate) async fn recv(
|
||||||
&mut self,
|
&mut self,
|
||||||
model: &mut M,
|
model: &mut M,
|
||||||
scheduler: &Scheduler<M>,
|
context: &Context<M>,
|
||||||
) -> Result<(), RecvError> {
|
) -> Result<(), RecvError> {
|
||||||
let msg = unsafe {
|
let msg = unsafe {
|
||||||
self.inner
|
self.inner
|
||||||
@ -107,7 +105,7 @@ impl<M: Model> Receiver<M> {
|
|||||||
match msg {
|
match msg {
|
||||||
Some(mut msg) => {
|
Some(mut msg) => {
|
||||||
// Consume the message to obtain a boxed future.
|
// Consume the message to obtain a boxed future.
|
||||||
let fut = msg.call_once(model, scheduler, self.future_box.take().unwrap());
|
let fut = msg.call_once(model, context, self.future_box.take().unwrap());
|
||||||
|
|
||||||
// Now that `msg` was consumed and its slot in the queue was
|
// Now that `msg` was consumed and its slot in the queue was
|
||||||
// freed, signal to one awaiting sender that one slot is
|
// freed, signal to one awaiting sender that one slot is
|
||||||
@ -154,7 +152,7 @@ impl<M: Model> Receiver<M> {
|
|||||||
/// time, but an identifier may be reused after all handles to a channel
|
/// time, but an identifier may be reused after all handles to a channel
|
||||||
/// have been dropped.
|
/// have been dropped.
|
||||||
pub(crate) fn channel_id(&self) -> ChannelId {
|
pub(crate) fn channel_id(&self) -> ChannelId {
|
||||||
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
|
ChannelId(&*self.inner as *const Inner<M> as usize)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -189,7 +187,7 @@ impl<M: Model> Sender<M> {
|
|||||||
where
|
where
|
||||||
F: for<'a> FnOnce(
|
F: for<'a> FnOnce(
|
||||||
&'a mut M,
|
&'a mut M,
|
||||||
&'a Scheduler<M>,
|
&'a Context<M>,
|
||||||
RecycleBox<()>,
|
RecycleBox<()>,
|
||||||
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
|
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
|
||||||
+ Send
|
+ Send
|
||||||
@ -255,8 +253,8 @@ impl<M: Model> Sender<M> {
|
|||||||
/// All channels are guaranteed to have different identifiers at any given
|
/// All channels are guaranteed to have different identifiers at any given
|
||||||
/// time, but an identifier may be reused after all handles to a channel
|
/// time, but an identifier may be reused after all handles to a channel
|
||||||
/// have been dropped.
|
/// have been dropped.
|
||||||
pub(crate) fn channel_id(&self) -> ChannelId {
|
pub(crate) fn channel_id(&self) -> usize {
|
||||||
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
|
Arc::as_ptr(&self.inner) as usize
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -312,7 +310,7 @@ impl<M> fmt::Debug for Sender<M> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// A closure that can be called once to create a future boxed in a `RecycleBox`
|
/// A closure that can be called once to create a future boxed in a `RecycleBox`
|
||||||
/// from an `&mut M`, a `&Scheduler<M>` and an empty `RecycleBox`.
|
/// from an `&mut M`, a `&Context<M>` and an empty `RecycleBox`.
|
||||||
///
|
///
|
||||||
/// This is basically a workaround to emulate an `FnOnce` with the equivalent of
|
/// This is basically a workaround to emulate an `FnOnce` with the equivalent of
|
||||||
/// an `FnMut` so that it is possible to call it as a `dyn` trait stored in a
|
/// an `FnMut` so that it is possible to call it as a `dyn` trait stored in a
|
||||||
@ -328,7 +326,7 @@ trait MessageFn<M: Model>: Send {
|
|||||||
fn call_once<'a>(
|
fn call_once<'a>(
|
||||||
&mut self,
|
&mut self,
|
||||||
model: &'a mut M,
|
model: &'a mut M,
|
||||||
scheduler: &'a Scheduler<M>,
|
context: &'a Context<M>,
|
||||||
recycle_box: RecycleBox<()>,
|
recycle_box: RecycleBox<()>,
|
||||||
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>;
|
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>;
|
||||||
}
|
}
|
||||||
@ -350,7 +348,7 @@ impl<F, M: Model> MessageFn<M> for MessageFnOnce<F, M>
|
|||||||
where
|
where
|
||||||
F: for<'a> FnOnce(
|
F: for<'a> FnOnce(
|
||||||
&'a mut M,
|
&'a mut M,
|
||||||
&'a Scheduler<M>,
|
&'a Context<M>,
|
||||||
RecycleBox<()>,
|
RecycleBox<()>,
|
||||||
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
|
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
|
||||||
+ Send,
|
+ Send,
|
||||||
@ -358,18 +356,18 @@ where
|
|||||||
fn call_once<'a>(
|
fn call_once<'a>(
|
||||||
&mut self,
|
&mut self,
|
||||||
model: &'a mut M,
|
model: &'a mut M,
|
||||||
scheduler: &'a Scheduler<M>,
|
context: &'a Context<M>,
|
||||||
recycle_box: RecycleBox<()>,
|
recycle_box: RecycleBox<()>,
|
||||||
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a> {
|
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a> {
|
||||||
let closure = self.msg_fn.take().unwrap();
|
let closure = self.msg_fn.take().unwrap();
|
||||||
|
|
||||||
(closure)(model, scheduler, recycle_box)
|
(closure)(model, context, recycle_box)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Unique identifier for a channel.
|
/// Unique identifier for a channel.
|
||||||
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
|
||||||
pub(crate) struct ChannelId(NonZeroUsize);
|
pub(crate) struct ChannelId(usize);
|
||||||
|
|
||||||
impl fmt::Display for ChannelId {
|
impl fmt::Display for ChannelId {
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
@ -85,7 +85,7 @@ struct Slot<T: ?Sized> {
|
|||||||
message: UnsafeCell<MessageBox<T>>,
|
message: UnsafeCell<MessageBox<T>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An fast MPSC queue that stores its items in recyclable boxes.
|
/// A fast MPSC queue that stores its items in recyclable boxes.
|
||||||
///
|
///
|
||||||
/// The item may be unsized.
|
/// The item may be unsized.
|
||||||
///
|
///
|
||||||
|
@ -15,7 +15,7 @@ impl Executor {
|
|||||||
///
|
///
|
||||||
/// The maximum number of threads is set with the `pool_size` parameter.
|
/// The maximum number of threads is set with the `pool_size` parameter.
|
||||||
pub fn new(pool_size: usize) -> Self {
|
pub fn new(pool_size: usize) -> Self {
|
||||||
Self(executor::Executor::new(pool_size))
|
Self(executor::Executor::new_multi_threaded(pool_size))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Spawns a task which output will never be retrieved.
|
/// Spawns a task which output will never be retrieved.
|
||||||
|
@ -1,98 +1,30 @@
|
|||||||
//! Multi-threaded `async` executor.
|
//! `async` executor trait.
|
||||||
//!
|
|
||||||
//! The executor is exclusively designed for message-passing computational
|
|
||||||
//! tasks. As such, it does not include an I/O reactor and does not consider
|
|
||||||
//! fairness as a goal in itself. While it does use fair local queues inasmuch
|
|
||||||
//! as these tend to perform better in message-passing applications, it uses an
|
|
||||||
//! unfair injection queue and a LIFO slot without attempt to mitigate the
|
|
||||||
//! effect of badly behaving code (e.g. futures that spin-lock by yielding to
|
|
||||||
//! the executor; there is for this reason no support for something like tokio's
|
|
||||||
//! `yield_now`).
|
|
||||||
//!
|
|
||||||
//! Another way in which it differs from other `async` executors is that it
|
|
||||||
//! treats deadlocking as a normal occurrence. This is because in a
|
|
||||||
//! discrete-time simulator, the simulation of a system at a given time step
|
|
||||||
//! will make as much progress as possible until it technically reaches a
|
|
||||||
//! deadlock. Only then does the simulator advance the simulated time to that of
|
|
||||||
//! the next "event" extracted from a time-sorted priority queue.
|
|
||||||
//!
|
|
||||||
//! The design of the executor is largely influenced by the tokio and Go
|
|
||||||
//! schedulers, both of which are optimized for message-passing applications. In
|
|
||||||
//! particular, it uses fast, fixed-size thread-local work-stealing queues with
|
|
||||||
//! a non-stealable LIFO slot in combination with an injector queue, which
|
|
||||||
//! injector queue is used both to schedule new tasks and to absorb temporary
|
|
||||||
//! overflow in the local queues.
|
|
||||||
//!
|
|
||||||
//! The design of the injector queue is kept very simple compared to tokio, by
|
|
||||||
//! taking advantage of the fact that the injector is not required to be either
|
|
||||||
//! LIFO or FIFO. Moving tasks between a local queue and the injector is fast
|
|
||||||
//! because tasks are moved in batch and are stored contiguously in memory.
|
|
||||||
//!
|
|
||||||
//! Another difference with tokio is that, at the moment, the complete subset of
|
|
||||||
//! active worker threads is stored in a single atomic variable. This makes it
|
|
||||||
//! possible to rapidly identify free worker threads for stealing operations,
|
|
||||||
//! with the downside that the maximum number of worker threads is currently
|
|
||||||
//! limited to `usize::BITS`. This is not expected to constitute a limitation in
|
|
||||||
//! practice since system simulation is not typically embarrassingly parallel.
|
|
||||||
//!
|
|
||||||
//! Probably the largest difference with tokio is the task system, which has
|
|
||||||
//! better throughput due to less need for synchronization. This mainly results
|
|
||||||
//! from the use of an atomic notification counter rather than an atomic
|
|
||||||
//! notification flag, thus alleviating the need to reset the notification flag
|
|
||||||
//! before polling a future.
|
|
||||||
|
|
||||||
use std::fmt;
|
mod mt_executor;
|
||||||
use std::future::Future;
|
mod st_executor;
|
||||||
use std::panic::{self, AssertUnwindSafe};
|
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::thread::{self, JoinHandle};
|
|
||||||
use std::time::{Duration, Instant};
|
|
||||||
|
|
||||||
use crossbeam_utils::sync::{Parker, Unparker};
|
|
||||||
use slab::Slab;
|
|
||||||
|
|
||||||
mod injector;
|
|
||||||
mod pool_manager;
|
|
||||||
mod task;
|
mod task;
|
||||||
mod worker;
|
|
||||||
|
|
||||||
#[cfg(all(test, not(asynchronix_loom)))]
|
use std::future::Future;
|
||||||
mod tests;
|
use std::sync::atomic::AtomicUsize;
|
||||||
|
|
||||||
use crate::macros::scoped_thread_local::scoped_thread_local;
|
use task::Promise;
|
||||||
use crate::util::rng::Rng;
|
|
||||||
|
|
||||||
use self::pool_manager::PoolManager;
|
|
||||||
use self::task::{CancelToken, Promise, Runnable};
|
|
||||||
use self::worker::Worker;
|
|
||||||
|
|
||||||
const BUCKET_SIZE: usize = 128;
|
|
||||||
const QUEUE_SIZE: usize = BUCKET_SIZE * 2;
|
|
||||||
|
|
||||||
|
/// Unique identifier for executor instances.
|
||||||
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
|
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
|
||||||
|
|
||||||
type Bucket = injector::Bucket<Runnable, BUCKET_SIZE>;
|
/// A single-threaded or multi-threaded `async` executor.
|
||||||
type Injector = injector::Injector<Runnable, BUCKET_SIZE>;
|
#[derive(Debug)]
|
||||||
type LocalQueue = st3::fifo::Worker<Runnable>;
|
pub(crate) enum Executor {
|
||||||
type Stealer = st3::fifo::Stealer<Runnable>;
|
StExecutor(st_executor::Executor),
|
||||||
|
MtExecutor(mt_executor::Executor),
|
||||||
scoped_thread_local!(static LOCAL_WORKER: Worker);
|
|
||||||
scoped_thread_local!(static ACTIVE_TASKS: Mutex<Slab<CancelToken>>);
|
|
||||||
|
|
||||||
/// A multi-threaded `async` executor.
|
|
||||||
pub(crate) struct Executor {
|
|
||||||
/// Shared executor data.
|
|
||||||
context: Arc<ExecutorContext>,
|
|
||||||
/// List of tasks that have not completed yet.
|
|
||||||
active_tasks: Arc<Mutex<Slab<CancelToken>>>,
|
|
||||||
/// Parker for the main executor thread.
|
|
||||||
parker: Parker,
|
|
||||||
/// Join handles of the worker threads.
|
|
||||||
worker_handles: Vec<JoinHandle<()>>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Executor {
|
impl Executor {
|
||||||
|
/// Creates an executor that runs futures on the current thread.
|
||||||
|
pub(crate) fn new_single_threaded() -> Self {
|
||||||
|
Self::StExecutor(st_executor::Executor::new())
|
||||||
|
}
|
||||||
|
|
||||||
/// Creates an executor that runs futures on a thread pool.
|
/// Creates an executor that runs futures on a thread pool.
|
||||||
///
|
///
|
||||||
/// The maximum number of threads is set with the `num_threads` parameter.
|
/// The maximum number of threads is set with the `num_threads` parameter.
|
||||||
@ -101,78 +33,11 @@ impl Executor {
|
|||||||
///
|
///
|
||||||
/// This will panic if the specified number of threads is zero or is more
|
/// This will panic if the specified number of threads is zero or is more
|
||||||
/// than `usize::BITS`.
|
/// than `usize::BITS`.
|
||||||
pub(crate) fn new(num_threads: usize) -> Self {
|
pub(crate) fn new_multi_threaded(num_threads: usize) -> Self {
|
||||||
let parker = Parker::new();
|
Self::MtExecutor(mt_executor::Executor::new(num_threads))
|
||||||
let unparker = parker.unparker().clone();
|
|
||||||
|
|
||||||
let (local_queues_and_parkers, stealers_and_unparkers): (Vec<_>, Vec<_>) = (0..num_threads)
|
|
||||||
.map(|_| {
|
|
||||||
let parker = Parker::new();
|
|
||||||
let unparker = parker.unparker().clone();
|
|
||||||
let local_queue = LocalQueue::new(QUEUE_SIZE);
|
|
||||||
let stealer = local_queue.stealer();
|
|
||||||
|
|
||||||
((local_queue, parker), (stealer, unparker))
|
|
||||||
})
|
|
||||||
.unzip();
|
|
||||||
|
|
||||||
// Each executor instance has a unique ID inherited by tasks to ensure
|
|
||||||
// that tasks are scheduled on their parent executor.
|
|
||||||
let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed);
|
|
||||||
assert!(
|
|
||||||
executor_id <= usize::MAX / 2,
|
|
||||||
"too many executors have been instantiated"
|
|
||||||
);
|
|
||||||
|
|
||||||
let context = Arc::new(ExecutorContext::new(
|
|
||||||
executor_id,
|
|
||||||
unparker,
|
|
||||||
stealers_and_unparkers.into_iter(),
|
|
||||||
));
|
|
||||||
let active_tasks = Arc::new(Mutex::new(Slab::new()));
|
|
||||||
|
|
||||||
// All workers must be marked as active _before_ spawning the threads to
|
|
||||||
// make sure that the count of active workers does not fall to zero
|
|
||||||
// before all workers are blocked on the signal barrier.
|
|
||||||
context.pool_manager.set_all_workers_active();
|
|
||||||
|
|
||||||
// Spawn all worker threads.
|
|
||||||
let worker_handles: Vec<_> = local_queues_and_parkers
|
|
||||||
.into_iter()
|
|
||||||
.enumerate()
|
|
||||||
.map(|(id, (local_queue, worker_parker))| {
|
|
||||||
let thread_builder = thread::Builder::new().name(format!("Worker #{}", id));
|
|
||||||
|
|
||||||
thread_builder
|
|
||||||
.spawn({
|
|
||||||
let context = context.clone();
|
|
||||||
let active_tasks = active_tasks.clone();
|
|
||||||
move || {
|
|
||||||
let worker = Worker::new(local_queue, context);
|
|
||||||
ACTIVE_TASKS.set(&active_tasks, || {
|
|
||||||
LOCAL_WORKER
|
|
||||||
.set(&worker, || run_local_worker(&worker, id, worker_parker))
|
|
||||||
});
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.unwrap()
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
// Wait until all workers are blocked on the signal barrier.
|
|
||||||
parker.park();
|
|
||||||
assert!(context.pool_manager.pool_is_idle());
|
|
||||||
|
|
||||||
Self {
|
|
||||||
context,
|
|
||||||
active_tasks,
|
|
||||||
parker,
|
|
||||||
worker_handles,
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Spawns a task and returns a promise that can be polled to retrieve the
|
/// Spawns a task which output will never be retrieved.
|
||||||
/// task's output.
|
|
||||||
///
|
///
|
||||||
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||||
/// is called.
|
/// is called.
|
||||||
@ -182,28 +47,14 @@ impl Executor {
|
|||||||
T: Future + Send + 'static,
|
T: Future + Send + 'static,
|
||||||
T::Output: Send + 'static,
|
T::Output: Send + 'static,
|
||||||
{
|
{
|
||||||
// Book a slot to store the task cancellation token.
|
match self {
|
||||||
let mut active_tasks = self.active_tasks.lock().unwrap();
|
Self::StExecutor(executor) => executor.spawn(future),
|
||||||
let task_entry = active_tasks.vacant_entry();
|
Self::MtExecutor(executor) => executor.spawn(future),
|
||||||
|
}
|
||||||
// Wrap the future so that it removes its cancel token from the
|
|
||||||
// executor's list when dropped.
|
|
||||||
let future = CancellableFuture::new(future, task_entry.key());
|
|
||||||
|
|
||||||
let (promise, runnable, cancel_token) =
|
|
||||||
task::spawn(future, schedule_task, self.context.executor_id);
|
|
||||||
|
|
||||||
task_entry.insert(cancel_token);
|
|
||||||
self.context.injector.insert_task(runnable);
|
|
||||||
|
|
||||||
promise
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Spawns a task which output will never be retrieved.
|
/// Spawns a task which output will never be retrieved.
|
||||||
///
|
///
|
||||||
/// This is mostly useful to avoid undue reference counting for futures that
|
|
||||||
/// return a `()` type.
|
|
||||||
///
|
|
||||||
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||||
/// is called.
|
/// is called.
|
||||||
pub(crate) fn spawn_and_forget<T>(&self, future: T)
|
pub(crate) fn spawn_and_forget<T>(&self, future: T)
|
||||||
@ -211,354 +62,171 @@ impl Executor {
|
|||||||
T: Future + Send + 'static,
|
T: Future + Send + 'static,
|
||||||
T::Output: Send + 'static,
|
T::Output: Send + 'static,
|
||||||
{
|
{
|
||||||
// Book a slot to store the task cancellation token.
|
match self {
|
||||||
let mut active_tasks = self.active_tasks.lock().unwrap();
|
Self::StExecutor(executor) => executor.spawn_and_forget(future),
|
||||||
let task_entry = active_tasks.vacant_entry();
|
Self::MtExecutor(executor) => executor.spawn_and_forget(future),
|
||||||
|
}
|
||||||
// Wrap the future so that it removes its cancel token from the
|
|
||||||
// executor's list when dropped.
|
|
||||||
let future = CancellableFuture::new(future, task_entry.key());
|
|
||||||
|
|
||||||
let (runnable, cancel_token) =
|
|
||||||
task::spawn_and_forget(future, schedule_task, self.context.executor_id);
|
|
||||||
|
|
||||||
task_entry.insert(cancel_token);
|
|
||||||
self.context.injector.insert_task(runnable);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Execute spawned tasks, blocking until all futures have completed or
|
/// Execute spawned tasks, blocking until all futures have completed or
|
||||||
/// until the executor reaches a deadlock.
|
/// until the executor reaches a deadlock.
|
||||||
pub(crate) fn run(&mut self) {
|
pub(crate) fn run(&mut self) {
|
||||||
self.context.pool_manager.activate_worker();
|
match self {
|
||||||
|
Self::StExecutor(executor) => executor.run(),
|
||||||
|
Self::MtExecutor(executor) => executor.run(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
loop {
|
#[cfg(all(test, not(asynchronix_loom)))]
|
||||||
if let Some(worker_panic) = self.context.pool_manager.take_panic() {
|
mod tests {
|
||||||
panic::resume_unwind(worker_panic);
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use futures_channel::{mpsc, oneshot};
|
||||||
|
use futures_util::StreamExt;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// An object that runs an arbitrary closure when dropped.
|
||||||
|
struct RunOnDrop<F: FnOnce()> {
|
||||||
|
drop_fn: Option<F>,
|
||||||
|
}
|
||||||
|
impl<F: FnOnce()> RunOnDrop<F> {
|
||||||
|
/// Creates a new `RunOnDrop`.
|
||||||
|
fn new(drop_fn: F) -> Self {
|
||||||
|
Self {
|
||||||
|
drop_fn: Some(drop_fn),
|
||||||
}
|
}
|
||||||
if self.context.pool_manager.pool_is_idle() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.parker.park();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
impl<F: FnOnce()> Drop for RunOnDrop<F> {
|
||||||
|
fn drop(&mut self) {
|
||||||
impl Drop for Executor {
|
self.drop_fn.take().map(|f| f());
|
||||||
fn drop(&mut self) {
|
|
||||||
// Force all threads to return.
|
|
||||||
self.context.pool_manager.trigger_termination();
|
|
||||||
for handle in self.worker_handles.drain(0..) {
|
|
||||||
handle.join().unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drop all tasks that have not completed.
|
|
||||||
//
|
|
||||||
// A local worker must be set because some tasks may schedule other
|
|
||||||
// tasks when dropped, which requires that a local worker be available.
|
|
||||||
let worker = Worker::new(LocalQueue::new(QUEUE_SIZE), self.context.clone());
|
|
||||||
LOCAL_WORKER.set(&worker, || {
|
|
||||||
// Cancel all pending futures.
|
|
||||||
//
|
|
||||||
// `ACTIVE_TASKS` is explicitly unset to prevent
|
|
||||||
// `CancellableFuture::drop()` from trying to remove its own token
|
|
||||||
// from the list of active tasks as this would result in a reentrant
|
|
||||||
// lock. This is mainly to stay on the safe side: `ACTIVE_TASKS`
|
|
||||||
// should not be set on this thread anyway, unless for some reason
|
|
||||||
// the executor runs inside another executor.
|
|
||||||
ACTIVE_TASKS.unset(|| {
|
|
||||||
let mut tasks = self.active_tasks.lock().unwrap();
|
|
||||||
for task in tasks.drain() {
|
|
||||||
task.cancel();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Some of the dropped tasks may have scheduled other tasks that
|
|
||||||
// were not yet cancelled, preventing them from being dropped
|
|
||||||
// upon cancellation. This is OK: the scheduled tasks will be
|
|
||||||
// dropped when the local and injector queues are dropped, and
|
|
||||||
// they cannot re-schedule one another since all tasks were
|
|
||||||
// cancelled.
|
|
||||||
});
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Debug for Executor {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
f.debug_struct("Executor").finish_non_exhaustive()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Shared executor context.
|
|
||||||
///
|
|
||||||
/// This contains all executor resources that can be shared between threads.
|
|
||||||
struct ExecutorContext {
|
|
||||||
/// Injector queue.
|
|
||||||
injector: Injector,
|
|
||||||
/// Unique executor ID inherited by all tasks spawned on this executor instance.
|
|
||||||
executor_id: usize,
|
|
||||||
/// Unparker for the main executor thread.
|
|
||||||
executor_unparker: Unparker,
|
|
||||||
/// Manager for all worker threads.
|
|
||||||
pool_manager: PoolManager,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ExecutorContext {
|
|
||||||
/// Creates a new shared executor context.
|
|
||||||
pub(super) fn new(
|
|
||||||
executor_id: usize,
|
|
||||||
executor_unparker: Unparker,
|
|
||||||
stealers_and_unparkers: impl Iterator<Item = (Stealer, Unparker)>,
|
|
||||||
) -> Self {
|
|
||||||
let (stealers, worker_unparkers): (Vec<_>, Vec<_>) =
|
|
||||||
stealers_and_unparkers.into_iter().unzip();
|
|
||||||
let worker_unparkers = worker_unparkers.into_boxed_slice();
|
|
||||||
|
|
||||||
Self {
|
|
||||||
injector: Injector::new(),
|
|
||||||
executor_id,
|
|
||||||
executor_unparker,
|
|
||||||
pool_manager: PoolManager::new(
|
|
||||||
worker_unparkers.len(),
|
|
||||||
stealers.into_boxed_slice(),
|
|
||||||
worker_unparkers,
|
|
||||||
),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
/// A `Future` wrapper that removes its cancellation token from the list of
|
fn executor_deadlock(mut executor: Executor) {
|
||||||
/// active tasks when dropped.
|
let (_sender1, receiver1) = oneshot::channel::<()>();
|
||||||
struct CancellableFuture<T: Future> {
|
let (_sender2, receiver2) = oneshot::channel::<()>();
|
||||||
inner: T,
|
|
||||||
cancellation_key: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Future> CancellableFuture<T> {
|
let launch_count = Arc::new(AtomicUsize::new(0));
|
||||||
/// Creates a new `CancellableFuture`.
|
let completion_count = Arc::new(AtomicUsize::new(0));
|
||||||
fn new(fut: T, cancellation_key: usize) -> Self {
|
|
||||||
Self {
|
|
||||||
inner: fut,
|
|
||||||
cancellation_key,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Future> Future for CancellableFuture<T> {
|
executor.spawn_and_forget({
|
||||||
type Output = T::Output;
|
let launch_count = launch_count.clone();
|
||||||
|
let completion_count = completion_count.clone();
|
||||||
|
|
||||||
#[inline(always)]
|
async move {
|
||||||
fn poll(
|
launch_count.fetch_add(1, Ordering::Relaxed);
|
||||||
self: std::pin::Pin<&mut Self>,
|
let _ = receiver2.await;
|
||||||
cx: &mut std::task::Context<'_>,
|
completion_count.fetch_add(1, Ordering::Relaxed);
|
||||||
) -> std::task::Poll<Self::Output> {
|
|
||||||
unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Future> Drop for CancellableFuture<T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
// Remove the task from the list of active tasks if the future is
|
|
||||||
// dropped on a worker thread. Otherwise do nothing and let the
|
|
||||||
// executor's drop handler do the cleanup.
|
|
||||||
let _ = ACTIVE_TASKS.map(|active_tasks| {
|
|
||||||
// Don't unwrap on `lock()` because this function can be called from
|
|
||||||
// a destructor and should not panic. In the worse case, the cancel
|
|
||||||
// token will be left in the list of active tasks, which does
|
|
||||||
// prevents eager task deallocation but does not cause any issue
|
|
||||||
// otherwise.
|
|
||||||
if let Ok(mut active_tasks) = active_tasks.lock() {
|
|
||||||
let _cancel_token = active_tasks.try_remove(self.cancellation_key);
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
executor.spawn_and_forget({
|
||||||
}
|
let launch_count = launch_count.clone();
|
||||||
|
let completion_count = completion_count.clone();
|
||||||
/// Schedules a `Runnable` from within a worker thread.
|
|
||||||
///
|
async move {
|
||||||
/// # Panics
|
launch_count.fetch_add(1, Ordering::Relaxed);
|
||||||
///
|
let _ = receiver1.await;
|
||||||
/// This function will panic if called from a non-worker thread or if called
|
completion_count.fetch_add(1, Ordering::Relaxed);
|
||||||
/// from the worker thread of another executor instance than the one the task
|
}
|
||||||
/// for this `Runnable` was spawned on.
|
});
|
||||||
fn schedule_task(task: Runnable, executor_id: usize) {
|
|
||||||
LOCAL_WORKER
|
executor.run();
|
||||||
.map(|worker| {
|
|
||||||
let pool_manager = &worker.executor_context.pool_manager;
|
// Check that the executor returns on deadlock, i.e. none of the task has
|
||||||
let injector = &worker.executor_context.injector;
|
// completed.
|
||||||
let local_queue = &worker.local_queue;
|
assert_eq!(launch_count.load(Ordering::Relaxed), 2);
|
||||||
let fast_slot = &worker.fast_slot;
|
assert_eq!(completion_count.load(Ordering::Relaxed), 0);
|
||||||
|
|
||||||
// Check that this task was indeed spawned on this executor.
|
// Drop the executor and thus the receiver tasks before the senders,
|
||||||
assert_eq!(
|
// failing which the senders may signal that the channel has been
|
||||||
executor_id, worker.executor_context.executor_id,
|
// dropped and wake the tasks outside the executor.
|
||||||
"Tasks must be awaken on the same executor they are spawned on"
|
drop(executor);
|
||||||
);
|
}
|
||||||
|
|
||||||
// Store the task in the fast slot and retrieve the one that was
|
fn executor_drop_cycle(mut executor: Executor) {
|
||||||
// formerly stored, if any.
|
let (sender1, mut receiver1) = mpsc::channel(2);
|
||||||
let prev_task = match fast_slot.replace(Some(task)) {
|
let (sender2, mut receiver2) = mpsc::channel(2);
|
||||||
// If there already was a task in the slot, proceed so it can be
|
let (sender3, mut receiver3) = mpsc::channel(2);
|
||||||
// moved to a task queue.
|
|
||||||
Some(t) => t,
|
let drop_count = Arc::new(AtomicUsize::new(0));
|
||||||
// Otherwise return immediately: this task cannot be stolen so
|
|
||||||
// there is no point in activating a sibling worker.
|
// Spawn 3 tasks that wake one another when dropped.
|
||||||
None => return,
|
executor.spawn_and_forget({
|
||||||
};
|
let mut sender2 = sender2.clone();
|
||||||
|
let mut sender3 = sender3.clone();
|
||||||
// Push the previous task to the local queue if possible or on the
|
let drop_count = drop_count.clone();
|
||||||
// injector queue otherwise.
|
|
||||||
if let Err(prev_task) = local_queue.push(prev_task) {
|
async move {
|
||||||
// The local queue is full. Try to move half of it to the
|
let _guard = RunOnDrop::new(move || {
|
||||||
// injector queue; if this fails, just push one task to the
|
let _ = sender2.try_send(());
|
||||||
// injector queue.
|
let _ = sender3.try_send(());
|
||||||
if let Ok(drain) = local_queue.drain(|_| Bucket::capacity()) {
|
drop_count.fetch_add(1, Ordering::Relaxed);
|
||||||
injector.push_bucket(Bucket::from_iter(drain));
|
});
|
||||||
local_queue.push(prev_task).unwrap();
|
let _ = receiver1.next().await;
|
||||||
} else {
|
}
|
||||||
injector.insert_task(prev_task);
|
});
|
||||||
}
|
executor.spawn_and_forget({
|
||||||
}
|
let mut sender1 = sender1.clone();
|
||||||
|
let mut sender3 = sender3.clone();
|
||||||
// A task has been pushed to the local or injector queue: try to
|
let drop_count = drop_count.clone();
|
||||||
// activate another worker if no worker is currently searching for a
|
|
||||||
// task.
|
async move {
|
||||||
if pool_manager.searching_worker_count() == 0 {
|
let _guard = RunOnDrop::new(move || {
|
||||||
pool_manager.activate_worker_relaxed();
|
let _ = sender1.try_send(());
|
||||||
}
|
let _ = sender3.try_send(());
|
||||||
})
|
drop_count.fetch_add(1, Ordering::Relaxed);
|
||||||
.expect("Tasks may not be awaken outside executor threads");
|
});
|
||||||
}
|
let _ = receiver2.next().await;
|
||||||
|
}
|
||||||
/// Processes all incoming tasks on a worker thread until the `Terminate` signal
|
});
|
||||||
/// is received or until it panics.
|
executor.spawn_and_forget({
|
||||||
///
|
let mut sender1 = sender1.clone();
|
||||||
/// Panics caught in this thread are relayed to the main executor thread.
|
let mut sender2 = sender2.clone();
|
||||||
fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
|
let drop_count = drop_count.clone();
|
||||||
let pool_manager = &worker.executor_context.pool_manager;
|
|
||||||
let injector = &worker.executor_context.injector;
|
async move {
|
||||||
let executor_unparker = &worker.executor_context.executor_unparker;
|
let _guard = RunOnDrop::new(move || {
|
||||||
let local_queue = &worker.local_queue;
|
let _ = sender1.try_send(());
|
||||||
let fast_slot = &worker.fast_slot;
|
let _ = sender2.try_send(());
|
||||||
|
drop_count.fetch_add(1, Ordering::Relaxed);
|
||||||
let result = panic::catch_unwind(AssertUnwindSafe(|| {
|
});
|
||||||
// Set how long to spin when searching for a task.
|
let _ = receiver3.next().await;
|
||||||
const MAX_SEARCH_DURATION: Duration = Duration::from_nanos(1000);
|
}
|
||||||
|
});
|
||||||
// Seed a thread RNG with the worker ID.
|
|
||||||
let rng = Rng::new(id as u64);
|
executor.run();
|
||||||
|
|
||||||
loop {
|
// Make sure that all tasks are eventually dropped even though each task
|
||||||
// Signal barrier: park until notified to continue or terminate.
|
// wakes the others when dropped.
|
||||||
|
drop(executor);
|
||||||
// Try to deactivate the worker.
|
assert_eq!(drop_count.load(Ordering::Relaxed), 3);
|
||||||
if pool_manager.try_set_worker_inactive(id) {
|
}
|
||||||
parker.park();
|
|
||||||
// No need to call `begin_worker_search()`: this was done by the
|
#[test]
|
||||||
// thread that unparked the worker.
|
fn executor_deadlock_st() {
|
||||||
} else if injector.is_empty() {
|
executor_deadlock(Executor::new_single_threaded());
|
||||||
// This worker could not be deactivated because it was the last
|
}
|
||||||
// active worker. In such case, the call to
|
|
||||||
// `try_set_worker_inactive` establishes a synchronization with
|
#[test]
|
||||||
// all threads that pushed tasks to the injector queue but could
|
fn executor_deadlock_mt() {
|
||||||
// not activate a new worker, which is why some tasks may now be
|
executor_deadlock(Executor::new_multi_threaded(3));
|
||||||
// visible in the injector queue.
|
}
|
||||||
pool_manager.set_all_workers_inactive();
|
|
||||||
executor_unparker.unpark();
|
#[test]
|
||||||
parker.park();
|
fn executor_deadlock_mt_one_worker() {
|
||||||
// No need to call `begin_worker_search()`: this was done by the
|
executor_deadlock(Executor::new_multi_threaded(1));
|
||||||
// thread that unparked the worker.
|
}
|
||||||
} else {
|
#[test]
|
||||||
pool_manager.begin_worker_search();
|
fn executor_drop_cycle_st() {
|
||||||
}
|
executor_drop_cycle(Executor::new_single_threaded());
|
||||||
|
}
|
||||||
if pool_manager.termination_is_triggered() {
|
|
||||||
return;
|
#[test]
|
||||||
}
|
fn executor_drop_cycle_mt() {
|
||||||
|
executor_drop_cycle(Executor::new_multi_threaded(3));
|
||||||
let mut search_start = Instant::now();
|
|
||||||
|
|
||||||
// Process the tasks one by one.
|
|
||||||
loop {
|
|
||||||
// Check the injector queue first.
|
|
||||||
if let Some(bucket) = injector.pop_bucket() {
|
|
||||||
let bucket_iter = bucket.into_iter();
|
|
||||||
|
|
||||||
// There is a _very_ remote possibility that, even though
|
|
||||||
// the local queue is empty, it has temporarily too little
|
|
||||||
// spare capacity for the bucket. This could happen if a
|
|
||||||
// concurrent steal operation was preempted for all the time
|
|
||||||
// it took to pop and process the remaining tasks and it
|
|
||||||
// hasn't released the stolen capacity yet.
|
|
||||||
//
|
|
||||||
// Unfortunately, we cannot just skip checking the injector
|
|
||||||
// queue altogether when there isn't enough spare capacity
|
|
||||||
// in the local queue because this could lead to a race:
|
|
||||||
// suppose that (1) this thread has earlier pushed tasks
|
|
||||||
// onto the injector queue, and (2) the stealer has
|
|
||||||
// processed all stolen tasks before this thread sees the
|
|
||||||
// capacity restored and at the same time (3) the stealer
|
|
||||||
// does not yet see the tasks this thread pushed to the
|
|
||||||
// injector queue; in such scenario, both this thread and
|
|
||||||
// the stealer thread may park and leave unprocessed tasks
|
|
||||||
// in the injector queue.
|
|
||||||
//
|
|
||||||
// This is the only instance where spinning is used, as the
|
|
||||||
// probability of this happening is close to zero and the
|
|
||||||
// complexity of a signaling mechanism (condvar & friends)
|
|
||||||
// wouldn't carry its weight.
|
|
||||||
while local_queue.spare_capacity() < bucket_iter.len() {}
|
|
||||||
|
|
||||||
// Since empty buckets are never pushed onto the injector
|
|
||||||
// queue, we should now have at least one task to process.
|
|
||||||
local_queue.extend(bucket_iter);
|
|
||||||
} else {
|
|
||||||
// The injector queue is empty. Try to steal from active
|
|
||||||
// siblings.
|
|
||||||
let mut stealers = pool_manager.shuffled_stealers(Some(id), &rng);
|
|
||||||
if stealers.all(|stealer| {
|
|
||||||
stealer
|
|
||||||
.steal_and_pop(local_queue, |n| n - n / 2)
|
|
||||||
.map(|(task, _)| {
|
|
||||||
let prev_task = fast_slot.replace(Some(task));
|
|
||||||
assert!(prev_task.is_none());
|
|
||||||
})
|
|
||||||
.is_err()
|
|
||||||
}) {
|
|
||||||
// Give up if unsuccessful for too long.
|
|
||||||
if (Instant::now() - search_start) > MAX_SEARCH_DURATION {
|
|
||||||
pool_manager.end_worker_search();
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Re-try.
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signal the end of the search so that another worker can be
|
|
||||||
// activated when a new task is scheduled.
|
|
||||||
pool_manager.end_worker_search();
|
|
||||||
|
|
||||||
// Pop tasks from the fast slot or the local queue.
|
|
||||||
while let Some(task) = fast_slot.take().or_else(|| local_queue.pop()) {
|
|
||||||
if pool_manager.termination_is_triggered() {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
task.run();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Resume the search for tasks.
|
|
||||||
pool_manager.begin_worker_search();
|
|
||||||
search_start = Instant::now();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}));
|
|
||||||
|
|
||||||
// Propagate the panic, if any.
|
|
||||||
if let Err(panic) = result {
|
|
||||||
pool_manager.register_panic(panic);
|
|
||||||
pool_manager.trigger_termination();
|
|
||||||
executor_unparker.unpark();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
576
asynchronix/src/executor/mt_executor.rs
Normal file
576
asynchronix/src/executor/mt_executor.rs
Normal file
@ -0,0 +1,576 @@
|
|||||||
|
//! Multi-threaded `async` executor.
|
||||||
|
//!
|
||||||
|
//! The executor is exclusively designed for message-passing computational
|
||||||
|
//! tasks. As such, it does not include an I/O reactor and does not consider
|
||||||
|
//! fairness as a goal in itself. While it does use fair local queues inasmuch
|
||||||
|
//! as these tend to perform better in message-passing applications, it uses an
|
||||||
|
//! unfair injection queue and a LIFO slot without attempt to mitigate the
|
||||||
|
//! effect of badly behaving code (e.g. futures that spin-lock by yielding to
|
||||||
|
//! the executor; there is for this reason no support for something like tokio's
|
||||||
|
//! `yield_now`).
|
||||||
|
//!
|
||||||
|
//! Another way in which it differs from other `async` executors is that it
|
||||||
|
//! treats deadlocking as a normal occurrence. This is because in a
|
||||||
|
//! discrete-time simulator, the simulation of a system at a given time step
|
||||||
|
//! will make as much progress as possible until it technically reaches a
|
||||||
|
//! deadlock. Only then does the simulator advance the simulated time to that of
|
||||||
|
//! the next "event" extracted from a time-sorted priority queue.
|
||||||
|
//!
|
||||||
|
//! The design of the executor is largely influenced by the tokio and Go
|
||||||
|
//! schedulers, both of which are optimized for message-passing applications. In
|
||||||
|
//! particular, it uses fast, fixed-size thread-local work-stealing queues with
|
||||||
|
//! a non-stealable LIFO slot in combination with an injector queue, which
|
||||||
|
//! injector queue is used both to schedule new tasks and to absorb temporary
|
||||||
|
//! overflow in the local queues.
|
||||||
|
//!
|
||||||
|
//! The design of the injector queue is kept very simple compared to tokio, by
|
||||||
|
//! taking advantage of the fact that the injector is not required to be either
|
||||||
|
//! LIFO or FIFO. Moving tasks between a local queue and the injector is fast
|
||||||
|
//! because tasks are moved in batch and are stored contiguously in memory.
|
||||||
|
//!
|
||||||
|
//! Another difference with tokio is that, at the moment, the complete subset of
|
||||||
|
//! active worker threads is stored in a single atomic variable. This makes it
|
||||||
|
//! possible to rapidly identify free worker threads for stealing operations,
|
||||||
|
//! with the downside that the maximum number of worker threads is currently
|
||||||
|
//! limited to `usize::BITS`. This is not expected to constitute a limitation in
|
||||||
|
//! practice since system simulation is not typically embarrassingly parallel.
|
||||||
|
//!
|
||||||
|
//! Probably the largest difference with tokio is the task system, which has
|
||||||
|
//! better throughput due to less need for synchronization. This mainly results
|
||||||
|
//! from the use of an atomic notification counter rather than an atomic
|
||||||
|
//! notification flag, thus alleviating the need to reset the notification flag
|
||||||
|
//! before polling a future.
|
||||||
|
|
||||||
|
mod injector;
|
||||||
|
mod pool_manager;
|
||||||
|
|
||||||
|
use std::cell::Cell;
|
||||||
|
use std::fmt;
|
||||||
|
use std::future::Future;
|
||||||
|
use std::panic::{self, AssertUnwindSafe};
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::thread::{self, JoinHandle};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
|
use crossbeam_utils::sync::{Parker, Unparker};
|
||||||
|
use slab::Slab;
|
||||||
|
|
||||||
|
use crate::macros::scoped_thread_local::scoped_thread_local;
|
||||||
|
use crate::util::rng::Rng;
|
||||||
|
|
||||||
|
use super::task::{self, CancelToken, Promise, Runnable};
|
||||||
|
use super::NEXT_EXECUTOR_ID;
|
||||||
|
use pool_manager::PoolManager;
|
||||||
|
|
||||||
|
const BUCKET_SIZE: usize = 128;
|
||||||
|
const QUEUE_SIZE: usize = BUCKET_SIZE * 2;
|
||||||
|
|
||||||
|
type Bucket = injector::Bucket<Runnable, BUCKET_SIZE>;
|
||||||
|
type Injector = injector::Injector<Runnable, BUCKET_SIZE>;
|
||||||
|
type LocalQueue = st3::fifo::Worker<Runnable>;
|
||||||
|
type Stealer = st3::fifo::Stealer<Runnable>;
|
||||||
|
|
||||||
|
scoped_thread_local!(static LOCAL_WORKER: Worker);
|
||||||
|
scoped_thread_local!(static ACTIVE_TASKS: Mutex<Slab<CancelToken>>);
|
||||||
|
|
||||||
|
/// A multi-threaded `async` executor.
|
||||||
|
pub(crate) struct Executor {
|
||||||
|
/// Shared executor data.
|
||||||
|
context: Arc<ExecutorContext>,
|
||||||
|
/// List of tasks that have not completed yet.
|
||||||
|
active_tasks: Arc<Mutex<Slab<CancelToken>>>,
|
||||||
|
/// Parker for the main executor thread.
|
||||||
|
parker: Parker,
|
||||||
|
/// Handles to the worker threads.
|
||||||
|
worker_handles: Vec<JoinHandle<()>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Executor {
|
||||||
|
/// Creates an executor that runs futures on a thread pool.
|
||||||
|
///
|
||||||
|
/// The maximum number of threads is set with the `num_threads` parameter.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// This will panic if the specified number of threads is zero or is more
|
||||||
|
/// than `usize::BITS`.
|
||||||
|
pub(crate) fn new(num_threads: usize) -> Self {
|
||||||
|
let parker = Parker::new();
|
||||||
|
let unparker = parker.unparker().clone();
|
||||||
|
|
||||||
|
let (local_queues_and_parkers, stealers_and_unparkers): (Vec<_>, Vec<_>) = (0..num_threads)
|
||||||
|
.map(|_| {
|
||||||
|
let parker = Parker::new();
|
||||||
|
let unparker = parker.unparker().clone();
|
||||||
|
let local_queue = LocalQueue::new(QUEUE_SIZE);
|
||||||
|
let stealer = local_queue.stealer();
|
||||||
|
|
||||||
|
((local_queue, parker), (stealer, unparker))
|
||||||
|
})
|
||||||
|
.unzip();
|
||||||
|
|
||||||
|
// Each executor instance has a unique ID inherited by tasks to ensure
|
||||||
|
// that tasks are scheduled on their parent executor.
|
||||||
|
let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed);
|
||||||
|
assert!(
|
||||||
|
executor_id <= usize::MAX / 2,
|
||||||
|
"too many executors have been instantiated"
|
||||||
|
);
|
||||||
|
|
||||||
|
let context = Arc::new(ExecutorContext::new(
|
||||||
|
executor_id,
|
||||||
|
unparker,
|
||||||
|
stealers_and_unparkers.into_iter(),
|
||||||
|
));
|
||||||
|
let active_tasks = Arc::new(Mutex::new(Slab::new()));
|
||||||
|
|
||||||
|
// All workers must be marked as active _before_ spawning the threads to
|
||||||
|
// make sure that the count of active workers does not fall to zero
|
||||||
|
// before all workers are blocked on the signal barrier.
|
||||||
|
context.pool_manager.set_all_workers_active();
|
||||||
|
|
||||||
|
// Spawn all worker threads.
|
||||||
|
let worker_handles: Vec<_> = local_queues_and_parkers
|
||||||
|
.into_iter()
|
||||||
|
.enumerate()
|
||||||
|
.map(|(id, (local_queue, worker_parker))| {
|
||||||
|
let thread_builder = thread::Builder::new().name(format!("Worker #{}", id));
|
||||||
|
|
||||||
|
thread_builder
|
||||||
|
.spawn({
|
||||||
|
let context = context.clone();
|
||||||
|
let active_tasks = active_tasks.clone();
|
||||||
|
move || {
|
||||||
|
let worker = Worker::new(local_queue, context);
|
||||||
|
ACTIVE_TASKS.set(&active_tasks, || {
|
||||||
|
LOCAL_WORKER
|
||||||
|
.set(&worker, || run_local_worker(&worker, id, worker_parker))
|
||||||
|
});
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.unwrap()
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
// Wait until all workers are blocked on the signal barrier.
|
||||||
|
parker.park();
|
||||||
|
assert!(context.pool_manager.pool_is_idle());
|
||||||
|
|
||||||
|
Self {
|
||||||
|
context,
|
||||||
|
active_tasks,
|
||||||
|
parker,
|
||||||
|
worker_handles,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawns a task and returns a promise that can be polled to retrieve the
|
||||||
|
/// task's output.
|
||||||
|
///
|
||||||
|
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||||
|
/// is called.
|
||||||
|
pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
|
||||||
|
where
|
||||||
|
T: Future + Send + 'static,
|
||||||
|
T::Output: Send + 'static,
|
||||||
|
{
|
||||||
|
// Book a slot to store the task cancellation token.
|
||||||
|
let mut active_tasks = self.active_tasks.lock().unwrap();
|
||||||
|
let task_entry = active_tasks.vacant_entry();
|
||||||
|
|
||||||
|
// Wrap the future so that it removes its cancel token from the
|
||||||
|
// executor's list when dropped.
|
||||||
|
let future = CancellableFuture::new(future, task_entry.key());
|
||||||
|
|
||||||
|
let (promise, runnable, cancel_token) =
|
||||||
|
task::spawn(future, schedule_task, self.context.executor_id);
|
||||||
|
|
||||||
|
task_entry.insert(cancel_token);
|
||||||
|
self.context.injector.insert_task(runnable);
|
||||||
|
|
||||||
|
promise
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawns a task which output will never be retrieved.
|
||||||
|
///
|
||||||
|
/// This is mostly useful to avoid undue reference counting for futures that
|
||||||
|
/// return a `()` type.
|
||||||
|
///
|
||||||
|
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
|
||||||
|
/// is called.
|
||||||
|
pub(crate) fn spawn_and_forget<T>(&self, future: T)
|
||||||
|
where
|
||||||
|
T: Future + Send + 'static,
|
||||||
|
T::Output: Send + 'static,
|
||||||
|
{
|
||||||
|
// Book a slot to store the task cancellation token.
|
||||||
|
let mut active_tasks = self.active_tasks.lock().unwrap();
|
||||||
|
let task_entry = active_tasks.vacant_entry();
|
||||||
|
|
||||||
|
// Wrap the future so that it removes its cancel token from the
|
||||||
|
// executor's list when dropped.
|
||||||
|
let future = CancellableFuture::new(future, task_entry.key());
|
||||||
|
|
||||||
|
let (runnable, cancel_token) =
|
||||||
|
task::spawn_and_forget(future, schedule_task, self.context.executor_id);
|
||||||
|
|
||||||
|
task_entry.insert(cancel_token);
|
||||||
|
self.context.injector.insert_task(runnable);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Execute spawned tasks, blocking until all futures have completed or
|
||||||
|
/// until the executor reaches a deadlock.
|
||||||
|
pub(crate) fn run(&mut self) {
|
||||||
|
self.context.pool_manager.activate_worker();
|
||||||
|
|
||||||
|
loop {
|
||||||
|
if let Some(worker_panic) = self.context.pool_manager.take_panic() {
|
||||||
|
panic::resume_unwind(worker_panic);
|
||||||
|
}
|
||||||
|
if self.context.pool_manager.pool_is_idle() {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.parker.park();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Drop for Executor {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
// Force all threads to return.
|
||||||
|
self.context.pool_manager.trigger_termination();
|
||||||
|
for handle in self.worker_handles.drain(0..) {
|
||||||
|
handle.join().unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop all tasks that have not completed.
|
||||||
|
//
|
||||||
|
// A local worker must be set because some tasks may schedule other
|
||||||
|
// tasks when dropped, which requires that a local worker be available.
|
||||||
|
let worker = Worker::new(LocalQueue::new(QUEUE_SIZE), self.context.clone());
|
||||||
|
LOCAL_WORKER.set(&worker, || {
|
||||||
|
// Cancel all pending futures.
|
||||||
|
//
|
||||||
|
// `ACTIVE_TASKS` is explicitly unset to prevent
|
||||||
|
// `CancellableFuture::drop()` from trying to remove its own token
|
||||||
|
// from the list of active tasks as this would result in a reentrant
|
||||||
|
// lock. This is mainly to stay on the safe side: `ACTIVE_TASKS`
|
||||||
|
// should not be set on this thread anyway, unless for some reason
|
||||||
|
// the executor runs inside another executor.
|
||||||
|
ACTIVE_TASKS.unset(|| {
|
||||||
|
let mut tasks = self.active_tasks.lock().unwrap();
|
||||||
|
for task in tasks.drain() {
|
||||||
|
task.cancel();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Some of the dropped tasks may have scheduled other tasks that
|
||||||
|
// were not yet cancelled, preventing them from being dropped
|
||||||
|
// upon cancellation. This is OK: the scheduled tasks will be
|
||||||
|
// dropped when the local and injector queues are dropped, and
|
||||||
|
// they cannot re-schedule one another since all tasks were
|
||||||
|
// cancelled.
|
||||||
|
});
|
||||||
|
});
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for Executor {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.debug_struct("Executor").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Shared executor context.
///
/// This contains all executor resources that can be shared between threads.
struct ExecutorContext {
    /// Injector queue, used as an overflow and hand-off queue between workers.
    injector: Injector,
    /// Unique executor identifier inherited by all tasks spawned on this
    /// executor instance.
    executor_id: usize,
    /// Unparker for the main executor thread.
    executor_unparker: Unparker,
    /// Manager for all worker threads.
    pool_manager: PoolManager,
}
|
||||||
|
|
||||||
|
impl ExecutorContext {
|
||||||
|
/// Creates a new shared executor context.
|
||||||
|
pub(super) fn new(
|
||||||
|
executor_id: usize,
|
||||||
|
executor_unparker: Unparker,
|
||||||
|
stealers_and_unparkers: impl Iterator<Item = (Stealer, Unparker)>,
|
||||||
|
) -> Self {
|
||||||
|
let (stealers, worker_unparkers): (Vec<_>, Vec<_>) =
|
||||||
|
stealers_and_unparkers.into_iter().unzip();
|
||||||
|
let worker_unparkers = worker_unparkers.into_boxed_slice();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
injector: Injector::new(),
|
||||||
|
executor_id,
|
||||||
|
executor_unparker,
|
||||||
|
pool_manager: PoolManager::new(
|
||||||
|
worker_unparkers.len(),
|
||||||
|
stealers.into_boxed_slice(),
|
||||||
|
worker_unparkers,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A `Future` wrapper that removes its cancellation token from the list of
/// active tasks when dropped.
struct CancellableFuture<T: Future> {
    /// The wrapped future.
    inner: T,
    /// Key under which this task's cancellation token is stored in the
    /// executor's list of active tasks.
    cancellation_key: usize,
}
|
||||||
|
|
||||||
|
impl<T: Future> CancellableFuture<T> {
|
||||||
|
/// Creates a new `CancellableFuture`.
|
||||||
|
fn new(fut: T, cancellation_key: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: fut,
|
||||||
|
cancellation_key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Future> Future for CancellableFuture<T> {
    type Output = T::Output;

    #[inline(always)]
    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        // SAFETY: structural pin projection — `inner` is never moved out of
        // `self`, which is itself pinned for the duration of this call.
        unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
    }
}
|
||||||
|
|
||||||
|
impl<T: Future> Drop for CancellableFuture<T> {
    fn drop(&mut self) {
        // Remove the task from the list of active tasks if the future is
        // dropped on a worker thread. Otherwise do nothing and let the
        // executor's drop handler do the cleanup.
        let _ = ACTIVE_TASKS.map(|active_tasks| {
            // Don't unwrap on `lock()` because this function can be called
            // from a destructor and should not panic. In the worst case, the
            // cancel token will be left in the list of active tasks, which
            // prevents eager task deallocation but does not cause any issue
            // otherwise.
            if let Ok(mut active_tasks) = active_tasks.lock() {
                let _cancel_token = active_tasks.try_remove(self.cancellation_key);
            }
        });
    }
}
|
||||||
|
|
||||||
|
/// A local worker with access to global executor resources.
pub(crate) struct Worker {
    /// Task queue owned by this worker.
    local_queue: LocalQueue,
    /// Single-task slot that bypasses the local queue; tasks stored here
    /// cannot be stolen by sibling workers.
    fast_slot: Cell<Option<Runnable>>,
    /// Executor resources shared with all other workers.
    executor_context: Arc<ExecutorContext>,
}
|
||||||
|
|
||||||
|
impl Worker {
|
||||||
|
/// Creates a new worker.
|
||||||
|
fn new(local_queue: LocalQueue, executor_context: Arc<ExecutorContext>) -> Self {
|
||||||
|
Self {
|
||||||
|
local_queue,
|
||||||
|
fast_slot: Cell::new(None),
|
||||||
|
executor_context,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules a `Runnable` from within a worker thread.
///
/// # Panics
///
/// This function will panic if called from a non-worker thread or if called
/// from the worker thread of another executor instance than the one the task
/// for this `Runnable` was spawned on.
fn schedule_task(task: Runnable, executor_id: usize) {
    LOCAL_WORKER
        .map(|worker| {
            let pool_manager = &worker.executor_context.pool_manager;
            let injector = &worker.executor_context.injector;
            let local_queue = &worker.local_queue;
            let fast_slot = &worker.fast_slot;

            // Check that this task was indeed spawned on this executor.
            assert_eq!(
                executor_id, worker.executor_context.executor_id,
                "Tasks must be awaken on the same executor they are spawned on"
            );

            // Store the task in the fast slot and retrieve the one that was
            // formerly stored, if any.
            let prev_task = match fast_slot.replace(Some(task)) {
                // If there already was a task in the slot, proceed so it can be
                // moved to a task queue.
                Some(t) => t,
                // Otherwise return immediately: this task cannot be stolen so
                // there is no point in activating a sibling worker.
                None => return,
            };

            // Push the previous task to the local queue if possible or on the
            // injector queue otherwise.
            if let Err(prev_task) = local_queue.push(prev_task) {
                // The local queue is full. Try to move half of it to the
                // injector queue; if this fails, just push one task to the
                // injector queue.
                if let Ok(drain) = local_queue.drain(|_| Bucket::capacity()) {
                    injector.push_bucket(Bucket::from_iter(drain));
                    // The push cannot fail: the queue was just drained.
                    local_queue.push(prev_task).unwrap();
                } else {
                    injector.insert_task(prev_task);
                }
            }

            // A task has been pushed to the local or injector queue: try to
            // activate another worker if no worker is currently searching for a
            // task.
            if pool_manager.searching_worker_count() == 0 {
                pool_manager.activate_worker_relaxed();
            }
        })
        .expect("Tasks may not be awaken outside executor threads");
}
|
||||||
|
|
||||||
|
/// Processes all incoming tasks on a worker thread until the `Terminate` signal
/// is received or until it panics.
///
/// Panics caught in this thread are relayed to the main executor thread.
fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
    let pool_manager = &worker.executor_context.pool_manager;
    let injector = &worker.executor_context.injector;
    let executor_unparker = &worker.executor_context.executor_unparker;
    let local_queue = &worker.local_queue;
    let fast_slot = &worker.fast_slot;

    let result = panic::catch_unwind(AssertUnwindSafe(|| {
        // Set how long to spin when searching for a task.
        const MAX_SEARCH_DURATION: Duration = Duration::from_nanos(1000);

        // Seed a thread RNG with the worker ID.
        let rng = Rng::new(id as u64);

        loop {
            // Signal barrier: park until notified to continue or terminate.

            // Try to deactivate the worker.
            if pool_manager.try_set_worker_inactive(id) {
                parker.park();
                // No need to call `begin_worker_search()`: this was done by the
                // thread that unparked the worker.
            } else if injector.is_empty() {
                // This worker could not be deactivated because it was the last
                // active worker. In such case, the call to
                // `try_set_worker_inactive` establishes a synchronization with
                // all threads that pushed tasks to the injector queue but could
                // not activate a new worker, which is why some tasks may now be
                // visible in the injector queue.
                pool_manager.set_all_workers_inactive();
                executor_unparker.unpark();
                parker.park();
                // No need to call `begin_worker_search()`: this was done by the
                // thread that unparked the worker.
            } else {
                pool_manager.begin_worker_search();
            }

            if pool_manager.termination_is_triggered() {
                return;
            }

            let mut search_start = Instant::now();

            // Process the tasks one by one.
            loop {
                // Check the injector queue first.
                if let Some(bucket) = injector.pop_bucket() {
                    let bucket_iter = bucket.into_iter();

                    // There is a _very_ remote possibility that, even though
                    // the local queue is empty, it has temporarily too little
                    // spare capacity for the bucket. This could happen if a
                    // concurrent steal operation was preempted for all the time
                    // it took to pop and process the remaining tasks and it
                    // hasn't released the stolen capacity yet.
                    //
                    // Unfortunately, we cannot just skip checking the injector
                    // queue altogether when there isn't enough spare capacity
                    // in the local queue because this could lead to a race:
                    // suppose that (1) this thread has earlier pushed tasks
                    // onto the injector queue, and (2) the stealer has
                    // processed all stolen tasks before this thread sees the
                    // capacity restored and at the same time (3) the stealer
                    // does not yet see the tasks this thread pushed to the
                    // injector queue; in such scenario, both this thread and
                    // the stealer thread may park and leave unprocessed tasks
                    // in the injector queue.
                    //
                    // This is the only instance where spinning is used, as the
                    // probability of this happening is close to zero and the
                    // complexity of a signaling mechanism (condvar & friends)
                    // wouldn't carry its weight.
                    while local_queue.spare_capacity() < bucket_iter.len() {}

                    // Since empty buckets are never pushed onto the injector
                    // queue, we should now have at least one task to process.
                    local_queue.extend(bucket_iter);
                } else {
                    // The injector queue is empty. Try to steal from active
                    // siblings.
                    let mut stealers = pool_manager.shuffled_stealers(Some(id), &rng);
                    if stealers.all(|stealer| {
                        stealer
                            .steal_and_pop(local_queue, |n| n - n / 2)
                            .map(|(task, _)| {
                                let prev_task = fast_slot.replace(Some(task));
                                // The fast slot must be empty at this point:
                                // all tasks were processed before searching.
                                assert!(prev_task.is_none());
                            })
                            .is_err()
                    }) {
                        // Give up if unsuccessful for too long.
                        if (Instant::now() - search_start) > MAX_SEARCH_DURATION {
                            pool_manager.end_worker_search();
                            break;
                        }

                        // Re-try.
                        continue;
                    }
                }

                // Signal the end of the search so that another worker can be
                // activated when a new task is scheduled.
                pool_manager.end_worker_search();

                // Pop tasks from the fast slot or the local queue.
                while let Some(task) = fast_slot.take().or_else(|| local_queue.pop()) {
                    if pool_manager.termination_is_triggered() {
                        return;
                    }
                    task.run();
                }

                // Resume the search for tasks.
                pool_manager.begin_worker_search();
                search_start = Instant::now();
            }
        }
    }));

    // Propagate the panic, if any.
    if let Err(panic) = result {
        pool_manager.register_panic(panic);
        pool_manager.trigger_termination();
        executor_unparker.unpark();
    }
}
|
244
asynchronix/src/executor/st_executor.rs
Normal file
244
asynchronix/src/executor/st_executor.rs
Normal file
@ -0,0 +1,244 @@
|
|||||||
|
use std::cell::RefCell;
|
||||||
|
use std::fmt;
|
||||||
|
use std::future::Future;
|
||||||
|
use std::sync::atomic::Ordering;
|
||||||
|
|
||||||
|
use slab::Slab;
|
||||||
|
|
||||||
|
use super::task::{self, CancelToken, Promise, Runnable};
|
||||||
|
use super::NEXT_EXECUTOR_ID;
|
||||||
|
|
||||||
|
use crate::macros::scoped_thread_local::scoped_thread_local;
|
||||||
|
|
||||||
|
/// Initial capacity of the work queue, in tasks.
const QUEUE_MIN_CAPACITY: usize = 32;

// Scoped thread-local handles giving tasks access to the executor state while
// the executor is running or being dropped.
scoped_thread_local!(static EXECUTOR_CONTEXT: ExecutorContext);
scoped_thread_local!(static ACTIVE_TASKS: RefCell<Slab<CancelToken>>);
|
||||||
|
|
||||||
|
/// A single-threaded `async` executor.
pub(crate) struct Executor {
    /// Shared executor data.
    context: ExecutorContext,
    /// List of tasks that have not completed yet, keyed by the tasks'
    /// cancellation keys.
    active_tasks: RefCell<Slab<CancelToken>>,
}
|
||||||
|
|
||||||
|
impl Executor {
    /// Creates an executor that runs futures on the current thread.
    pub(crate) fn new() -> Self {
        // Each executor instance has a unique ID inherited by tasks to ensure
        // that tasks are scheduled on their parent executor.
        let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed);
        assert!(
            executor_id <= usize::MAX / 2,
            "too many executors have been instantiated"
        );

        let context = ExecutorContext::new(executor_id);
        let active_tasks = RefCell::new(Slab::new());

        Self {
            context,
            active_tasks,
        }
    }

    /// Spawns a task and returns a promise that can be polled to retrieve the
    /// task's output.
    ///
    /// Note that spawned tasks are not executed until [`run()`](Executor::run)
    /// is called.
    pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        // Book a slot to store the task cancellation token.
        let mut active_tasks = self.active_tasks.borrow_mut();
        let task_entry = active_tasks.vacant_entry();

        // Wrap the future so that it removes its cancel token from the
        // executor's list when dropped.
        let future = CancellableFuture::new(future, task_entry.key());

        let (promise, runnable, cancel_token) =
            task::spawn(future, schedule_task, self.context.executor_id);

        task_entry.insert(cancel_token);
        // Queue the task; it will only run once `run()` is called.
        let mut queue = self.context.queue.borrow_mut();
        queue.push(runnable);

        promise
    }

    /// Spawns a task which output will never be retrieved.
    ///
    /// This is mostly useful to avoid undue reference counting for futures that
    /// return a `()` type.
    ///
    /// Note that spawned tasks are not executed until [`run()`](Executor::run)
    /// is called.
    pub(crate) fn spawn_and_forget<T>(&self, future: T)
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        // Book a slot to store the task cancellation token.
        let mut active_tasks = self.active_tasks.borrow_mut();
        let task_entry = active_tasks.vacant_entry();

        // Wrap the future so that it removes its cancel token from the
        // executor's list when dropped.
        let future = CancellableFuture::new(future, task_entry.key());

        let (runnable, cancel_token) =
            task::spawn_and_forget(future, schedule_task, self.context.executor_id);

        task_entry.insert(cancel_token);
        // Queue the task; it will only run once `run()` is called.
        let mut queue = self.context.queue.borrow_mut();
        queue.push(runnable);
    }

    /// Execute spawned tasks, blocking until all futures have completed or
    /// until the executor reaches a deadlock.
    pub(crate) fn run(&mut self) {
        // Expose the active-task list and context to tasks through the scoped
        // thread-locals so that `schedule_task` and `CancellableFuture::drop`
        // can access them while tasks run.
        ACTIVE_TASKS.set(&self.active_tasks, || {
            EXECUTOR_CONTEXT.set(&self.context, || loop {
                // NOTE: the queue borrow is confined to this expression so that
                // `task.run()` below can re-borrow the queue when re-scheduling.
                let task = match self.context.queue.borrow_mut().pop() {
                    Some(task) => task,
                    None => break,
                };

                task.run();
            })
        });
    }
}
|
||||||
|
|
||||||
|
impl Drop for Executor {
    fn drop(&mut self) {
        // Drop all tasks that have not completed.
        //
        // The executor context must be set because some tasks may schedule
        // other tasks when dropped, which requires that the work queue be
        // available.
        EXECUTOR_CONTEXT.set(&self.context, || {
            // Cancel all pending futures.
            //
            // `ACTIVE_TASKS` is explicitly unset to prevent
            // `CancellableFuture::drop()` from trying to remove its own token
            // from the list of active tasks as this would result in a nested
            // call to `borrow_mut` and thus a panic. This is mainly to stay on
            // the safe side: `ACTIVE_TASKS` should not be set anyway, unless
            // for some reason the executor runs inside another executor.
            ACTIVE_TASKS.unset(|| {
                let mut tasks = self.active_tasks.borrow_mut();
                for task in tasks.drain() {
                    task.cancel();
                }

                // Some of the dropped tasks may have scheduled other tasks that
                // were not yet cancelled, preventing them from being dropped
                // upon cancellation. This is OK: the scheduled tasks will be
                // dropped when the work queue is dropped, and they cannot
                // re-schedule one another since all tasks were cancelled.
            });
        });
    }
}
|
||||||
|
|
||||||
|
impl fmt::Debug for Executor {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Internal fields (work queue, active-task list) have no useful
        // `Debug` representation, so they are deliberately omitted.
        f.debug_struct("Executor").finish_non_exhaustive()
    }
}
|
||||||
|
|
||||||
|
/// Shared executor context.
///
/// This contains the executor resources that tasks may access while running.
/// Unlike its multi-threaded counterpart, nothing here is actually shared
/// across threads: this executor is single-threaded.
struct ExecutorContext {
    /// Work queue.
    queue: RefCell<Vec<Runnable>>,
    /// Unique executor identifier inherited by all tasks spawned on this
    /// executor instance.
    executor_id: usize,
}
|
||||||
|
|
||||||
|
impl ExecutorContext {
|
||||||
|
/// Creates a new shared executor context.
|
||||||
|
fn new(executor_id: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
queue: RefCell::new(Vec::with_capacity(QUEUE_MIN_CAPACITY)),
|
||||||
|
executor_id,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A `Future` wrapper that removes its cancellation token from the list of
/// active tasks when dropped.
struct CancellableFuture<T: Future> {
    /// The wrapped future.
    inner: T,
    /// Key under which this task's cancellation token is stored in the
    /// executor's list of active tasks.
    cancellation_key: usize,
}
|
||||||
|
|
||||||
|
impl<T: Future> CancellableFuture<T> {
|
||||||
|
/// Creates a new `CancellableFuture`.
|
||||||
|
fn new(fut: T, cancellation_key: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: fut,
|
||||||
|
cancellation_key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Future> Future for CancellableFuture<T> {
    type Output = T::Output;

    #[inline(always)]
    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        // SAFETY: structural pin projection — `inner` is never moved out of
        // `self`, which is itself pinned for the duration of this call.
        unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
    }
}
|
||||||
|
|
||||||
|
impl<T: Future> Drop for CancellableFuture<T> {
    fn drop(&mut self) {
        // Remove the task from the list of active tasks while the executor is
        // running (meaning that `ACTIVE_TASKS` is set). Otherwise do nothing
        // and let the executor's drop handler do the cleanup.
        let _ = ACTIVE_TASKS.map(|active_tasks| {
            // Don't use `borrow_mut()` because this function can be called
            // from a destructor and should not panic. In the worst case, the
            // cancel token will be left in the list of active tasks, which
            // prevents eager task deallocation but does not cause any issue
            // otherwise.
            if let Ok(mut active_tasks) = active_tasks.try_borrow_mut() {
                let _cancel_token = active_tasks.try_remove(self.cancellation_key);
            }
        });
    }
}
|
||||||
|
|
||||||
|
/// Schedules a `Runnable` from within a worker thread.
///
/// # Panics
///
/// This function will panic if called outside the executor's worker thread, or
/// from another executor instance than the one the task for this `Runnable`
/// was spawned on.
fn schedule_task(task: Runnable, executor_id: usize) {
    EXECUTOR_CONTEXT
        .map(|context| {
            // Check that this task was indeed spawned on this executor.
            assert_eq!(
                executor_id, context.executor_id,
                "Tasks must be awaken on the same executor they are spawned on"
            );

            let mut queue = context.queue.borrow_mut();
            queue.push(task);
        })
        .expect("Tasks may not be awaken outside executor threads");
}
|
@ -125,13 +125,6 @@ where
|
|||||||
S: Fn(Runnable, T) + Send + Sync + 'static,
|
S: Fn(Runnable, T) + Send + Sync + 'static,
|
||||||
T: Clone + Send + Sync + 'static,
|
T: Clone + Send + Sync + 'static,
|
||||||
{
|
{
|
||||||
const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
|
|
||||||
Self::clone_waker,
|
|
||||||
Self::wake_by_val,
|
|
||||||
Self::wake_by_ref,
|
|
||||||
Self::drop_waker,
|
|
||||||
);
|
|
||||||
|
|
||||||
/// Clones a waker.
|
/// Clones a waker.
|
||||||
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
|
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
|
||||||
let this = &*(ptr as *const Self);
|
let this = &*(ptr as *const Self);
|
||||||
@ -141,7 +134,7 @@ where
|
|||||||
panic!("Attack of the clones: the waker was cloned too many times");
|
panic!("Attack of the clones: the waker was cloned too many times");
|
||||||
}
|
}
|
||||||
|
|
||||||
RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
|
RawWaker::new(ptr, raw_waker_vtable::<F, S, T>())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Wakes the task by value.
|
/// Wakes the task by value.
|
||||||
@ -287,6 +280,37 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns a reference to the waker's virtual table.
///
/// Unfortunately, Rust will sometimes create multiple memory instances of the
/// virtual table for the same generic parameters, which defeats
/// `Waker::will_wake` as the latter tests the pointers to the virtual tables
/// for equality.
///
/// Preventing the function from being inlined appears to solve this problem,
/// but we may want to investigate more robust methods. For unrelated reasons,
/// Tokio has switched [1] to a single non-generic virtual table declared as
/// `static` which then delegates each call to another virtual call. This does
/// ensure that `Waker::will_wake` will always work, but the double indirection
/// is a bit unfortunate and its cost would need to be evaluated.
///
/// [1]: https://github.com/tokio-rs/tokio/pull/5213
#[inline(never)]
fn raw_waker_vtable<F, S, T>() -> &'static RawWakerVTable
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
    S: Fn(Runnable, T) + Send + Sync + 'static,
    T: Clone + Send + Sync + 'static,
{
    // NOTE: returning `'static` here presumably relies on rvalue static
    // promotion of the `RawWakerVTable::new` constant expression — confirm if
    // this is ever refactored.
    &RawWakerVTable::new(
        Task::<F, S, T>::clone_waker,
        Task::<F, S, T>::wake_by_val,
        Task::<F, S, T>::wake_by_ref,
        Task::<F, S, T>::drop_waker,
    )
}
|
||||||
|
|
||||||
/// Spawns a task.
|
/// Spawns a task.
|
||||||
///
|
///
|
||||||
/// An arbitrary tag can be attached to the task, a clone of which will be
|
/// An arbitrary tag can be attached to the task, a clone of which will be
|
||||||
|
@ -25,7 +25,7 @@ struct VTable {
|
|||||||
/// but not currently scheduled (no `Runnable` exist) then the future is
|
/// but not currently scheduled (no `Runnable` exist) then the future is
|
||||||
/// dropped immediately. Otherwise, the future will be dropped at a later
|
/// dropped immediately. Otherwise, the future will be dropped at a later
|
||||||
/// time by the scheduled `Runnable` once it runs.
|
/// time by the scheduled `Runnable` once it runs.
|
||||||
unsafe fn cancel<F: Future, S, T>(ptr: *const ())
|
unsafe fn cancel<F, S, T>(ptr: *const ())
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
@ -123,7 +123,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Drops the token without cancelling the task.
|
/// Drops the token without cancelling the task.
|
||||||
unsafe fn drop<F: Future, S, T>(ptr: *const ())
|
unsafe fn drop<F, S, T>(ptr: *const ())
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
@ -180,7 +180,7 @@ impl CancelToken {
|
|||||||
/// allocator,
|
/// allocator,
|
||||||
/// - the reference count has been incremented to account for this new task
|
/// - the reference count has been incremented to account for this new task
|
||||||
/// reference.
|
/// reference.
|
||||||
pub(super) unsafe fn new_unchecked<F: Future, S, T>(task: *const Task<F, S, T>) -> Self
|
pub(super) unsafe fn new_unchecked<F, S, T>(task: *const Task<F, S, T>) -> Self
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
|
@ -20,7 +20,7 @@ struct VTable<U: Send + 'static> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Retrieves the output of the task if ready.
|
/// Retrieves the output of the task if ready.
|
||||||
unsafe fn poll<F: Future, S, T>(ptr: *const ()) -> Stage<F::Output>
|
unsafe fn poll<F, S, T>(ptr: *const ()) -> Stage<F::Output>
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
@ -62,7 +62,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Drops the promise.
|
/// Drops the promise.
|
||||||
unsafe fn drop<F: Future, S, T>(ptr: *const ())
|
unsafe fn drop<F, S, T>(ptr: *const ())
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
|
@ -11,7 +11,7 @@ use crate::loom_exports::debug_or_loom_assert;
|
|||||||
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
|
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
|
||||||
|
|
||||||
use super::util::RunOnDrop;
|
use super::util::RunOnDrop;
|
||||||
use super::Task;
|
use super::{raw_waker_vtable, Task};
|
||||||
use super::{CLOSED, POLLING, REF_MASK, WAKE_MASK};
|
use super::{CLOSED, POLLING, REF_MASK, WAKE_MASK};
|
||||||
|
|
||||||
/// Virtual table for a `Runnable`.
|
/// Virtual table for a `Runnable`.
|
||||||
@ -22,7 +22,7 @@ struct VTable {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Polls the inner future.
|
/// Polls the inner future.
|
||||||
unsafe fn run<F: Future, S, T>(ptr: *const ())
|
unsafe fn run<F, S, T>(ptr: *const ())
|
||||||
where
|
where
|
||||||
F: Future + Send + 'static,
|
F: Future + Send + 'static,
|
||||||
F::Output: Send + 'static,
|
F::Output: Send + 'static,
|
||||||
@ -77,7 +77,7 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Poll the task.
|
// Poll the task.
|
||||||
let raw_waker = RawWaker::new(ptr, &Task::<F, S, T>::RAW_WAKER_VTABLE);
|
let raw_waker = RawWaker::new(ptr, raw_waker_vtable::<F, S, T>());
|
||||||
let waker = ManuallyDrop::new(Waker::from_raw(raw_waker));
|
let waker = ManuallyDrop::new(Waker::from_raw(raw_waker));
|
||||||
|
|
||||||
let cx = &mut Context::from_waker(&waker);
|
let cx = &mut Context::from_waker(&waker);
|
||||||
|
@ -136,6 +136,28 @@ impl<F: Future> Drop for MonitoredFuture<F> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// A future that checks whether the waker cloned from the first call to `poll`
// tests equal with `Waker::will_wake` on the second call to `poll`.
struct WillWakeFuture {
    // Waker captured on the first poll; `None` until then.
    waker: Arc<Mutex<Option<std::task::Waker>>>,
}
impl Future for WillWakeFuture {
    type Output = bool;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let waker = &mut self.waker.lock().unwrap();

        match waker.as_ref() {
            // First poll: store a clone of the current waker and stay pending.
            None => {
                **waker = Some(cx.waker().clone());

                Poll::Pending
            }
            // Second poll: report whether the stored waker would wake the same
            // task as the current one.
            Some(waker) => Poll::Ready(waker.will_wake(cx.waker())),
        }
    }
}
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn task_schedule() {
|
fn task_schedule() {
|
||||||
test_prelude!();
|
test_prelude!();
|
||||||
@ -623,3 +645,24 @@ fn task_drop_cycle() {
|
|||||||
|
|
||||||
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
|
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn task_will_wake() {
|
||||||
|
test_prelude!();
|
||||||
|
|
||||||
|
let waker = Arc::new(Mutex::new(None));
|
||||||
|
let future = WillWakeFuture {
|
||||||
|
waker: waker.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
|
||||||
|
runnable.run();
|
||||||
|
|
||||||
|
assert!(promise.poll().is_pending());
|
||||||
|
|
||||||
|
// Wake the future so it is scheduled another time.
|
||||||
|
waker.lock().unwrap().as_ref().unwrap().wake_by_ref();
|
||||||
|
assert!(run_scheduled_runnable());
|
||||||
|
|
||||||
|
assert_eq!(promise.poll(), Stage::Ready(true));
|
||||||
|
}
|
||||||
|
@ -1,140 +0,0 @@
|
|||||||
use futures_channel::{mpsc, oneshot};
|
|
||||||
use futures_util::StreamExt;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
/// An object that runs an arbitrary closure when dropped.
|
|
||||||
struct RunOnDrop<F: FnOnce()> {
|
|
||||||
drop_fn: Option<F>,
|
|
||||||
}
|
|
||||||
impl<F: FnOnce()> RunOnDrop<F> {
|
|
||||||
/// Creates a new `RunOnDrop`.
|
|
||||||
fn new(drop_fn: F) -> Self {
|
|
||||||
Self {
|
|
||||||
drop_fn: Some(drop_fn),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<F: FnOnce()> Drop for RunOnDrop<F> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.drop_fn.take().map(|f| f());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn executor_deadlock() {
|
|
||||||
const NUM_THREADS: usize = 3;
|
|
||||||
|
|
||||||
let (_sender1, receiver1) = oneshot::channel::<()>();
|
|
||||||
let (_sender2, receiver2) = oneshot::channel::<()>();
|
|
||||||
|
|
||||||
let mut executor = Executor::new(NUM_THREADS);
|
|
||||||
static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
|
|
||||||
executor.spawn_and_forget(async move {
|
|
||||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let _ = receiver2.await;
|
|
||||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
executor.spawn_and_forget(async move {
|
|
||||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let _ = receiver1.await;
|
|
||||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
|
|
||||||
executor.run();
|
|
||||||
// Check that the executor returns on deadlock, i.e. none of the task has
|
|
||||||
// completed.
|
|
||||||
assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
|
|
||||||
assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn executor_deadlock_st() {
|
|
||||||
const NUM_THREADS: usize = 1;
|
|
||||||
|
|
||||||
let (_sender1, receiver1) = oneshot::channel::<()>();
|
|
||||||
let (_sender2, receiver2) = oneshot::channel::<()>();
|
|
||||||
|
|
||||||
let mut executor = Executor::new(NUM_THREADS);
|
|
||||||
static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
|
|
||||||
executor.spawn_and_forget(async move {
|
|
||||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let _ = receiver2.await;
|
|
||||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
executor.spawn_and_forget(async move {
|
|
||||||
LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
let _ = receiver1.await;
|
|
||||||
COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
|
|
||||||
executor.run();
|
|
||||||
// Check that the executor returnes on deadlock, i.e. none of the task has
|
|
||||||
// completed.
|
|
||||||
assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
|
|
||||||
assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn executor_drop_cycle() {
|
|
||||||
const NUM_THREADS: usize = 3;
|
|
||||||
|
|
||||||
let (sender1, mut receiver1) = mpsc::channel(2);
|
|
||||||
let (sender2, mut receiver2) = mpsc::channel(2);
|
|
||||||
let (sender3, mut receiver3) = mpsc::channel(2);
|
|
||||||
|
|
||||||
let mut executor = Executor::new(NUM_THREADS);
|
|
||||||
static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
|
|
||||||
// Spawn 3 tasks that wake one another when dropped.
|
|
||||||
executor.spawn_and_forget({
|
|
||||||
let mut sender2 = sender2.clone();
|
|
||||||
let mut sender3 = sender3.clone();
|
|
||||||
|
|
||||||
async move {
|
|
||||||
let _guard = RunOnDrop::new(move || {
|
|
||||||
let _ = sender2.try_send(());
|
|
||||||
let _ = sender3.try_send(());
|
|
||||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
let _ = receiver1.next().await;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
executor.spawn_and_forget({
|
|
||||||
let mut sender1 = sender1.clone();
|
|
||||||
let mut sender3 = sender3.clone();
|
|
||||||
|
|
||||||
async move {
|
|
||||||
let _guard = RunOnDrop::new(move || {
|
|
||||||
let _ = sender1.try_send(());
|
|
||||||
let _ = sender3.try_send(());
|
|
||||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
let _ = receiver2.next().await;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
executor.spawn_and_forget({
|
|
||||||
let mut sender1 = sender1.clone();
|
|
||||||
let mut sender2 = sender2.clone();
|
|
||||||
|
|
||||||
async move {
|
|
||||||
let _guard = RunOnDrop::new(move || {
|
|
||||||
let _ = sender1.try_send(());
|
|
||||||
let _ = sender2.try_send(());
|
|
||||||
DROP_COUNT.fetch_add(1, Ordering::Relaxed);
|
|
||||||
});
|
|
||||||
let _ = receiver3.next().await;
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
executor.run();
|
|
||||||
|
|
||||||
// Make sure that all tasks are eventually dropped even though each task
|
|
||||||
// wakes the others when dropped.
|
|
||||||
drop(executor);
|
|
||||||
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
|
|
||||||
}
|
|
@ -1,25 +0,0 @@
|
|||||||
use std::cell::Cell;
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use super::task::Runnable;
|
|
||||||
|
|
||||||
use super::ExecutorContext;
|
|
||||||
use super::LocalQueue;
|
|
||||||
|
|
||||||
/// A local worker with access to global executor resources.
|
|
||||||
pub(crate) struct Worker {
|
|
||||||
pub(super) local_queue: LocalQueue,
|
|
||||||
pub(super) fast_slot: Cell<Option<Runnable>>,
|
|
||||||
pub(super) executor_context: Arc<ExecutorContext>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Worker {
|
|
||||||
/// Creates a new worker.
|
|
||||||
pub(super) fn new(local_queue: LocalQueue, executor_context: Arc<ExecutorContext>) -> Self {
|
|
||||||
Self {
|
|
||||||
local_queue,
|
|
||||||
fast_slot: Cell::new(None),
|
|
||||||
executor_context,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
@ -36,30 +36,35 @@
|
|||||||
//!
|
//!
|
||||||
//! Models can contain four kinds of ports:
|
//! Models can contain four kinds of ports:
|
||||||
//!
|
//!
|
||||||
//! * _output ports_, which are instances of the [`Output`](model::Output) type
|
//! * _output ports_, which are instances of the [`Output`](ports::Output) type
|
||||||
//! and can be used to broadcast a message,
|
//! and can be used to broadcast a message,
|
||||||
//! * _requestor ports_, which are instances of the
|
//! * _requestor ports_, which are instances of the
|
||||||
//! [`Requestor`](model::Requestor) type and can be used to broadcast a
|
//! [`Requestor`](ports::Requestor) type and can be used to broadcast a
|
||||||
//! message and receive an iterator yielding the replies from all connected
|
//! message and receive an iterator yielding the replies from all connected
|
||||||
//! replier ports,
|
//! replier ports,
|
||||||
//! * _input ports_, which are synchronous or asynchronous methods that
|
//! * _input ports_, which are synchronous or asynchronous methods that
|
||||||
//! implement the [`InputFn`](model::InputFn) trait and take an `&mut self`
|
//! implement the [`InputFn`](ports::InputFn) trait and take an `&mut self`
|
||||||
//! argument, a message argument, and an optional
|
//! argument, a message argument, and an optional
|
||||||
//! [`&Scheduler`](time::Scheduler) argument,
|
//! [`&Context`](model::Context) argument,
|
||||||
//! * _replier ports_, which are similar to input ports but implement the
|
//! * _replier ports_, which are similar to input ports but implement the
|
||||||
//! [`ReplierFn`](model::ReplierFn) trait and return a reply.
|
//! [`ReplierFn`](ports::ReplierFn) trait and return a reply.
|
||||||
//!
|
//!
|
||||||
//! Messages that are broadcast by an output port to an input port are referred
|
//! Messages that are broadcast by an output port to an input port are referred
|
||||||
//! to as *events*, while messages exchanged between requestor and replier ports
|
//! to as *events*, while messages exchanged between requestor and replier ports
|
||||||
//! are referred to as *requests* and *replies*.
|
//! are referred to as *requests* and *replies*.
|
||||||
//!
|
//!
|
||||||
//! Models must implement the [`Model`](model::Model) trait. The main purpose of
|
//! Models must implement the [`Model`](model::Model) trait. The main purpose of
|
||||||
//! this trait is to allow models to specify an `init()` method that is
|
//! this trait is to allow models to specify
|
||||||
//! guaranteed to run once and only once when the simulation is initialized,
|
//! * a `setup()` method that is called once during model addtion to simulation,
|
||||||
//! _i.e._ after all models have been connected but before the simulation
|
//! this method allows e.g. creation and interconnection of submodels inside
|
||||||
//! starts. The `init()` method has a default implementation, so models that do
|
//! the model,
|
||||||
//! not require initialization can simply implement the trait with a one-liner
|
//! * an `init()` method that is guaranteed to run once and only once when the
|
||||||
//! such as `impl Model for MyModel {}`.
|
//! simulation is initialized, _i.e._ after all models have been connected but
|
||||||
|
//! before the simulation starts.
|
||||||
|
//!
|
||||||
|
//! The `setup()` and `init()` methods have default implementations, so models
|
||||||
|
//! that do not require setup and initialization can simply implement the trait
|
||||||
|
//! with a one-liner such as `impl Model for MyModel {}`.
|
||||||
//!
|
//!
|
||||||
//! #### A simple model
|
//! #### A simple model
|
||||||
//!
|
//!
|
||||||
@ -78,7 +83,8 @@
|
|||||||
//! `Multiplier` could be implemented as follows:
|
//! `Multiplier` could be implemented as follows:
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use asynchronix::model::{Model, Output};
|
//! use asynchronix::model::Model;
|
||||||
|
//! use asynchronix::ports::Output;
|
||||||
//!
|
//!
|
||||||
//! #[derive(Default)]
|
//! #[derive(Default)]
|
||||||
//! pub struct Multiplier {
|
//! pub struct Multiplier {
|
||||||
@ -92,28 +98,28 @@
|
|||||||
//! impl Model for Multiplier {}
|
//! impl Model for Multiplier {}
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
//! #### A model using the local scheduler
|
//! #### A model using the local context
|
||||||
//!
|
//!
|
||||||
//! Models frequently need to schedule actions at a future time or simply get
|
//! Models frequently need to schedule actions at a future time or simply get
|
||||||
//! access to the current simulation time. To do so, input and replier methods
|
//! access to the current simulation time. To do so, input and replier methods
|
||||||
//! can take an optional argument that gives them access to a local scheduler.
|
//! can take an optional argument that gives them access to a local context.
|
||||||
//!
|
//!
|
||||||
//! To show how the local scheduler can be used in practice, let us implement
|
//! To show how the local context can be used in practice, let us implement
|
||||||
//! `Delay`, a model which simply forwards its input unmodified after a 1s
|
//! `Delay`, a model which simply forwards its input unmodified after a 1s
|
||||||
//! delay:
|
//! delay:
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use std::time::Duration;
|
//! use std::time::Duration;
|
||||||
//! use asynchronix::model::{Model, Output};
|
//! use asynchronix::model::{Context, Model};
|
||||||
//! use asynchronix::time::Scheduler;
|
//! use asynchronix::ports::Output;
|
||||||
//!
|
//!
|
||||||
//! #[derive(Default)]
|
//! #[derive(Default)]
|
||||||
//! pub struct Delay {
|
//! pub struct Delay {
|
||||||
//! pub output: Output<f64>,
|
//! pub output: Output<f64>,
|
||||||
//! }
|
//! }
|
||||||
//! impl Delay {
|
//! impl Delay {
|
||||||
//! pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
//! pub fn input(&mut self, value: f64, context: &Context<Self>) {
|
||||||
//! scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
//! context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
||||||
//! }
|
//! }
|
||||||
//!
|
//!
|
||||||
//! async fn send(&mut self, value: f64) {
|
//! async fn send(&mut self, value: f64) {
|
||||||
@ -135,7 +141,7 @@
|
|||||||
//! [`Address`](simulation::Mailbox)es pointing to that mailbox.
|
//! [`Address`](simulation::Mailbox)es pointing to that mailbox.
|
||||||
//!
|
//!
|
||||||
//! Addresses are used among others to connect models: each output or requestor
|
//! Addresses are used among others to connect models: each output or requestor
|
||||||
//! ports has a `connect()` method that takes as argument a function pointer to
|
//! port has a `connect()` method that takes as argument a function pointer to
|
||||||
//! the corresponding input or replier port method and the address of the
|
//! the corresponding input or replier port method and the address of the
|
||||||
//! targeted model.
|
//! targeted model.
|
||||||
//!
|
//!
|
||||||
@ -166,8 +172,8 @@
|
|||||||
//! ```
|
//! ```
|
||||||
//! # mod models {
|
//! # mod models {
|
||||||
//! # use std::time::Duration;
|
//! # use std::time::Duration;
|
||||||
//! # use asynchronix::model::{Model, Output};
|
//! # use asynchronix::model::{Context, Model};
|
||||||
//! # use asynchronix::time::Scheduler;
|
//! # use asynchronix::ports::Output;
|
||||||
//! # #[derive(Default)]
|
//! # #[derive(Default)]
|
||||||
//! # pub struct Multiplier {
|
//! # pub struct Multiplier {
|
||||||
//! # pub output: Output<f64>,
|
//! # pub output: Output<f64>,
|
||||||
@ -183,8 +189,8 @@
|
|||||||
//! # pub output: Output<f64>,
|
//! # pub output: Output<f64>,
|
||||||
//! # }
|
//! # }
|
||||||
//! # impl Delay {
|
//! # impl Delay {
|
||||||
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
//! # pub fn input(&mut self, value: f64, context: &Context<Self>) {
|
||||||
//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
//! # context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
||||||
//! # }
|
//! # }
|
||||||
//! # async fn send(&mut self, value: f64) { // this method can be private
|
//! # async fn send(&mut self, value: f64) { // this method can be private
|
||||||
//! # self.output.send(value).await;
|
//! # self.output.send(value).await;
|
||||||
@ -193,6 +199,7 @@
|
|||||||
//! # impl Model for Delay {}
|
//! # impl Model for Delay {}
|
||||||
//! # }
|
//! # }
|
||||||
//! use std::time::Duration;
|
//! use std::time::Duration;
|
||||||
|
//! use asynchronix::ports::EventSlot;
|
||||||
//! use asynchronix::simulation::{Mailbox, SimInit};
|
//! use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
//! use asynchronix::time::MonotonicTime;
|
//! use asynchronix::time::MonotonicTime;
|
||||||
//!
|
//!
|
||||||
@ -217,16 +224,17 @@
|
|||||||
//! delay1.output.connect(Delay::input, &delay2_mbox);
|
//! delay1.output.connect(Delay::input, &delay2_mbox);
|
||||||
//!
|
//!
|
||||||
//! // Keep handles to the system input and output for the simulation.
|
//! // Keep handles to the system input and output for the simulation.
|
||||||
//! let mut output_slot = delay2.output.connect_slot().0;
|
//! let mut output_slot = EventSlot::new();
|
||||||
|
//! delay2.output.connect_sink(&output_slot);
|
||||||
//! let input_address = multiplier1_mbox.address();
|
//! let input_address = multiplier1_mbox.address();
|
||||||
//!
|
//!
|
||||||
//! // Pick an arbitrary simulation start time and build the simulation.
|
//! // Pick an arbitrary simulation start time and build the simulation.
|
||||||
//! let t0 = MonotonicTime::EPOCH;
|
//! let t0 = MonotonicTime::EPOCH;
|
||||||
//! let mut simu = SimInit::new()
|
//! let mut simu = SimInit::new()
|
||||||
//! .add_model(multiplier1, multiplier1_mbox)
|
//! .add_model(multiplier1, multiplier1_mbox, "multiplier1")
|
||||||
//! .add_model(multiplier2, multiplier2_mbox)
|
//! .add_model(multiplier2, multiplier2_mbox, "multiplier2")
|
||||||
//! .add_model(delay1, delay1_mbox)
|
//! .add_model(delay1, delay1_mbox, "delay1")
|
||||||
//! .add_model(delay2, delay2_mbox)
|
//! .add_model(delay2, delay2_mbox, "delay2")
|
||||||
//! .init(t0);
|
//! .init(t0);
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
@ -239,23 +247,20 @@
|
|||||||
//! deadline using for instance
|
//! deadline using for instance
|
||||||
//! [`Simulation::step_by()`](simulation::Simulation::step_by).
|
//! [`Simulation::step_by()`](simulation::Simulation::step_by).
|
||||||
//! 2. by sending events or queries without advancing simulation time, using
|
//! 2. by sending events or queries without advancing simulation time, using
|
||||||
//! [`Simulation::send_event()`](simulation::Simulation::send_event) or
|
//! [`Simulation::process_event()`](simulation::Simulation::process_event) or
|
||||||
//! [`Simulation::send_query()`](simulation::Simulation::send_query),
|
//! [`Simulation::send_query()`](simulation::Simulation::process_query),
|
||||||
//! 3. by scheduling events, using for instance
|
//! 3. by scheduling events, using for instance
|
||||||
//! [`Simulation::schedule_event()`](simulation::Simulation::schedule_event).
|
//! [`Simulation::schedule_event()`](simulation::Simulation::schedule_event).
|
||||||
//!
|
//!
|
||||||
//! When a simulation is initialized via
|
//! When initialized with the default clock, the simulation will run as fast as
|
||||||
//! [`SimInit::init()`](simulation::SimInit::init) then the simulation will run
|
//! possible, without regard for the actual wall clock time. Alternatively, the
|
||||||
//! as fast as possible, without regard for the actual wall clock time.
|
//! simulation time can be synchronized to the wall clock time using
|
||||||
//! Alternatively, it is possible to initialize a simulation via
|
//! [`SimInit::set_clock()`](simulation::SimInit::set_clock) and providing a
|
||||||
//! [`SimInit::init_with_clock()`](simulation::SimInit::init_with_clock) to bind
|
//! custom [`Clock`](time::Clock) type or a readily-available real-time clock
|
||||||
//! the simulation time to the wall clock time using a custom
|
//! such as [`AutoSystemClock`](time::AutoSystemClock).
|
||||||
//! [`Clock`](time::Clock) type or a readily-available real-time clock such as
|
|
||||||
//! [`AutoSystemClock`](time::AutoSystemClock).
|
|
||||||
//!
|
//!
|
||||||
//! Simulation outputs can be monitored using
|
//! Simulation outputs can be monitored using [`EventSlot`](ports::EventSlot)s
|
||||||
//! [`EventSlot`](simulation::EventSlot)s and
|
//! and [`EventBuffer`](ports::EventBuffer)s, which can be connected to any
|
||||||
//! [`EventStream`](simulation::EventStream)s, which can be connected to any
|
|
||||||
//! model's output port. While an event slot only gives access to the last value
|
//! model's output port. While an event slot only gives access to the last value
|
||||||
//! sent from a port, an event stream is an iterator that yields all events that
|
//! sent from a port, an event stream is an iterator that yields all events that
|
||||||
//! were sent in first-in-first-out order.
|
//! were sent in first-in-first-out order.
|
||||||
@ -266,8 +271,8 @@
|
|||||||
//! ```
|
//! ```
|
||||||
//! # mod models {
|
//! # mod models {
|
||||||
//! # use std::time::Duration;
|
//! # use std::time::Duration;
|
||||||
//! # use asynchronix::model::{Model, Output};
|
//! # use asynchronix::model::{Context, Model};
|
||||||
//! # use asynchronix::time::Scheduler;
|
//! # use asynchronix::ports::Output;
|
||||||
//! # #[derive(Default)]
|
//! # #[derive(Default)]
|
||||||
//! # pub struct Multiplier {
|
//! # pub struct Multiplier {
|
||||||
//! # pub output: Output<f64>,
|
//! # pub output: Output<f64>,
|
||||||
@ -283,8 +288,8 @@
|
|||||||
//! # pub output: Output<f64>,
|
//! # pub output: Output<f64>,
|
||||||
//! # }
|
//! # }
|
||||||
//! # impl Delay {
|
//! # impl Delay {
|
||||||
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
|
//! # pub fn input(&mut self, value: f64, context: &Context<Self>) {
|
||||||
//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
//! # context.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
|
||||||
//! # }
|
//! # }
|
||||||
//! # async fn send(&mut self, value: f64) { // this method can be private
|
//! # async fn send(&mut self, value: f64) { // this method can be private
|
||||||
//! # self.output.send(value).await;
|
//! # self.output.send(value).await;
|
||||||
@ -293,6 +298,7 @@
|
|||||||
//! # impl Model for Delay {}
|
//! # impl Model for Delay {}
|
||||||
//! # }
|
//! # }
|
||||||
//! # use std::time::Duration;
|
//! # use std::time::Duration;
|
||||||
|
//! # use asynchronix::ports::EventSlot;
|
||||||
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
//! # use asynchronix::time::MonotonicTime;
|
//! # use asynchronix::time::MonotonicTime;
|
||||||
//! # use models::{Delay, Multiplier};
|
//! # use models::{Delay, Multiplier};
|
||||||
@ -308,31 +314,32 @@
|
|||||||
//! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
|
//! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
|
||||||
//! # multiplier2.output.connect(Delay::input, &delay2_mbox);
|
//! # multiplier2.output.connect(Delay::input, &delay2_mbox);
|
||||||
//! # delay1.output.connect(Delay::input, &delay2_mbox);
|
//! # delay1.output.connect(Delay::input, &delay2_mbox);
|
||||||
//! # let mut output_slot = delay2.output.connect_slot().0;
|
//! # let mut output_slot = EventSlot::new();
|
||||||
|
//! # delay2.output.connect_sink(&output_slot);
|
||||||
//! # let input_address = multiplier1_mbox.address();
|
//! # let input_address = multiplier1_mbox.address();
|
||||||
//! # let t0 = MonotonicTime::EPOCH;
|
//! # let t0 = MonotonicTime::EPOCH;
|
||||||
//! # let mut simu = SimInit::new()
|
//! # let mut simu = SimInit::new()
|
||||||
//! # .add_model(multiplier1, multiplier1_mbox)
|
//! # .add_model(multiplier1, multiplier1_mbox, "multiplier1")
|
||||||
//! # .add_model(multiplier2, multiplier2_mbox)
|
//! # .add_model(multiplier2, multiplier2_mbox, "multiplier2")
|
||||||
//! # .add_model(delay1, delay1_mbox)
|
//! # .add_model(delay1, delay1_mbox, "delay1")
|
||||||
//! # .add_model(delay2, delay2_mbox)
|
//! # .add_model(delay2, delay2_mbox, "delay2")
|
||||||
//! # .init(t0);
|
//! # .init(t0);
|
||||||
//! // Send a value to the first multiplier.
|
//! // Send a value to the first multiplier.
|
||||||
//! simu.send_event(Multiplier::input, 21.0, &input_address);
|
//! simu.process_event(Multiplier::input, 21.0, &input_address);
|
||||||
//!
|
//!
|
||||||
//! // The simulation is still at t0 so nothing is expected at the output of the
|
//! // The simulation is still at t0 so nothing is expected at the output of the
|
||||||
//! // second delay gate.
|
//! // second delay gate.
|
||||||
//! assert!(output_slot.take().is_none());
|
//! assert!(output_slot.next().is_none());
|
||||||
//!
|
//!
|
||||||
//! // Advance simulation time until the next event and check the time and output.
|
//! // Advance simulation time until the next event and check the time and output.
|
||||||
//! simu.step();
|
//! simu.step();
|
||||||
//! assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
//! assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
||||||
//! assert_eq!(output_slot.take(), Some(84.0));
|
//! assert_eq!(output_slot.next(), Some(84.0));
|
||||||
//!
|
//!
|
||||||
//! // Get the answer to the ultimate question of life, the universe & everything.
|
//! // Get the answer to the ultimate question of life, the universe & everything.
|
||||||
//! simu.step();
|
//! simu.step();
|
||||||
//! assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
//! assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||||
//! assert_eq!(output_slot.take(), Some(42.0));
|
//! assert_eq!(output_slot.next(), Some(42.0));
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
//! # Message ordering guarantees
|
//! # Message ordering guarantees
|
||||||
@ -390,15 +397,14 @@
|
|||||||
//!
|
//!
|
||||||
//! * the [`model`] module provides more details about the signatures of input
|
//! * the [`model`] module provides more details about the signatures of input
|
||||||
//! and replier port methods and discusses model initialization in the
|
//! and replier port methods and discusses model initialization in the
|
||||||
//! documentation of [`model::Model`],
|
//! documentation of [`model::Model`] and self-scheduling methods as well as
|
||||||
|
//! scheduling cancellation in the documentation of [`model::Context`],
|
||||||
//! * the [`simulation`] module discusses how the capacity of mailboxes may
|
//! * the [`simulation`] module discusses how the capacity of mailboxes may
|
||||||
//! affect the simulation, how connections can be modified after the
|
//! affect the simulation, how connections can be modified after the
|
||||||
//! simulation was instantiated, and which pathological situations can lead to
|
//! simulation was instantiated, and which pathological situations can lead to
|
||||||
//! a deadlock,
|
//! a deadlock,
|
||||||
//! * the [`time`] module discusses in particular self-scheduling methods and
|
//! * the [`time`] module discusses in particular the monotonic timestamp format
|
||||||
//! scheduling cancellation in the documentation of [`time::Scheduler`] while
|
//! used for simulations ([`time::MonotonicTime`]).
|
||||||
//! the monotonic timestamp format used for simulations is documented in
|
|
||||||
//! [`time::MonotonicTime`].
|
|
||||||
#![warn(missing_docs, missing_debug_implementations, unreachable_pub)]
|
#![warn(missing_docs, missing_debug_implementations, unreachable_pub)]
|
||||||
|
|
||||||
pub(crate) mod channel;
|
pub(crate) mod channel;
|
||||||
@ -406,6 +412,9 @@ pub(crate) mod executor;
|
|||||||
mod loom_exports;
|
mod loom_exports;
|
||||||
pub(crate) mod macros;
|
pub(crate) mod macros;
|
||||||
pub mod model;
|
pub mod model;
|
||||||
|
pub mod ports;
|
||||||
|
#[cfg(feature = "rpc")]
|
||||||
|
pub mod rpc;
|
||||||
pub mod simulation;
|
pub mod simulation;
|
||||||
pub mod time;
|
pub mod time;
|
||||||
pub(crate) mod util;
|
pub(crate) mod util;
|
||||||
|
@ -1,7 +1,8 @@
|
|||||||
#[cfg(asynchronix_loom)]
|
#[cfg(asynchronix_loom)]
|
||||||
#[allow(unused_imports)]
|
#[allow(unused_imports)]
|
||||||
pub(crate) mod sync {
|
pub(crate) mod sync {
|
||||||
pub(crate) use loom::sync::{Arc, Mutex};
|
pub(crate) use loom::sync::{Arc, LockResult, Mutex, MutexGuard};
|
||||||
|
pub(crate) use std::sync::PoisonError;
|
||||||
|
|
||||||
pub(crate) mod atomic {
|
pub(crate) mod atomic {
|
||||||
pub(crate) use loom::sync::atomic::{
|
pub(crate) use loom::sync::atomic::{
|
||||||
@ -12,7 +13,7 @@ pub(crate) mod sync {
|
|||||||
#[cfg(not(asynchronix_loom))]
|
#[cfg(not(asynchronix_loom))]
|
||||||
#[allow(unused_imports)]
|
#[allow(unused_imports)]
|
||||||
pub(crate) mod sync {
|
pub(crate) mod sync {
|
||||||
pub(crate) use std::sync::{Arc, Mutex};
|
pub(crate) use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};
|
||||||
|
|
||||||
pub(crate) mod atomic {
|
pub(crate) mod atomic {
|
||||||
pub(crate) use std::sync::atomic::{
|
pub(crate) use std::sync::atomic::{
|
||||||
|
@ -7,19 +7,18 @@ use std::ptr;
|
|||||||
/// Declare a new thread-local storage scoped key of type `ScopedKey<T>`.
|
/// Declare a new thread-local storage scoped key of type `ScopedKey<T>`.
|
||||||
///
|
///
|
||||||
/// This is based on the `scoped-tls` crate, with slight modifications, such as
|
/// This is based on the `scoped-tls` crate, with slight modifications, such as
|
||||||
/// the use of the newly available `const` qualifier for TLS.
|
/// the addition of a `ScopedLocalKey::unset` method and the use of a `map`
|
||||||
|
/// method that returns `Option::None` when the value is not set, rather than
|
||||||
|
/// panicking as `with` would.
|
||||||
macro_rules! scoped_thread_local {
|
macro_rules! scoped_thread_local {
|
||||||
($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => (
|
($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => (
|
||||||
$(#[$attrs])*
|
$(#[$attrs])*
|
||||||
$vis static $name: $crate::macros::scoped_thread_local::ScopedLocalKey<$ty>
|
$vis static $name: $crate::macros::scoped_thread_local::ScopedLocalKey<$ty>
|
||||||
= $crate::macros::scoped_thread_local::ScopedLocalKey {
|
= unsafe {
|
||||||
inner: {
|
::std::thread_local!(static FOO: ::std::cell::Cell<*const ()> = const {
|
||||||
thread_local!(static FOO: ::std::cell::Cell<*const ()> = const {
|
::std::cell::Cell::new(::std::ptr::null())
|
||||||
std::cell::Cell::new(::std::ptr::null())
|
});
|
||||||
});
|
$crate::macros::scoped_thread_local::ScopedLocalKey::new(&FOO)
|
||||||
&FOO
|
|
||||||
},
|
|
||||||
_marker: ::std::marker::PhantomData,
|
|
||||||
};
|
};
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
@ -28,13 +27,24 @@ pub(crate) use scoped_thread_local;
|
|||||||
/// Type representing a thread local storage key corresponding to a reference
|
/// Type representing a thread local storage key corresponding to a reference
|
||||||
/// to the type parameter `T`.
|
/// to the type parameter `T`.
|
||||||
pub(crate) struct ScopedLocalKey<T> {
|
pub(crate) struct ScopedLocalKey<T> {
|
||||||
pub(crate) inner: &'static LocalKey<Cell<*const ()>>,
|
inner: &'static LocalKey<Cell<*const ()>>,
|
||||||
pub(crate) _marker: marker::PhantomData<T>,
|
_marker: marker::PhantomData<T>,
|
||||||
}
|
}
|
||||||
|
|
||||||
unsafe impl<T> Sync for ScopedLocalKey<T> {}
|
unsafe impl<T> Sync for ScopedLocalKey<T> {}
|
||||||
|
|
||||||
impl<T> ScopedLocalKey<T> {
|
impl<T> ScopedLocalKey<T> {
|
||||||
|
#[doc(hidden)]
|
||||||
|
/// # Safety
|
||||||
|
///
|
||||||
|
/// Should only be called through the public macro.
|
||||||
|
pub(crate) const unsafe fn new(inner: &'static LocalKey<Cell<*const ()>>) -> Self {
|
||||||
|
Self {
|
||||||
|
inner,
|
||||||
|
_marker: marker::PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Inserts a value into this scoped thread local storage slot for the
|
/// Inserts a value into this scoped thread local storage slot for the
|
||||||
/// duration of a closure.
|
/// duration of a closure.
|
||||||
pub(crate) fn set<F, R>(&'static self, t: &T, f: F) -> R
|
pub(crate) fn set<F, R>(&'static self, t: &T, f: F) -> R
|
||||||
|
@ -2,16 +2,19 @@
|
|||||||
//!
|
//!
|
||||||
//! # Model trait
|
//! # Model trait
|
||||||
//!
|
//!
|
||||||
//! Every model must implement the [`Model`] trait. This trait defines an
|
//! Every model must implement the [`Model`] trait. This trait defines
|
||||||
//! asynchronous initialization method, [`Model::init()`], which main purpose is
|
//! * a setup method, [`Model::setup()`], which main purpose is to create,
|
||||||
//! to enable models to perform specific actions only once all models have been
|
//! connect and add to the simulation bench submodels and perform other setup
|
||||||
//! connected and migrated to the simulation, but before the simulation actually
|
//! steps,
|
||||||
//! starts.
|
//! * an asynchronous initialization method, [`Model::init()`], which main
|
||||||
|
//! purpose is to enable models to perform specific actions only once all
|
||||||
|
//! models have been connected and migrated to the simulation, but before the
|
||||||
|
//! simulation actually starts.
|
||||||
//!
|
//!
|
||||||
//! #### Examples
|
//! #### Examples
|
||||||
//!
|
//!
|
||||||
//! A model that does not require initialization can simply use the default
|
//! A model that does not require setup and initialization can simply use the
|
||||||
//! implementation of the `Model` trait:
|
//! default implementation of the `Model` trait:
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use asynchronix::model::Model;
|
//! use asynchronix::model::Model;
|
||||||
@ -22,28 +25,31 @@
|
|||||||
//! impl Model for MyModel {}
|
//! impl Model for MyModel {}
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
//! Otherwise, a custom `init()` method can be implemented:
|
//! Otherwise, custom `setup()` or `init()` methods can be implemented:
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use std::future::Future;
|
//! use std::future::Future;
|
||||||
//! use std::pin::Pin;
|
//! use std::pin::Pin;
|
||||||
//!
|
//!
|
||||||
//! use asynchronix::model::{InitializedModel, Model};
|
//! use asynchronix::model::{Context, InitializedModel, Model, SetupContext};
|
||||||
//! use asynchronix::time::Scheduler;
|
|
||||||
//!
|
//!
|
||||||
//! pub struct MyModel {
|
//! pub struct MyModel {
|
||||||
//! // ...
|
//! // ...
|
||||||
//! }
|
//! }
|
||||||
//! impl Model for MyModel {
|
//! impl Model for MyModel {
|
||||||
//! fn init(
|
//! fn setup(
|
||||||
//! mut self,
|
//! &mut self,
|
||||||
//! scheduler: &Scheduler<Self>
|
//! setup_context: &SetupContext<Self>) {
|
||||||
//! ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
|
//! println!("...setup...");
|
||||||
//! Box::pin(async move {
|
//! }
|
||||||
//! println!("...initialization...");
|
|
||||||
//!
|
//!
|
||||||
//! self.into()
|
//! async fn init(
|
||||||
//! })
|
//! mut self,
|
||||||
|
//! context: &Context<Self>
|
||||||
|
//! ) -> InitializedModel<Self> {
|
||||||
|
//! println!("...initialization...");
|
||||||
|
//!
|
||||||
|
//! self.into()
|
||||||
//! }
|
//! }
|
||||||
//! }
|
//! }
|
||||||
//! ```
|
//! ```
|
||||||
@ -65,8 +71,9 @@
|
|||||||
//! ### Output and requestor ports
|
//! ### Output and requestor ports
|
||||||
//!
|
//!
|
||||||
//! Output and requestor ports can be added to a model using composition, adding
|
//! Output and requestor ports can be added to a model using composition, adding
|
||||||
//! [`Output`] and [`Requestor`] objects as members. They are parametrized by
|
//! [`Output`](crate::ports::Output) and [`Requestor`](crate::ports::Requestor)
|
||||||
//! the event, request and reply types.
|
//! objects as members. They are parametrized by the event, request and reply
|
||||||
|
//! types.
|
||||||
//!
|
//!
|
||||||
//! Models are expected to expose their output and requestor ports as public
|
//! Models are expected to expose their output and requestor ports as public
|
||||||
//! members so they can be connected to input and replier ports when assembling
|
//! members so they can be connected to input and replier ports when assembling
|
||||||
@ -75,7 +82,8 @@
|
|||||||
//! #### Example
|
//! #### Example
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use asynchronix::model::{Model, Output, Requestor};
|
//! use asynchronix::model::Model;
|
||||||
|
//! use asynchronix::ports::{Output, Requestor};
|
||||||
//!
|
//!
|
||||||
//! pub struct MyModel {
|
//! pub struct MyModel {
|
||||||
//! pub my_output: Output<String>,
|
//! pub my_output: Output<String>,
|
||||||
@ -90,9 +98,9 @@
|
|||||||
//!
|
//!
|
||||||
//! ### Input and replier ports
|
//! ### Input and replier ports
|
||||||
//!
|
//!
|
||||||
//! Input ports and replier ports are methods that implement the [`InputFn`] or
|
//! Input ports and replier ports are methods that implement the
|
||||||
//! [`ReplierFn`] traits with appropriate bounds on their argument and return
|
//! [`InputFn`](crate::ports::InputFn) or [`ReplierFn`](crate::ports::ReplierFn)
|
||||||
//! types.
|
//! traits with appropriate bounds on their argument and return types.
|
||||||
//!
|
//!
|
||||||
//! In practice, an input port method for an event of type `T` may have any of
|
//! In practice, an input port method for an event of type `T` may have any of
|
||||||
//! the following signatures, where the futures returned by the `async` variants
|
//! the following signatures, where the futures returned by the `async` variants
|
||||||
@ -101,17 +109,17 @@
|
|||||||
//! ```ignore
|
//! ```ignore
|
||||||
//! fn(&mut self) // argument elided, implies `T=()`
|
//! fn(&mut self) // argument elided, implies `T=()`
|
||||||
//! fn(&mut self, T)
|
//! fn(&mut self, T)
|
||||||
//! fn(&mut self, T, &Scheduler<Self>)
|
//! fn(&mut self, T, &Context<Self>)
|
||||||
//! async fn(&mut self) // argument elided, implies `T=()`
|
//! async fn(&mut self) // argument elided, implies `T=()`
|
||||||
//! async fn(&mut self, T)
|
//! async fn(&mut self, T)
|
||||||
//! async fn(&mut self, T, &Scheduler<Self>)
|
//! async fn(&mut self, T, &Context<Self>)
|
||||||
//! where
|
//! where
|
||||||
//! Self: Model,
|
//! Self: Model,
|
||||||
//! T: Clone + Send + 'static,
|
//! T: Clone + Send + 'static,
|
||||||
//! R: Send + 'static,
|
//! R: Send + 'static,
|
||||||
//! ```
|
//! ```
|
||||||
//!
|
//!
|
||||||
//! The scheduler argument is useful for methods that need access to the
|
//! The context argument is useful for methods that need access to the
|
||||||
//! simulation time or that need to schedule an action at a future date.
|
//! simulation time or that need to schedule an action at a future date.
|
||||||
//!
|
//!
|
||||||
//! A replier port for a request of type `T` with a reply of type `R` may in
|
//! A replier port for a request of type `T` with a reply of type `R` may in
|
||||||
@ -121,7 +129,7 @@
|
|||||||
//! ```ignore
|
//! ```ignore
|
||||||
//! async fn(&mut self) -> R // argument elided, implies `T=()`
|
//! async fn(&mut self) -> R // argument elided, implies `T=()`
|
||||||
//! async fn(&mut self, T) -> R
|
//! async fn(&mut self, T) -> R
|
||||||
//! async fn(&mut self, T, &Scheduler<Self>) -> R
|
//! async fn(&mut self, T, &Context<Self>) -> R
|
||||||
//! where
|
//! where
|
||||||
//! Self: Model,
|
//! Self: Model,
|
||||||
//! T: Clone + Send + 'static,
|
//! T: Clone + Send + 'static,
|
||||||
@ -132,7 +140,7 @@
|
|||||||
//! can be connected to input and requestor ports when assembling the simulation
|
//! can be connected to input and requestor ports when assembling the simulation
|
||||||
//! bench. However, input ports may instead be defined as private methods if
|
//! bench. However, input ports may instead be defined as private methods if
|
||||||
//! they are only used by the model itself to schedule future actions (see the
|
//! they are only used by the model itself to schedule future actions (see the
|
||||||
//! [`Scheduler`](crate::time::Scheduler) examples).
|
//! [`Context`] examples).
|
||||||
//!
|
//!
|
||||||
//! Changing the signature of an input or replier port is not considered to
|
//! Changing the signature of an input or replier port is not considered to
|
||||||
//! alter the public interface of a model provided that the event, request and
|
//! alter the public interface of a model provided that the event, request and
|
||||||
@ -141,17 +149,16 @@
|
|||||||
//! #### Example
|
//! #### Example
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use asynchronix::model::Model;
|
//! use asynchronix::model::{Context, Model};
|
||||||
//! use asynchronix::time::Scheduler;
|
|
||||||
//!
|
//!
|
||||||
//! pub struct MyModel {
|
//! pub struct MyModel {
|
||||||
//! // ...
|
//! // ...
|
||||||
//! }
|
//! }
|
||||||
//! impl MyModel {
|
//! impl MyModel {
|
||||||
//! pub fn my_input(&mut self, input: String, scheduler: &Scheduler<Self>) {
|
//! pub fn my_input(&mut self, input: String, context: &Context<Self>) {
|
||||||
//! // ...
|
//! // ...
|
||||||
//! }
|
//! }
|
||||||
//! pub async fn my_replier(&mut self, request: u32) -> bool { // scheduler argument elided
|
//! pub async fn my_replier(&mut self, request: u32) -> bool { // context argument elided
|
||||||
//! // ...
|
//! // ...
|
||||||
//! # unimplemented!()
|
//! # unimplemented!()
|
||||||
//! }
|
//! }
|
||||||
@ -161,21 +168,19 @@
|
|||||||
//!
|
//!
|
||||||
|
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::pin::Pin;
|
|
||||||
|
|
||||||
use crate::time::Scheduler;
|
pub use context::{Context, SetupContext};
|
||||||
|
|
||||||
pub use model_fn::{InputFn, ReplierFn};
|
mod context;
|
||||||
pub use ports::{LineError, LineId, Output, Requestor};
|
|
||||||
|
|
||||||
pub mod markers;
|
|
||||||
mod model_fn;
|
|
||||||
mod ports;
|
|
||||||
|
|
||||||
/// Trait to be implemented by all models.
|
/// Trait to be implemented by all models.
|
||||||
///
|
///
|
||||||
/// This trait enables models to perform specific actions in the
|
/// This trait enables models to perform specific actions during setup and
|
||||||
/// [`Model::init()`] method only once all models have been connected and
|
/// initialization. The [`Model::setup()`] method is run only once when models
|
||||||
|
/// are being added to the simulation bench. This method allows in particular
|
||||||
|
/// sub-models to be created, connected and added to the simulation.
|
||||||
|
///
|
||||||
|
/// The [`Model::init()`] method is run only once all models have been connected and
|
||||||
/// migrated to the simulation bench, but before the simulation actually starts.
|
/// migrated to the simulation bench, but before the simulation actually starts.
|
||||||
/// A common use for `init` is to send messages to connected models at the
|
/// A common use for `init` is to send messages to connected models at the
|
||||||
/// beginning of the simulation.
|
/// beginning of the simulation.
|
||||||
@ -184,6 +189,37 @@ mod ports;
|
|||||||
/// to prevent an already initialized model from being added to the simulation
|
/// to prevent an already initialized model from being added to the simulation
|
||||||
/// bench.
|
/// bench.
|
||||||
pub trait Model: Sized + Send + 'static {
|
pub trait Model: Sized + Send + 'static {
|
||||||
|
/// Performs model setup.
|
||||||
|
///
|
||||||
|
/// This method is executed exactly once for all models of the simulation
|
||||||
|
/// when the [`SimInit::add_model()`](crate::simulation::SimInit::add_model)
|
||||||
|
/// method is called.
|
||||||
|
///
|
||||||
|
/// The default implementation does nothing.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::future::Future;
|
||||||
|
/// use std::pin::Pin;
|
||||||
|
///
|
||||||
|
/// use asynchronix::model::{InitializedModel, Model, SetupContext};
|
||||||
|
///
|
||||||
|
/// pub struct MyModel {
|
||||||
|
/// // ...
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for MyModel {
|
||||||
|
/// fn setup(
|
||||||
|
/// &mut self,
|
||||||
|
/// setup_context: &SetupContext<Self>
|
||||||
|
/// ) {
|
||||||
|
/// println!("...setup...");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
fn setup(&mut self, _: &SetupContext<Self>) {}
|
||||||
|
|
||||||
/// Performs asynchronous model initialization.
|
/// Performs asynchronous model initialization.
|
||||||
///
|
///
|
||||||
/// This asynchronous method is executed exactly once for all models of the
|
/// This asynchronous method is executed exactly once for all models of the
|
||||||
@ -193,47 +229,31 @@ pub trait Model: Sized + Send + 'static {
|
|||||||
/// The default implementation simply converts the model to an
|
/// The default implementation simply converts the model to an
|
||||||
/// `InitializedModel` without any side effect.
|
/// `InitializedModel` without any side effect.
|
||||||
///
|
///
|
||||||
/// *Note*: it is currently necessary to box the returned future; this
|
|
||||||
/// limitation will be lifted once Rust supports `async` methods in traits.
|
|
||||||
///
|
|
||||||
/// # Examples
|
/// # Examples
|
||||||
///
|
///
|
||||||
/// ```
|
/// ```
|
||||||
/// use std::future::Future;
|
/// use std::future::Future;
|
||||||
/// use std::pin::Pin;
|
/// use std::pin::Pin;
|
||||||
///
|
///
|
||||||
/// use asynchronix::model::{InitializedModel, Model};
|
/// use asynchronix::model::{Context, InitializedModel, Model};
|
||||||
/// use asynchronix::time::Scheduler;
|
|
||||||
///
|
///
|
||||||
/// pub struct MyModel {
|
/// pub struct MyModel {
|
||||||
/// // ...
|
/// // ...
|
||||||
/// }
|
/// }
|
||||||
///
|
///
|
||||||
/// impl Model for MyModel {
|
/// impl Model for MyModel {
|
||||||
/// fn init(
|
/// async fn init(
|
||||||
/// self,
|
/// self,
|
||||||
/// scheduler: &Scheduler<Self>
|
/// context: &Context<Self>
|
||||||
/// ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
|
/// ) -> InitializedModel<Self> {
|
||||||
/// Box::pin(async move {
|
/// println!("...initialization...");
|
||||||
/// println!("...initialization...");
|
|
||||||
///
|
///
|
||||||
/// self.into()
|
/// self.into()
|
||||||
/// })
|
|
||||||
/// }
|
/// }
|
||||||
/// }
|
/// }
|
||||||
/// ```
|
/// ```
|
||||||
|
fn init(self, _: &Context<Self>) -> impl Future<Output = InitializedModel<Self>> + Send {
|
||||||
// Removing the boxing constraint requires the
|
async { self.into() }
|
||||||
// `return_position_impl_trait_in_trait` and `async_fn_in_trait` features.
|
|
||||||
// Tracking issue: <https://github.com/rust-lang/rust/issues/91611>.
|
|
||||||
fn init(
|
|
||||||
self,
|
|
||||||
scheduler: &Scheduler<Self>,
|
|
||||||
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
|
|
||||||
Box::pin(async move {
|
|
||||||
let _ = scheduler; // suppress the unused argument warning
|
|
||||||
self.into()
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
509
asynchronix/src/model/context.rs
Normal file
509
asynchronix/src/model/context.rs
Normal file
@ -0,0 +1,509 @@
|
|||||||
|
use std::fmt;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::channel::Sender;
|
||||||
|
use crate::executor::Executor;
|
||||||
|
use crate::ports::InputFn;
|
||||||
|
use crate::simulation::{
|
||||||
|
self, schedule_event_at_unchecked, schedule_keyed_event_at_unchecked,
|
||||||
|
schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked, ActionKey,
|
||||||
|
Deadline, Mailbox, SchedulerQueue, SchedulingError,
|
||||||
|
};
|
||||||
|
use crate::time::{MonotonicTime, TearableAtomicTime};
|
||||||
|
use crate::util::sync_cell::SyncCellReader;
|
||||||
|
|
||||||
|
use super::Model;
|
||||||
|
|
||||||
|
/// A local context for models.
|
||||||
|
///
|
||||||
|
/// A `Context` is a handle to the global context associated to a model
|
||||||
|
/// instance. It can be used by the model to retrieve the simulation time or
|
||||||
|
/// schedule delayed actions on itself.
|
||||||
|
///
|
||||||
|
/// ### Caveat: self-scheduling `async` methods
|
||||||
|
///
|
||||||
|
/// Due to a current rustc issue, `async` methods that schedule themselves will
|
||||||
|
/// not compile unless an explicit `Send` bound is added to the returned future.
|
||||||
|
/// This can be done by replacing the `async` signature with a partially
|
||||||
|
/// desugared signature such as:
|
||||||
|
///
|
||||||
|
/// ```ignore
|
||||||
|
/// fn self_scheduling_method<'a>(
|
||||||
|
/// &'a mut self,
|
||||||
|
/// arg: MyEventType,
|
||||||
|
/// context: &'a Context<Self>
|
||||||
|
/// ) -> impl Future<Output=()> + Send + 'a {
|
||||||
|
/// async move {
|
||||||
|
/// /* implementation */
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// Self-scheduling methods which are not `async` are not affected by this
|
||||||
|
/// issue.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// A model that sends a greeting after some delay.
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::time::Duration;
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
/// use asynchronix::ports::Output;
|
||||||
|
///
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct DelayedGreeter {
|
||||||
|
/// msg_out: Output<String>,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl DelayedGreeter {
|
||||||
|
/// // Triggers a greeting on the output port after some delay [input port].
|
||||||
|
/// pub async fn greet_with_delay(&mut self, delay: Duration, context: &Context<Self>) {
|
||||||
|
/// let time = context.time();
|
||||||
|
/// let greeting = format!("Hello, this message was scheduled at: {:?}.", time);
|
||||||
|
///
|
||||||
|
/// if delay.is_zero() {
|
||||||
|
/// self.msg_out.send(greeting).await;
|
||||||
|
/// } else {
|
||||||
|
/// context.schedule_event(delay, Self::send_msg, greeting).unwrap();
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Sends a message to the output [private input port].
|
||||||
|
/// async fn send_msg(&mut self, msg: String) {
|
||||||
|
/// self.msg_out.send(msg).await;
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
/// impl Model for DelayedGreeter {}
|
||||||
|
/// ```
|
||||||
|
|
||||||
|
// The self-scheduling caveat seems related to this issue:
|
||||||
|
// https://github.com/rust-lang/rust/issues/78649
|
||||||
|
pub struct Context<M: Model> {
|
||||||
|
name: String,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
||||||
|
time: SyncCellReader<TearableAtomicTime>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M: Model> Context<M> {
|
||||||
|
/// Creates a new local context.
|
||||||
|
pub(crate) fn new(
|
||||||
|
name: String,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
||||||
|
time: SyncCellReader<TearableAtomicTime>,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
name,
|
||||||
|
sender,
|
||||||
|
scheduler_queue,
|
||||||
|
time,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the model instance name.
|
||||||
|
pub fn name(&self) -> &str {
|
||||||
|
&self.name
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the current simulation time.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
/// use asynchronix::time::MonotonicTime;
|
||||||
|
///
|
||||||
|
/// fn is_third_millenium<M: Model>(context: &Context<M>) -> bool {
|
||||||
|
/// let time = context.time();
|
||||||
|
/// time >= MonotonicTime::new(978307200, 0).unwrap()
|
||||||
|
/// && time < MonotonicTime::new(32535216000, 0).unwrap()
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
pub fn time(&self) -> MonotonicTime {
|
||||||
|
self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time")
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules an event at a future time.
|
||||||
|
///
|
||||||
|
/// An error is returned if the specified deadline is not in the future of
|
||||||
|
/// the current simulation time.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::time::Duration;
|
||||||
|
///
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
///
|
||||||
|
/// // A timer.
|
||||||
|
/// pub struct Timer {}
|
||||||
|
///
|
||||||
|
/// impl Timer {
|
||||||
|
/// // Sets an alarm [input port].
|
||||||
|
/// pub fn set(&mut self, setting: Duration, context: &Context<Self>) {
|
||||||
|
/// if context.schedule_event(setting, Self::ring, ()).is_err() {
|
||||||
|
/// println!("The alarm clock can only be set for a future time");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Rings [private input port].
|
||||||
|
/// fn ring(&mut self) {
|
||||||
|
/// println!("Brringggg");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for Timer {}
|
||||||
|
/// ```
|
||||||
|
pub fn schedule_event<F, T, S>(
|
||||||
|
&self,
|
||||||
|
deadline: impl Deadline,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
) -> Result<(), SchedulingError>
|
||||||
|
where
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let now = self.time();
|
||||||
|
let time = deadline.into_time(now);
|
||||||
|
if now >= time {
|
||||||
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
|
}
|
||||||
|
let sender = self.sender.clone();
|
||||||
|
schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules a cancellable event at a future time and returns an action
|
||||||
|
/// key.
|
||||||
|
///
|
||||||
|
/// An error is returned if the specified deadline is not in the future of
|
||||||
|
/// the current simulation time.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
/// use asynchronix::simulation::ActionKey;
|
||||||
|
/// use asynchronix::time::MonotonicTime;
|
||||||
|
///
|
||||||
|
/// // An alarm clock that can be cancelled.
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct CancellableAlarmClock {
|
||||||
|
/// event_key: Option<ActionKey>,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl CancellableAlarmClock {
|
||||||
|
/// // Sets an alarm [input port].
|
||||||
|
/// pub fn set(&mut self, setting: MonotonicTime, context: &Context<Self>) {
|
||||||
|
/// self.cancel();
|
||||||
|
/// match context.schedule_keyed_event(setting, Self::ring, ()) {
|
||||||
|
/// Ok(event_key) => self.event_key = Some(event_key),
|
||||||
|
/// Err(_) => println!("The alarm clock can only be set for a future time"),
|
||||||
|
/// };
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Cancels the current alarm, if any [input port].
|
||||||
|
/// pub fn cancel(&mut self) {
|
||||||
|
/// self.event_key.take().map(|k| k.cancel());
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Rings the alarm [private input port].
|
||||||
|
/// fn ring(&mut self) {
|
||||||
|
/// println!("Brringggg!");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for CancellableAlarmClock {}
|
||||||
|
/// ```
|
||||||
|
pub fn schedule_keyed_event<F, T, S>(
|
||||||
|
&self,
|
||||||
|
deadline: impl Deadline,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
) -> Result<ActionKey, SchedulingError>
|
||||||
|
where
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let now = self.time();
|
||||||
|
let time = deadline.into_time(now);
|
||||||
|
if now >= time {
|
||||||
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
|
}
|
||||||
|
let sender = self.sender.clone();
|
||||||
|
let event_key =
|
||||||
|
schedule_keyed_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
|
||||||
|
|
||||||
|
Ok(event_key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules a periodically recurring event at a future time.
|
||||||
|
///
|
||||||
|
/// An error is returned if the specified deadline is not in the future of
|
||||||
|
/// the current simulation time or if the specified period is null.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::time::Duration;
|
||||||
|
///
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
/// use asynchronix::time::MonotonicTime;
|
||||||
|
///
|
||||||
|
/// // An alarm clock beeping at 1Hz.
|
||||||
|
/// pub struct BeepingAlarmClock {}
|
||||||
|
///
|
||||||
|
/// impl BeepingAlarmClock {
|
||||||
|
/// // Sets an alarm [input port].
|
||||||
|
/// pub fn set(&mut self, setting: MonotonicTime, context: &Context<Self>) {
|
||||||
|
/// if context.schedule_periodic_event(
|
||||||
|
/// setting,
|
||||||
|
/// Duration::from_secs(1), // 1Hz = 1/1s
|
||||||
|
/// Self::beep,
|
||||||
|
/// ()
|
||||||
|
/// ).is_err() {
|
||||||
|
/// println!("The alarm clock can only be set for a future time");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Emits a single beep [private input port].
|
||||||
|
/// fn beep(&mut self) {
|
||||||
|
/// println!("Beep!");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for BeepingAlarmClock {}
|
||||||
|
/// ```
|
||||||
|
pub fn schedule_periodic_event<F, T, S>(
|
||||||
|
&self,
|
||||||
|
deadline: impl Deadline,
|
||||||
|
period: Duration,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
) -> Result<(), SchedulingError>
|
||||||
|
where
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let now = self.time();
|
||||||
|
let time = deadline.into_time(now);
|
||||||
|
if now >= time {
|
||||||
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
|
}
|
||||||
|
if period.is_zero() {
|
||||||
|
return Err(SchedulingError::NullRepetitionPeriod);
|
||||||
|
}
|
||||||
|
let sender = self.sender.clone();
|
||||||
|
schedule_periodic_event_at_unchecked(
|
||||||
|
time,
|
||||||
|
period,
|
||||||
|
func,
|
||||||
|
arg,
|
||||||
|
sender,
|
||||||
|
&self.scheduler_queue,
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules a cancellable, periodically recurring event at a future time
|
||||||
|
/// and returns an action key.
|
||||||
|
///
|
||||||
|
/// An error is returned if the specified deadline is not in the future of
|
||||||
|
/// the current simulation time or if the specified period is null.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::time::Duration;
|
||||||
|
///
|
||||||
|
/// use asynchronix::model::{Context, Model};
|
||||||
|
/// use asynchronix::simulation::ActionKey;
|
||||||
|
/// use asynchronix::time::MonotonicTime;
|
||||||
|
///
|
||||||
|
/// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or
|
||||||
|
/// // stopped after it sets off.
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct CancellableBeepingAlarmClock {
|
||||||
|
/// event_key: Option<ActionKey>,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl CancellableBeepingAlarmClock {
|
||||||
|
/// // Sets an alarm [input port].
|
||||||
|
/// pub fn set(&mut self, setting: MonotonicTime, context: &Context<Self>) {
|
||||||
|
/// self.cancel();
|
||||||
|
/// match context.schedule_keyed_periodic_event(
|
||||||
|
/// setting,
|
||||||
|
/// Duration::from_secs(1), // 1Hz = 1/1s
|
||||||
|
/// Self::beep,
|
||||||
|
/// ()
|
||||||
|
/// ) {
|
||||||
|
/// Ok(event_key) => self.event_key = Some(event_key),
|
||||||
|
/// Err(_) => println!("The alarm clock can only be set for a future time"),
|
||||||
|
/// };
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Cancels or stops the alarm [input port].
|
||||||
|
/// pub fn cancel(&mut self) {
|
||||||
|
/// self.event_key.take().map(|k| k.cancel());
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// // Emits a single beep [private input port].
|
||||||
|
/// fn beep(&mut self) {
|
||||||
|
/// println!("Beep!");
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for CancellableBeepingAlarmClock {}
|
||||||
|
/// ```
|
||||||
|
pub fn schedule_keyed_periodic_event<F, T, S>(
|
||||||
|
&self,
|
||||||
|
deadline: impl Deadline,
|
||||||
|
period: Duration,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
) -> Result<ActionKey, SchedulingError>
|
||||||
|
where
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let now = self.time();
|
||||||
|
let time = deadline.into_time(now);
|
||||||
|
if now >= time {
|
||||||
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
|
}
|
||||||
|
if period.is_zero() {
|
||||||
|
return Err(SchedulingError::NullRepetitionPeriod);
|
||||||
|
}
|
||||||
|
let sender = self.sender.clone();
|
||||||
|
let event_key = schedule_periodic_keyed_event_at_unchecked(
|
||||||
|
time,
|
||||||
|
period,
|
||||||
|
func,
|
||||||
|
arg,
|
||||||
|
sender,
|
||||||
|
&self.scheduler_queue,
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(event_key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M: Model> fmt::Debug for Context<M> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("Context").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A setup context for models.
|
||||||
|
///
|
||||||
|
/// A `SetupContext` can be used by models during the setup stage to
|
||||||
|
/// create submodels and add them to the simulation bench.
|
||||||
|
///
|
||||||
|
/// # Examples
|
||||||
|
///
|
||||||
|
/// A model that contains two connected submodels.
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
/// use std::time::Duration;
|
||||||
|
/// use asynchronix::model::{Model, SetupContext};
|
||||||
|
/// use asynchronix::ports::Output;
|
||||||
|
/// use asynchronix::simulation::Mailbox;
|
||||||
|
///
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct SubmodelA {
|
||||||
|
/// out: Output<u32>,
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for SubmodelA {}
|
||||||
|
///
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct SubmodelB {}
|
||||||
|
///
|
||||||
|
/// impl SubmodelB {
|
||||||
|
/// pub async fn input(&mut self, value: u32) {
|
||||||
|
/// println!("Received {}", value);
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// impl Model for SubmodelB {}
|
||||||
|
///
|
||||||
|
/// #[derive(Default)]
|
||||||
|
/// pub struct Parent {}
|
||||||
|
///
|
||||||
|
/// impl Model for Parent {
|
||||||
|
/// fn setup(
|
||||||
|
/// &mut self,
|
||||||
|
/// setup_context: &SetupContext<Self>) {
|
||||||
|
/// let mut a = SubmodelA::default();
|
||||||
|
/// let b = SubmodelB::default();
|
||||||
|
/// let a_mbox = Mailbox::new();
|
||||||
|
/// let b_mbox = Mailbox::new();
|
||||||
|
/// let a_name = setup_context.name().to_string() + "::a";
|
||||||
|
/// let b_name = setup_context.name().to_string() + "::b";
|
||||||
|
///
|
||||||
|
/// a.out.connect(SubmodelB::input, &b_mbox);
|
||||||
|
///
|
||||||
|
/// setup_context.add_model(a, a_mbox, a_name);
|
||||||
|
/// setup_context.add_model(b, b_mbox, b_name);
|
||||||
|
/// }
|
||||||
|
/// }
|
||||||
|
///
|
||||||
|
/// ```
|
||||||
|
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct SetupContext<'a, M: Model> {
|
||||||
|
/// Mailbox of the model.
|
||||||
|
pub mailbox: &'a Mailbox<M>,
|
||||||
|
context: &'a Context<M>,
|
||||||
|
executor: &'a Executor,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a, M: Model> SetupContext<'a, M> {
|
||||||
|
/// Creates a new local context.
|
||||||
|
pub(crate) fn new(
|
||||||
|
mailbox: &'a Mailbox<M>,
|
||||||
|
context: &'a Context<M>,
|
||||||
|
executor: &'a Executor,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
mailbox,
|
||||||
|
context,
|
||||||
|
executor,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the model instance name.
|
||||||
|
pub fn name(&self) -> &str {
|
||||||
|
&self.context.name
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a new model and its mailbox to the simulation bench.
|
||||||
|
///
|
||||||
|
/// The `name` argument needs not be unique (it can be an empty string) and
|
||||||
|
/// is used for convenience for model instance identification (e.g. for
|
||||||
|
/// logging purposes).
|
||||||
|
pub fn add_model<N: Model>(&self, model: N, mailbox: Mailbox<N>, name: impl Into<String>) {
|
||||||
|
let mut submodel_name = name.into();
|
||||||
|
if !self.context.name().is_empty() && !submodel_name.is_empty() {
|
||||||
|
submodel_name = self.context.name().to_string() + "." + &submodel_name;
|
||||||
|
}
|
||||||
|
simulation::add_model(
|
||||||
|
model,
|
||||||
|
mailbox,
|
||||||
|
submodel_name,
|
||||||
|
self.context.scheduler_queue.clone(),
|
||||||
|
self.context.time.clone(),
|
||||||
|
self.executor,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
@ -1,218 +0,0 @@
|
|||||||
//! Model ports for event and query broadcasting.
|
|
||||||
//!
|
|
||||||
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
|
|
||||||
//! public member variables. Output ports broadcast events to all connected
|
|
||||||
//! input ports, while requestor ports broadcast queries to, and retrieve
|
|
||||||
//! replies from, all connected replier ports.
|
|
||||||
//!
|
|
||||||
//! On the surface, output and requestor ports only differ in that sending a
|
|
||||||
//! query from a requestor port also returns an iterator over the replies from
|
|
||||||
//! all connected ports. Sending a query is more costly, however, because of the
|
|
||||||
//! need to wait until all connected models have processed the query. In
|
|
||||||
//! contrast, since events are buffered in the mailbox of the target model,
|
|
||||||
//! sending an event is a fire-and-forget operation. For this reason, output
|
|
||||||
//! ports should generally be preferred over requestor ports when possible.
|
|
||||||
|
|
||||||
use std::fmt;
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
|
|
||||||
mod broadcaster;
|
|
||||||
mod sender;
|
|
||||||
|
|
||||||
use crate::model::{InputFn, Model, ReplierFn};
|
|
||||||
use crate::simulation::{Address, EventSlot, EventStream};
|
|
||||||
use crate::util::spsc_queue;
|
|
||||||
|
|
||||||
use broadcaster::Broadcaster;
|
|
||||||
|
|
||||||
use self::sender::{EventSender, EventSlotSender, EventStreamSender, QuerySender};
|
|
||||||
|
|
||||||
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
|
|
||||||
/// Unique identifier for a connection between two ports.
|
|
||||||
pub struct LineId(u64);
|
|
||||||
|
|
||||||
/// An output port.
|
|
||||||
///
|
|
||||||
/// `Output` ports can be connected to input ports, i.e. to asynchronous model
|
|
||||||
/// methods that return no value. They broadcast events to all connected input
|
|
||||||
/// ports.
|
|
||||||
pub struct Output<T: Clone + Send + 'static> {
|
|
||||||
broadcaster: Broadcaster<T, ()>,
|
|
||||||
next_line_id: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static> Output<T> {
|
|
||||||
/// Creates a new, disconnected `Output` port.
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adds a connection to an input port of the model specified by the
|
|
||||||
/// address.
|
|
||||||
///
|
|
||||||
/// The input port must be an asynchronous method of a model of type `M`
|
|
||||||
/// taking as argument a value of type `T` plus, optionally, a scheduler
|
|
||||||
/// reference.
|
|
||||||
pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Copy,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
assert!(self.next_line_id != u64::MAX);
|
|
||||||
let line_id = LineId(self.next_line_id);
|
|
||||||
self.next_line_id += 1;
|
|
||||||
let sender = Box::new(EventSender::new(input, address.into().0));
|
|
||||||
self.broadcaster.add(sender, line_id);
|
|
||||||
|
|
||||||
line_id
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adds a connection to an event stream iterator.
|
|
||||||
pub fn connect_stream(&mut self) -> (EventStream<T>, LineId) {
|
|
||||||
assert!(self.next_line_id != u64::MAX);
|
|
||||||
let line_id = LineId(self.next_line_id);
|
|
||||||
self.next_line_id += 1;
|
|
||||||
|
|
||||||
let (producer, consumer) = spsc_queue::spsc_queue();
|
|
||||||
let sender = Box::new(EventStreamSender::new(producer));
|
|
||||||
let event_stream = EventStream::new(consumer);
|
|
||||||
|
|
||||||
self.broadcaster.add(sender, line_id);
|
|
||||||
|
|
||||||
(event_stream, line_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adds a connection to an event slot.
|
|
||||||
pub fn connect_slot(&mut self) -> (EventSlot<T>, LineId) {
|
|
||||||
assert!(self.next_line_id != u64::MAX);
|
|
||||||
let line_id = LineId(self.next_line_id);
|
|
||||||
self.next_line_id += 1;
|
|
||||||
|
|
||||||
let slot = Arc::new(Mutex::new(None));
|
|
||||||
let sender = Box::new(EventSlotSender::new(slot.clone()));
|
|
||||||
let event_slot = EventSlot::new(slot);
|
|
||||||
|
|
||||||
self.broadcaster.add(sender, line_id);
|
|
||||||
|
|
||||||
(event_slot, line_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Removes the connection specified by the `LineId` parameter.
|
|
||||||
///
|
|
||||||
/// It is a logic error to specify a line identifier from another [`Output`]
|
|
||||||
/// or [`Requestor`] instance and may result in the disconnection of an
|
|
||||||
/// arbitrary endpoint.
|
|
||||||
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
|
||||||
if self.broadcaster.remove(line_id) {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(LineError {})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Removes all connections.
|
|
||||||
pub fn disconnect_all(&mut self) {
|
|
||||||
self.broadcaster.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Broadcasts an event to all connected input ports.
|
|
||||||
pub async fn send(&mut self, arg: T) {
|
|
||||||
self.broadcaster.broadcast_event(arg).await.unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static> Default for Output<T> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
broadcaster: Broadcaster::default(),
|
|
||||||
next_line_id: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
write!(f, "Output ({} connected ports)", self.broadcaster.len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A requestor port.
|
|
||||||
///
|
|
||||||
/// `Requestor` ports can be connected to replier ports, i.e. to asynchronous
|
|
||||||
/// model methods that return a value. They broadcast queries to all connected
|
|
||||||
/// replier ports.
|
|
||||||
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
|
|
||||||
broadcaster: Broadcaster<T, R>,
|
|
||||||
next_line_id: u64,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
|
|
||||||
/// Creates a new, disconnected `Requestor` port.
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self::default()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adds a connection to a replier port of the model specified by the
|
|
||||||
/// address.
|
|
||||||
///
|
|
||||||
/// The replier port must be an asynchronous method of a model of type `M`
|
|
||||||
/// returning a value of type `R` and taking as argument a value of type `T`
|
|
||||||
/// plus, optionally, a scheduler reference.
|
|
||||||
pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
assert!(self.next_line_id != u64::MAX);
|
|
||||||
let line_id = LineId(self.next_line_id);
|
|
||||||
self.next_line_id += 1;
|
|
||||||
let sender = Box::new(QuerySender::new(replier, address.into().0));
|
|
||||||
self.broadcaster.add(sender, line_id);
|
|
||||||
|
|
||||||
line_id
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Removes the connection specified by the `LineId` parameter.
|
|
||||||
///
|
|
||||||
/// It is a logic error to specify a line identifier from another [`Output`]
|
|
||||||
/// or [`Requestor`] instance and may result in the disconnection of an
|
|
||||||
/// arbitrary endpoint.
|
|
||||||
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
|
||||||
if self.broadcaster.remove(line_id) {
|
|
||||||
Ok(())
|
|
||||||
} else {
|
|
||||||
Err(LineError {})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Removes all connections.
|
|
||||||
pub fn disconnect_all(&mut self) {
|
|
||||||
self.broadcaster.clear();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Broadcasts a query to all connected replier ports.
|
|
||||||
pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
|
|
||||||
self.broadcaster.broadcast_query(arg).await.unwrap()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
broadcaster: Broadcaster::default(),
|
|
||||||
next_line_id: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
write!(f, "Requestor ({} connected ports)", self.broadcaster.len())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Error raised when the specified line cannot be found.
|
|
||||||
#[derive(Copy, Clone, Debug)]
|
|
||||||
pub struct LineError {}
|
|
93
asynchronix/src/ports.rs
Normal file
93
asynchronix/src/ports.rs
Normal file
@ -0,0 +1,93 @@
|
|||||||
|
//! Model ports for event and query broadcasting.
|
||||||
|
//!
|
||||||
|
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
|
||||||
|
//! public member variables. Output ports broadcast events to all connected
|
||||||
|
//! input ports, while requestor ports broadcast queries to, and retrieve
|
||||||
|
//! replies from, all connected replier ports.
|
||||||
|
//!
|
||||||
|
//! On the surface, output and requestor ports only differ in that sending a
|
||||||
|
//! query from a requestor port also returns an iterator over the replies from
|
||||||
|
//! all connected ports. Sending a query is more costly, however, because of the
|
||||||
|
//! need to wait until all connected models have processed the query. In
|
||||||
|
//! contrast, since events are buffered in the mailbox of the target model,
|
||||||
|
//! sending an event is a fire-and-forget operation. For this reason, output
|
||||||
|
//! ports should generally be preferred over requestor ports when possible.
|
||||||
|
//!
|
||||||
|
//! `Output` and `Requestor` ports are clonable. Their clones are shallow
|
||||||
|
//! copies, meaning that any modification of the ports connected to one clone is
|
||||||
|
//! immediately reflected in other clones.
|
||||||
|
//!
|
||||||
|
//! #### Example
|
||||||
|
//!
|
||||||
|
//! This example demonstrates a submodel inside a parent model. The output of
|
||||||
|
//! the submodel is a clone of the parent model output. Both outputs remain
|
||||||
|
//! therefore always connected to the same inputs.
|
||||||
|
//!
|
||||||
|
//! For a more comprehensive example demonstrating output cloning in submodels
|
||||||
|
//! assemblies, see the [`assembly example`][assembly].
|
||||||
|
//!
|
||||||
|
//! [assembly]:
|
||||||
|
//! https://github.com/asynchronics/asynchronix/tree/main/asynchronix/examples/assembly.rs
|
||||||
|
//!
|
||||||
|
//! ```
|
||||||
|
//! use asynchronix::model::{Model, SetupContext};
|
||||||
|
//! use asynchronix::ports::Output;
|
||||||
|
//! use asynchronix::simulation::Mailbox;
|
||||||
|
//!
|
||||||
|
//! pub struct ChildModel {
|
||||||
|
//! pub output: Output<u64>,
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! impl ChildModel {
|
||||||
|
//! pub fn new() -> Self {
|
||||||
|
//! Self {
|
||||||
|
//! output: Default::default(),
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! impl Model for ChildModel {}
|
||||||
|
//!
|
||||||
|
//! pub struct ParentModel {
|
||||||
|
//! pub output: Output<u64>,
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! impl ParentModel {
|
||||||
|
//! pub fn new() -> Self {
|
||||||
|
//! Self {
|
||||||
|
//! output: Default::default(),
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//!
|
||||||
|
//! impl Model for ParentModel {
|
||||||
|
//! fn setup(&mut self, setup_context: &SetupContext<Self>) {
|
||||||
|
//! let mut child = ChildModel::new();
|
||||||
|
//! let child_mbox = Mailbox::new();
|
||||||
|
//! child.output = self.output.clone();
|
||||||
|
//! let child_name = setup_context.name().to_string() + "::child";
|
||||||
|
//! setup_context.add_model(child, child_mbox, child_name);
|
||||||
|
//! }
|
||||||
|
//! }
|
||||||
|
//! ```
|
||||||
|
|
||||||
|
mod input;
|
||||||
|
mod output;
|
||||||
|
mod sink;
|
||||||
|
mod source;
|
||||||
|
|
||||||
|
pub use input::markers;
|
||||||
|
pub use input::{InputFn, ReplierFn};
|
||||||
|
pub use output::{Output, Requestor};
|
||||||
|
pub use sink::{
|
||||||
|
event_buffer::EventBuffer, event_slot::EventSlot, EventSink, EventSinkStream, EventSinkWriter,
|
||||||
|
};
|
||||||
|
pub use source::{EventSource, QuerySource, ReplyReceiver};
|
||||||
|
|
||||||
|
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
|
||||||
|
/// Unique identifier for a connection between two ports.
|
||||||
|
pub struct LineId(u64);
|
||||||
|
|
||||||
|
/// Error raised when the specified line cannot be found.
|
||||||
|
#[derive(Copy, Clone, Debug)]
|
||||||
|
pub struct LineError {}
|
4
asynchronix/src/ports/input.rs
Normal file
4
asynchronix/src/ports/input.rs
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
pub mod markers;
|
||||||
|
mod model_fn;
|
||||||
|
|
||||||
|
pub use model_fn::{InputFn, ReplierFn};
|
@ -6,14 +6,14 @@
|
|||||||
pub struct WithoutArguments {}
|
pub struct WithoutArguments {}
|
||||||
|
|
||||||
/// Marker type for regular simulation model methods that take a mutable
|
/// Marker type for regular simulation model methods that take a mutable
|
||||||
/// reference to the model and a message, without scheduler argument.
|
/// reference to the model and a message, without context argument.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct WithoutScheduler {}
|
pub struct WithoutContext {}
|
||||||
|
|
||||||
/// Marker type for regular simulation model methods that take a mutable
|
/// Marker type for regular simulation model methods that take a mutable
|
||||||
/// reference to the model, a message and an explicit scheduler argument.
|
/// reference to the model, a message and an explicit context argument.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct WithScheduler {}
|
pub struct WithContext {}
|
||||||
|
|
||||||
/// Marker type for asynchronous simulation model methods that take a mutable
|
/// Marker type for asynchronous simulation model methods that take a mutable
|
||||||
/// reference to the model, without any other argument.
|
/// reference to the model, without any other argument.
|
||||||
@ -21,11 +21,11 @@ pub struct WithScheduler {}
|
|||||||
pub struct AsyncWithoutArguments {}
|
pub struct AsyncWithoutArguments {}
|
||||||
|
|
||||||
/// Marker type for asynchronous simulation model methods that take a mutable
|
/// Marker type for asynchronous simulation model methods that take a mutable
|
||||||
/// reference to the model and a message, without scheduler argument.
|
/// reference to the model and a message, without context argument.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct AsyncWithoutScheduler {}
|
pub struct AsyncWithoutContext {}
|
||||||
|
|
||||||
/// Marker type for asynchronous simulation model methods that take a mutable
|
/// Marker type for asynchronous simulation model methods that take a mutable
|
||||||
/// reference to the model, a message and an explicit scheduler argument.
|
/// reference to the model, a message and an explicit context argument.
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
pub struct AsyncWithScheduler {}
|
pub struct AsyncWithContext {}
|
@ -2,8 +2,9 @@
|
|||||||
|
|
||||||
use std::future::{ready, Future, Ready};
|
use std::future::{ready, Future, Ready};
|
||||||
|
|
||||||
use crate::model::{markers, Model};
|
use crate::model::{Context, Model};
|
||||||
use crate::time::Scheduler;
|
|
||||||
|
use super::markers;
|
||||||
|
|
||||||
/// A function, method or closures that can be used as an *input port*.
|
/// A function, method or closures that can be used as an *input port*.
|
||||||
///
|
///
|
||||||
@ -13,9 +14,9 @@ use crate::time::Scheduler;
|
|||||||
///
|
///
|
||||||
/// ```ignore
|
/// ```ignore
|
||||||
/// FnOnce(&mut M, T)
|
/// FnOnce(&mut M, T)
|
||||||
/// FnOnce(&mut M, T, &Scheduler<M>)
|
/// FnOnce(&mut M, T, &Context<M>)
|
||||||
/// async fn(&mut M, T)
|
/// async fn(&mut M, T)
|
||||||
/// async fn(&mut M, T, &Scheduler<M>)
|
/// async fn(&mut M, T, &Context<M>)
|
||||||
/// where
|
/// where
|
||||||
/// M: Model
|
/// M: Model
|
||||||
/// ```
|
/// ```
|
||||||
@ -33,7 +34,7 @@ pub trait InputFn<'a, M: Model, T, S>: Send + 'static {
|
|||||||
type Future: Future<Output = ()> + Send + 'a;
|
type Future: Future<Output = ()> + Send + 'a;
|
||||||
|
|
||||||
/// Calls the method.
|
/// Calls the method.
|
||||||
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
|
fn call(self, model: &'a mut M, arg: T, context: &'a Context<M>) -> Self::Future;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, F> InputFn<'a, M, (), markers::WithoutArguments> for F
|
impl<'a, M, F> InputFn<'a, M, (), markers::WithoutArguments> for F
|
||||||
@ -43,36 +44,36 @@ where
|
|||||||
{
|
{
|
||||||
type Future = Ready<()>;
|
type Future = Ready<()>;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, _arg: (), _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model);
|
self(model);
|
||||||
|
|
||||||
ready(())
|
ready(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutScheduler> for F
|
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: FnOnce(&'a mut M, T) + Send + 'static,
|
F: FnOnce(&'a mut M, T) + Send + 'static,
|
||||||
{
|
{
|
||||||
type Future = Ready<()>;
|
type Future = Ready<()>;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg);
|
self(model, arg);
|
||||||
|
|
||||||
ready(())
|
ready(())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithScheduler> for F
|
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) + Send + 'static,
|
F: FnOnce(&'a mut M, T, &'a Context<M>) + Send + 'static,
|
||||||
{
|
{
|
||||||
type Future = Ready<()>;
|
type Future = Ready<()>;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg, scheduler);
|
self(model, arg, context);
|
||||||
|
|
||||||
ready(())
|
ready(())
|
||||||
}
|
}
|
||||||
@ -86,12 +87,12 @@ where
|
|||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, _arg: (), _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model)
|
self(model)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutScheduler> for F
|
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
Fut: Future<Output = ()> + Send + 'a,
|
Fut: Future<Output = ()> + Send + 'a,
|
||||||
@ -99,21 +100,21 @@ where
|
|||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg)
|
self(model, arg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithScheduler> for F
|
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
Fut: Future<Output = ()> + Send + 'a,
|
Fut: Future<Output = ()> + Send + 'a,
|
||||||
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
|
F: FnOnce(&'a mut M, T, &'a Context<M>) -> Fut + Send + 'static,
|
||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg, scheduler)
|
self(model, arg, context)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -125,7 +126,7 @@ where
|
|||||||
///
|
///
|
||||||
/// ```ignore
|
/// ```ignore
|
||||||
/// async fn(&mut M, T) -> R
|
/// async fn(&mut M, T) -> R
|
||||||
/// async fn(&mut M, T, &Scheduler<M>) -> R
|
/// async fn(&mut M, T, &Context<M>) -> R
|
||||||
/// where
|
/// where
|
||||||
/// M: Model
|
/// M: Model
|
||||||
/// ```
|
/// ```
|
||||||
@ -142,7 +143,7 @@ pub trait ReplierFn<'a, M: Model, T, R, S>: Send + 'static {
|
|||||||
type Future: Future<Output = R> + Send + 'a;
|
type Future: Future<Output = R> + Send + 'a;
|
||||||
|
|
||||||
/// Calls the method.
|
/// Calls the method.
|
||||||
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
|
fn call(self, model: &'a mut M, arg: T, context: &'a Context<M>) -> Self::Future;
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, R, Fut, F> ReplierFn<'a, M, (), R, markers::AsyncWithoutArguments> for F
|
impl<'a, M, R, Fut, F> ReplierFn<'a, M, (), R, markers::AsyncWithoutArguments> for F
|
||||||
@ -153,12 +154,12 @@ where
|
|||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, _arg: (), _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model)
|
self(model)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutScheduler> for F
|
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
Fut: Future<Output = R> + Send + 'a,
|
Fut: Future<Output = R> + Send + 'a,
|
||||||
@ -166,20 +167,20 @@ where
|
|||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, _context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg)
|
self(model, arg)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithScheduler> for F
|
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithContext> for F
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
Fut: Future<Output = R> + Send + 'a,
|
Fut: Future<Output = R> + Send + 'a,
|
||||||
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
|
F: FnOnce(&'a mut M, T, &'a Context<M>) -> Fut + Send + 'static,
|
||||||
{
|
{
|
||||||
type Future = Fut;
|
type Future = Fut;
|
||||||
|
|
||||||
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
|
fn call(self, model: &'a mut M, arg: T, context: &'a Context<M>) -> Self::Future {
|
||||||
self(model, arg, scheduler)
|
self(model, arg, context)
|
||||||
}
|
}
|
||||||
}
|
}
|
185
asynchronix/src/ports/output.rs
Normal file
185
asynchronix/src/ports/output.rs
Normal file
@ -0,0 +1,185 @@
|
|||||||
|
mod broadcaster;
|
||||||
|
mod sender;
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
|
||||||
|
use crate::model::Model;
|
||||||
|
use crate::ports::{EventSink, LineError, LineId};
|
||||||
|
use crate::ports::{InputFn, ReplierFn};
|
||||||
|
use crate::simulation::Address;
|
||||||
|
use crate::util::cached_rw_lock::CachedRwLock;
|
||||||
|
|
||||||
|
use broadcaster::{EventBroadcaster, QueryBroadcaster};
|
||||||
|
|
||||||
|
use self::sender::{EventSinkSender, InputSender, ReplierSender};
|
||||||
|
|
||||||
|
/// An output port.
|
||||||
|
///
|
||||||
|
/// `Output` ports can be connected to input ports, i.e. to asynchronous model
|
||||||
|
/// methods that return no value. They broadcast events to all connected input
|
||||||
|
/// ports.
|
||||||
|
///
|
||||||
|
/// When an `Output` is cloned, the information on connected ports remains
|
||||||
|
/// shared and therefore all clones use and modify the same list of connected
|
||||||
|
/// ports.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct Output<T: Clone + Send + 'static> {
|
||||||
|
broadcaster: CachedRwLock<EventBroadcaster<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> Output<T> {
|
||||||
|
/// Creates a new, disconnected `Output` port.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a connection to an input port of the model specified by the
|
||||||
|
/// address.
|
||||||
|
///
|
||||||
|
/// The input port must be an asynchronous method of a model of type `M`
|
||||||
|
/// taking as argument a value of type `T` plus, optionally, a scheduler
|
||||||
|
/// reference.
|
||||||
|
pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let sender = Box::new(InputSender::new(input, address.into().0));
|
||||||
|
self.broadcaster.write().unwrap().add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a connection to an event sink such as an
|
||||||
|
/// [`EventSlot`](crate::ports::EventSlot) or
|
||||||
|
/// [`EventBuffer`](crate::ports::EventBuffer).
|
||||||
|
pub fn connect_sink<S: EventSink<T>>(&mut self, sink: &S) -> LineId {
|
||||||
|
let sender = Box::new(EventSinkSender::new(sink.writer()));
|
||||||
|
self.broadcaster.write().unwrap().add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the connection specified by the `LineId` parameter.
|
||||||
|
///
|
||||||
|
/// It is a logic error to specify a line identifier from another
|
||||||
|
/// [`Output`], [`Requestor`], [`EventSource`](crate::ports::EventSource) or
|
||||||
|
/// [`QuerySource`](crate::ports::QuerySource) instance and may result in
|
||||||
|
/// the disconnection of an arbitrary endpoint.
|
||||||
|
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||||
|
if self.broadcaster.write().unwrap().remove(line_id) {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(LineError {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all connections.
|
||||||
|
pub fn disconnect_all(&mut self) {
|
||||||
|
self.broadcaster.write().unwrap().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Broadcasts an event to all connected input ports.
|
||||||
|
pub async fn send(&mut self, arg: T) {
|
||||||
|
let broadcaster = self.broadcaster.write_scratchpad().unwrap();
|
||||||
|
broadcaster.broadcast(arg).await.unwrap();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> Default for Output<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
broadcaster: CachedRwLock::new(EventBroadcaster::default()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Output ({} connected ports)",
|
||||||
|
self.broadcaster.read_unsync().len()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A requestor port.
|
||||||
|
///
|
||||||
|
/// `Requestor` ports can be connected to replier ports, i.e. to asynchronous
|
||||||
|
/// model methods that return a value. They broadcast queries to all connected
|
||||||
|
/// replier ports.
|
||||||
|
///
|
||||||
|
/// When a `Requestor` is cloned, the information on connected ports remains
|
||||||
|
/// shared and therefore all clones use and modify the same list of connected
|
||||||
|
/// ports.
|
||||||
|
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
|
||||||
|
broadcaster: CachedRwLock<QueryBroadcaster<T, R>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
|
||||||
|
/// Creates a new, disconnected `Requestor` port.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a connection to a replier port of the model specified by the
|
||||||
|
/// address.
|
||||||
|
///
|
||||||
|
/// The replier port must be an asynchronous method of a model of type `M`
|
||||||
|
/// returning a value of type `R` and taking as argument a value of type `T`
|
||||||
|
/// plus, optionally, a scheduler reference.
|
||||||
|
pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let sender = Box::new(ReplierSender::new(replier, address.into().0));
|
||||||
|
self.broadcaster.write().unwrap().add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the connection specified by the `LineId` parameter.
|
||||||
|
///
|
||||||
|
/// It is a logic error to specify a line identifier from another
|
||||||
|
/// [`Requestor`], [`Output`], [`EventSource`](crate::ports::EventSource) or
|
||||||
|
/// [`QuerySource`](crate::ports::QuerySource) instance and may result in
|
||||||
|
/// the disconnection of an arbitrary endpoint.
|
||||||
|
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||||
|
if self.broadcaster.write().unwrap().remove(line_id) {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(LineError {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all connections.
|
||||||
|
pub fn disconnect_all(&mut self) {
|
||||||
|
self.broadcaster.write().unwrap().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Broadcasts a query to all connected replier ports.
|
||||||
|
pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
|
||||||
|
self.broadcaster
|
||||||
|
.write_scratchpad()
|
||||||
|
.unwrap()
|
||||||
|
.broadcast(arg)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
broadcaster: CachedRwLock::new(QueryBroadcaster::default()),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Requestor ({} connected ports)",
|
||||||
|
self.broadcaster.read_unsync().len()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
@ -8,61 +8,50 @@ use recycle_box::{coerce_box, RecycleBox};
|
|||||||
|
|
||||||
use super::sender::{SendError, Sender};
|
use super::sender::{SendError, Sender};
|
||||||
use super::LineId;
|
use super::LineId;
|
||||||
use task_set::TaskSet;
|
use crate::util::task_set::TaskSet;
|
||||||
|
|
||||||
mod task_set;
|
|
||||||
|
|
||||||
/// An object that can efficiently broadcast messages to several addresses.
|
/// An object that can efficiently broadcast messages to several addresses.
|
||||||
///
|
///
|
||||||
|
/// This is very similar to `source::broadcaster::BroadcasterInner`, but
|
||||||
|
/// generates non-owned futures instead.
|
||||||
|
///
|
||||||
/// This object maintains a list of senders associated to each target address.
|
/// This object maintains a list of senders associated to each target address.
|
||||||
/// When a message is broadcasted, the sender futures are awaited in parallel.
|
/// When a message is broadcast, the sender futures are awaited in parallel.
|
||||||
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
|
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
|
||||||
/// does, but with some key differences:
|
/// does, but with some key differences:
|
||||||
///
|
///
|
||||||
/// - tasks and future storage are reusable to avoid repeated allocation, so
|
/// - tasks and future storage are reusable to avoid repeated allocation, so
|
||||||
/// allocation occurs only after a new sender is added,
|
/// allocation occurs only after a new sender is added,
|
||||||
/// - the outputs of all sender futures are returned all at once rather than
|
/// - the outputs of all sender futures are returned all at once rather than
|
||||||
/// with an asynchronous iterator (a.k.a. async stream); the implementation
|
/// with an asynchronous iterator (a.k.a. async stream).
|
||||||
/// exploits this behavior by waking the main broadcast future only when all
|
pub(super) struct BroadcasterInner<T: Clone, R> {
|
||||||
/// sender futures have been awaken, which strongly reduces overhead since
|
/// Line identifier for the next port to be connected.
|
||||||
/// waking a sender task does not actually schedule it on the executor.
|
next_line_id: u64,
|
||||||
pub(super) struct Broadcaster<T: Clone + 'static, R: 'static> {
|
|
||||||
/// The list of senders with their associated line identifier.
|
/// The list of senders with their associated line identifier.
|
||||||
senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
|
senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
|
||||||
/// Fields explicitly borrowed by the `BroadcastFuture`.
|
/// Fields explicitly borrowed by the `BroadcastFuture`.
|
||||||
shared: Shared<R>,
|
shared: Shared<R>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Clone + 'static> Broadcaster<T, ()> {
|
impl<T: Clone, R> BroadcasterInner<T, R> {
|
||||||
/// Broadcasts an event to all addresses.
|
|
||||||
pub(super) async fn broadcast_event(&mut self, arg: T) -> Result<(), BroadcastError> {
|
|
||||||
match self.senders.as_mut_slice() {
|
|
||||||
// No sender.
|
|
||||||
[] => Ok(()),
|
|
||||||
// One sender.
|
|
||||||
[sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}),
|
|
||||||
// Multiple senders.
|
|
||||||
_ => self.broadcast(arg).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + 'static, R> Broadcaster<T, R> {
|
|
||||||
/// Adds a new sender associated to the specified identifier.
|
/// Adds a new sender associated to the specified identifier.
|
||||||
///
|
///
|
||||||
/// # Panics
|
/// # Panics
|
||||||
///
|
///
|
||||||
/// This method will panic if the total count of senders would reach
|
/// This method will panic if the total count of senders would reach
|
||||||
/// `u32::MAX - 1`.
|
/// `u32::MAX - 1`.
|
||||||
pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
|
pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) -> LineId {
|
||||||
self.senders.push((id, sender));
|
assert!(self.next_line_id != u64::MAX);
|
||||||
|
let line_id = LineId(self.next_line_id);
|
||||||
|
self.next_line_id += 1;
|
||||||
|
|
||||||
self.shared.futures_env.push(FutureEnv {
|
self.senders.push((line_id, sender));
|
||||||
storage: None,
|
|
||||||
output: None,
|
self.shared.futures_env.push(FutureEnv::default());
|
||||||
});
|
|
||||||
|
|
||||||
self.shared.task_set.resize(self.senders.len());
|
self.shared.task_set.resize(self.senders.len());
|
||||||
|
|
||||||
|
line_id
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes the first sender with the specified identifier, if any.
|
/// Removes the first sender with the specified identifier, if any.
|
||||||
@ -93,55 +82,25 @@ impl<T: Clone + 'static, R> Broadcaster<T, R> {
|
|||||||
self.senders.len()
|
self.senders.len()
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Broadcasts a query to all addresses and collect all responses.
|
|
||||||
pub(super) async fn broadcast_query(
|
|
||||||
&mut self,
|
|
||||||
arg: T,
|
|
||||||
) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
|
|
||||||
match self.senders.as_mut_slice() {
|
|
||||||
// No sender.
|
|
||||||
[] => {}
|
|
||||||
// One sender.
|
|
||||||
[sender] => {
|
|
||||||
let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
|
|
||||||
self.shared.futures_env[0].output = Some(output);
|
|
||||||
}
|
|
||||||
// Multiple senders.
|
|
||||||
_ => self.broadcast(arg).await?,
|
|
||||||
};
|
|
||||||
|
|
||||||
// At this point all outputs should be available so `unwrap` can be
|
|
||||||
// called on the output of each future.
|
|
||||||
let outputs = self
|
|
||||||
.shared
|
|
||||||
.futures_env
|
|
||||||
.iter_mut()
|
|
||||||
.map(|t| t.output.take().unwrap());
|
|
||||||
|
|
||||||
Ok(outputs)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Efficiently broadcasts a message or a query to multiple addresses.
|
/// Efficiently broadcasts a message or a query to multiple addresses.
|
||||||
///
|
///
|
||||||
/// This method does not collect the responses from queries.
|
/// This method does not collect the responses from queries.
|
||||||
fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> {
|
fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> {
|
||||||
let futures_count = self.senders.len();
|
|
||||||
let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default());
|
let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default());
|
||||||
|
|
||||||
// Broadcast the message and collect all futures.
|
// Broadcast the message and collect all futures.
|
||||||
for (i, (sender, futures_env)) in self
|
let mut iter = self
|
||||||
.senders
|
.senders
|
||||||
.iter_mut()
|
.iter_mut()
|
||||||
.zip(self.shared.futures_env.iter_mut())
|
.zip(self.shared.futures_env.iter_mut());
|
||||||
.enumerate()
|
while let Some((sender, futures_env)) = iter.next() {
|
||||||
{
|
|
||||||
let future_cache = futures_env
|
let future_cache = futures_env
|
||||||
.storage
|
.storage
|
||||||
.take()
|
.take()
|
||||||
.unwrap_or_else(|| RecycleBox::new(()));
|
.unwrap_or_else(|| RecycleBox::new(()));
|
||||||
|
|
||||||
// Move the argument rather than clone it for the last future.
|
// Move the argument rather than clone it for the last future.
|
||||||
if i + 1 == futures_count {
|
if iter.len() == 0 {
|
||||||
let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> =
|
let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> =
|
||||||
coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg)));
|
coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg)));
|
||||||
|
|
||||||
@ -161,13 +120,14 @@ impl<T: Clone + 'static, R> Broadcaster<T, R> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
|
impl<T: Clone, R> Default for BroadcasterInner<T, R> {
|
||||||
/// Creates an empty `Broadcaster` object.
|
/// Creates an empty `Broadcaster` object.
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
let wake_sink = WakeSink::new();
|
let wake_sink = WakeSink::new();
|
||||||
let wake_src = wake_sink.source();
|
let wake_src = wake_sink.source();
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
|
next_line_id: 0,
|
||||||
senders: Vec::new(),
|
senders: Vec::new(),
|
||||||
shared: Shared {
|
shared: Shared {
|
||||||
wake_sink,
|
wake_sink,
|
||||||
@ -179,6 +139,164 @@ impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> Clone for BroadcasterInner<T, R> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
next_line_id: self.next_line_id,
|
||||||
|
senders: self.senders.clone(),
|
||||||
|
shared: self.shared.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can efficiently broadcast events to several input ports.
|
||||||
|
///
|
||||||
|
/// This is very similar to `source::broadcaster::EventBroadcaster`, but
|
||||||
|
/// generates non-owned futures instead.
|
||||||
|
///
|
||||||
|
/// See `BroadcasterInner` for implementation details.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub(super) struct EventBroadcaster<T: Clone> {
|
||||||
|
/// The broadcaster core object.
|
||||||
|
inner: BroadcasterInner<T, ()>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone> EventBroadcaster<T> {
|
||||||
|
/// Adds a new sender associated to the specified identifier.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// This method will panic if the total count of senders would reach
|
||||||
|
/// `u32::MAX - 1`.
|
||||||
|
pub(super) fn add(&mut self, sender: Box<dyn Sender<T, ()>>) -> LineId {
|
||||||
|
self.inner.add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the first sender with the specified identifier, if any.
|
||||||
|
///
|
||||||
|
/// Returns `true` if there was indeed a sender associated to the specified
|
||||||
|
/// identifier.
|
||||||
|
pub(super) fn remove(&mut self, id: LineId) -> bool {
|
||||||
|
self.inner.remove(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all senders.
|
||||||
|
pub(super) fn clear(&mut self) {
|
||||||
|
self.inner.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the number of connected senders.
|
||||||
|
pub(super) fn len(&self) -> usize {
|
||||||
|
self.inner.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Broadcasts an event to all addresses.
|
||||||
|
pub(super) async fn broadcast(&mut self, arg: T) -> Result<(), BroadcastError> {
|
||||||
|
match self.inner.senders.as_mut_slice() {
|
||||||
|
// No sender.
|
||||||
|
[] => Ok(()),
|
||||||
|
// One sender.
|
||||||
|
[sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}),
|
||||||
|
// Multiple senders.
|
||||||
|
_ => self.inner.broadcast(arg).await,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone> Default for EventBroadcaster<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: BroadcasterInner::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can efficiently broadcast queries to several replier ports.
|
||||||
|
///
|
||||||
|
/// This is very similar to `source::broadcaster::QueryBroadcaster`, but
|
||||||
|
/// generates non-owned futures instead.
|
||||||
|
///
|
||||||
|
/// See `BroadcasterInner` for implementation details.
|
||||||
|
pub(super) struct QueryBroadcaster<T: Clone, R> {
|
||||||
|
/// The broadcaster core object.
|
||||||
|
inner: BroadcasterInner<T, R>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> QueryBroadcaster<T, R> {
|
||||||
|
/// Adds a new sender associated to the specified identifier.
|
||||||
|
///
|
||||||
|
/// # Panics
|
||||||
|
///
|
||||||
|
/// This method will panic if the total count of senders would reach
|
||||||
|
/// `u32::MAX - 1`.
|
||||||
|
pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) -> LineId {
|
||||||
|
self.inner.add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the first sender with the specified identifier, if any.
|
||||||
|
///
|
||||||
|
/// Returns `true` if there was indeed a sender associated to the specified
|
||||||
|
/// identifier.
|
||||||
|
pub(super) fn remove(&mut self, id: LineId) -> bool {
|
||||||
|
self.inner.remove(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all senders.
|
||||||
|
pub(super) fn clear(&mut self) {
|
||||||
|
self.inner.clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the number of connected senders.
|
||||||
|
pub(super) fn len(&self) -> usize {
|
||||||
|
self.inner.len()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Broadcasts a query to all addresses and collect all responses.
|
||||||
|
pub(super) async fn broadcast(
|
||||||
|
&mut self,
|
||||||
|
arg: T,
|
||||||
|
) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
|
||||||
|
match self.inner.senders.as_mut_slice() {
|
||||||
|
// No sender.
|
||||||
|
[] => {}
|
||||||
|
// One sender.
|
||||||
|
[sender] => {
|
||||||
|
let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
|
||||||
|
self.inner.shared.futures_env[0].output = Some(output);
|
||||||
|
}
|
||||||
|
// Multiple senders.
|
||||||
|
_ => self.inner.broadcast(arg).await?,
|
||||||
|
};
|
||||||
|
|
||||||
|
// At this point all outputs should be available so `unwrap` can be
|
||||||
|
// called on the output of each future.
|
||||||
|
let outputs = self
|
||||||
|
.inner
|
||||||
|
.shared
|
||||||
|
.futures_env
|
||||||
|
.iter_mut()
|
||||||
|
.map(|t| t.output.take().unwrap());
|
||||||
|
|
||||||
|
Ok(outputs)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> Default for QueryBroadcaster<T, R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: BroadcasterInner::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> Clone for QueryBroadcaster<T, R> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Data related to a sender future.
|
/// Data related to a sender future.
|
||||||
struct FutureEnv<R> {
|
struct FutureEnv<R> {
|
||||||
/// Cached storage for the future.
|
/// Cached storage for the future.
|
||||||
@ -187,6 +305,15 @@ struct FutureEnv<R> {
|
|||||||
output: Option<R>,
|
output: Option<R>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<R> Default for FutureEnv<R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
storage: None,
|
||||||
|
output: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A type-erased `Send` future wrapped in a `RecycleBox`.
|
/// A type-erased `Send` future wrapped in a `RecycleBox`.
|
||||||
type RecycleBoxFuture<'a, R> = RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + 'a>;
|
type RecycleBoxFuture<'a, R> = RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + 'a>;
|
||||||
|
|
||||||
@ -206,14 +333,29 @@ struct Shared<R> {
|
|||||||
storage: Option<Vec<Pin<RecycleBoxFuture<'static, R>>>>,
|
storage: Option<Vec<Pin<RecycleBoxFuture<'static, R>>>>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<R> Clone for Shared<R> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
let wake_sink = WakeSink::new();
|
||||||
|
let wake_src = wake_sink.source();
|
||||||
|
|
||||||
|
let mut futures_env = Vec::new();
|
||||||
|
futures_env.resize_with(self.futures_env.len(), Default::default);
|
||||||
|
|
||||||
|
Self {
|
||||||
|
wake_sink,
|
||||||
|
task_set: TaskSet::with_len(wake_src, self.task_set.len()),
|
||||||
|
futures_env,
|
||||||
|
storage: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// A future aggregating the outputs of a collection of sender futures.
|
/// A future aggregating the outputs of a collection of sender futures.
|
||||||
///
|
///
|
||||||
/// The idea is to join all sender futures as efficiently as possible, meaning:
|
/// The idea is to join all sender futures as efficiently as possible, meaning:
|
||||||
///
|
///
|
||||||
/// - the sender futures are polled simultaneously rather than waiting for their
|
/// - the sender futures are polled simultaneously rather than waiting for their
|
||||||
/// completion in a sequential manner,
|
/// completion in a sequential manner,
|
||||||
/// - this future is never woken if it can be proven that at least one of the
|
|
||||||
/// individual sender task will still be awaken,
|
|
||||||
/// - the storage allocated for the sender futures is always returned to the
|
/// - the storage allocated for the sender futures is always returned to the
|
||||||
/// `Broadcast` object so it can be reused by the next future,
|
/// `Broadcast` object so it can be reused by the next future,
|
||||||
/// - the happy path (all futures immediately ready) is very fast.
|
/// - the happy path (all futures immediately ready) is very fast.
|
||||||
@ -231,9 +373,9 @@ pub(super) struct BroadcastFuture<'a, R> {
|
|||||||
impl<'a, R> BroadcastFuture<'a, R> {
|
impl<'a, R> BroadcastFuture<'a, R> {
|
||||||
/// Creates a new `BroadcastFuture`.
|
/// Creates a new `BroadcastFuture`.
|
||||||
fn new(shared: &'a mut Shared<R>, futures: Vec<Pin<RecycleBoxFuture<'a, R>>>) -> Self {
|
fn new(shared: &'a mut Shared<R>, futures: Vec<Pin<RecycleBoxFuture<'a, R>>>) -> Self {
|
||||||
let futures_count = futures.len();
|
let pending_futures_count = futures.len();
|
||||||
|
|
||||||
assert!(shared.futures_env.len() == futures_count);
|
assert!(shared.futures_env.len() == pending_futures_count);
|
||||||
|
|
||||||
for futures_env in shared.futures_env.iter_mut() {
|
for futures_env in shared.futures_env.iter_mut() {
|
||||||
// Drop the previous output if necessary.
|
// Drop the previous output if necessary.
|
||||||
@ -244,7 +386,7 @@ impl<'a, R> BroadcastFuture<'a, R> {
|
|||||||
shared,
|
shared,
|
||||||
futures: ManuallyDrop::new(futures),
|
futures: ManuallyDrop::new(futures),
|
||||||
state: FutureState::Uninit,
|
state: FutureState::Uninit,
|
||||||
pending_futures_count: futures_count,
|
pending_futures_count,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -276,7 +418,10 @@ impl<'a, R> Future for BroadcastFuture<'a, R> {
|
|||||||
// Poll all sender futures once if this is the first time the broadcast
|
// Poll all sender futures once if this is the first time the broadcast
|
||||||
// future is polled.
|
// future is polled.
|
||||||
if this.state == FutureState::Uninit {
|
if this.state == FutureState::Uninit {
|
||||||
// Prevent spurious wake-ups.
|
// The task set is re-used for each broadcast, so it may have some
|
||||||
|
// task scheduled due to e.g. spurious wake-ups that were triggered
|
||||||
|
// after the previous broadcast was completed. Discarding scheduled
|
||||||
|
// tasks can prevent unnecessary wake-ups.
|
||||||
this.shared.task_set.discard_scheduled();
|
this.shared.task_set.discard_scheduled();
|
||||||
|
|
||||||
for task_idx in 0..this.futures.len() {
|
for task_idx in 0..this.futures.len() {
|
||||||
@ -311,20 +456,22 @@ impl<'a, R> Future for BroadcastFuture<'a, R> {
|
|||||||
// Repeatedly poll the futures of all scheduled tasks until there are no
|
// Repeatedly poll the futures of all scheduled tasks until there are no
|
||||||
// more scheduled tasks.
|
// more scheduled tasks.
|
||||||
loop {
|
loop {
|
||||||
// Only register the waker if it is probable that we won't find any
|
// No need to register the waker if some tasks have been scheduled.
|
||||||
// scheduled task.
|
|
||||||
if !this.shared.task_set.has_scheduled() {
|
if !this.shared.task_set.has_scheduled() {
|
||||||
this.shared.wake_sink.register(cx.waker());
|
this.shared.wake_sink.register(cx.waker());
|
||||||
}
|
}
|
||||||
|
|
||||||
// Retrieve the indices of the scheduled tasks if any. If there are
|
// Retrieve the indices of the scheduled tasks if any. If there are
|
||||||
// no scheduled tasks, `Poll::Pending` is returned and this future
|
// no scheduled tasks, `Poll::Pending` is returned and this future
|
||||||
// will be awaken again when enough tasks have been scheduled.
|
// will be awaken again when enough tasks have been awaken.
|
||||||
let scheduled_tasks = match this
|
//
|
||||||
.shared
|
// NOTE: the current implementation requires a notification to be
|
||||||
.task_set
|
// sent each time a sub-future has made progress. We may try at some
|
||||||
.steal_scheduled(this.pending_futures_count)
|
// point to benchmark an alternative strategy where a notification
|
||||||
{
|
// is requested only when all pending sub-futures have made progress,
|
||||||
|
// using `take_scheduled(this.pending_futures_count)`. This would
|
||||||
|
// reduce the cost of context switch but could hurt latency.
|
||||||
|
let scheduled_tasks = match this.shared.task_set.take_scheduled(1) {
|
||||||
Some(st) => st,
|
Some(st) => st,
|
||||||
None => return Poll::Pending,
|
None => return Poll::Pending,
|
||||||
};
|
};
|
||||||
@ -403,18 +550,20 @@ fn recycle_vec<T, U>(mut v: Vec<T>) -> Vec<U> {
|
|||||||
#[cfg(all(test, not(asynchronix_loom)))]
|
#[cfg(all(test, not(asynchronix_loom)))]
|
||||||
mod tests {
|
mod tests {
|
||||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
use std::thread;
|
use std::thread;
|
||||||
|
|
||||||
use futures_executor::block_on;
|
use futures_executor::block_on;
|
||||||
|
|
||||||
use crate::channel::Receiver;
|
use crate::channel::Receiver;
|
||||||
use crate::time::Scheduler;
|
use crate::model::Context;
|
||||||
use crate::time::{MonotonicTime, TearableAtomicTime};
|
use crate::time::{MonotonicTime, TearableAtomicTime};
|
||||||
use crate::util::priority_queue::PriorityQueue;
|
use crate::util::priority_queue::PriorityQueue;
|
||||||
use crate::util::sync_cell::SyncCell;
|
use crate::util::sync_cell::SyncCell;
|
||||||
|
|
||||||
use super::super::*;
|
use super::super::sender::{InputSender, ReplierSender};
|
||||||
use super::*;
|
use super::*;
|
||||||
|
use crate::model::Model;
|
||||||
|
|
||||||
struct Counter {
|
struct Counter {
|
||||||
inner: Arc<AtomicUsize>,
|
inner: Arc<AtomicUsize>,
|
||||||
@ -438,18 +587,18 @@ mod tests {
|
|||||||
const N_RECV: usize = 4;
|
const N_RECV: usize = 4;
|
||||||
|
|
||||||
let mut mailboxes = Vec::new();
|
let mut mailboxes = Vec::new();
|
||||||
let mut broadcaster = Broadcaster::default();
|
let mut broadcaster = EventBroadcaster::default();
|
||||||
for id in 0..N_RECV {
|
for _ in 0..N_RECV {
|
||||||
let mailbox = Receiver::new(10);
|
let mailbox = Receiver::new(10);
|
||||||
let address = mailbox.sender();
|
let address = mailbox.sender();
|
||||||
let sender = Box::new(EventSender::new(Counter::inc, address));
|
let sender = Box::new(InputSender::new(Counter::inc, address));
|
||||||
|
|
||||||
broadcaster.add(sender, LineId(id as u64));
|
broadcaster.add(sender);
|
||||||
mailboxes.push(mailbox);
|
mailboxes.push(mailbox);
|
||||||
}
|
}
|
||||||
|
|
||||||
let th_broadcast = thread::spawn(move || {
|
let th_broadcast = thread::spawn(move || {
|
||||||
block_on(broadcaster.broadcast_event(1)).unwrap();
|
block_on(broadcaster.broadcast(1)).unwrap();
|
||||||
});
|
});
|
||||||
|
|
||||||
let counter = Arc::new(AtomicUsize::new(0));
|
let counter = Arc::new(AtomicUsize::new(0));
|
||||||
@ -465,9 +614,13 @@ mod tests {
|
|||||||
let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
|
let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
|
||||||
let dummy_time =
|
let dummy_time =
|
||||||
SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
|
SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
|
||||||
let dummy_scheduler =
|
let dummy_context = Context::new(
|
||||||
Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
|
String::new(),
|
||||||
block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
|
dummy_address,
|
||||||
|
dummy_priority_queue,
|
||||||
|
dummy_time,
|
||||||
|
);
|
||||||
|
block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap();
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@ -486,18 +639,18 @@ mod tests {
|
|||||||
const N_RECV: usize = 4;
|
const N_RECV: usize = 4;
|
||||||
|
|
||||||
let mut mailboxes = Vec::new();
|
let mut mailboxes = Vec::new();
|
||||||
let mut broadcaster = Broadcaster::default();
|
let mut broadcaster = QueryBroadcaster::default();
|
||||||
for id in 0..N_RECV {
|
for _ in 0..N_RECV {
|
||||||
let mailbox = Receiver::new(10);
|
let mailbox = Receiver::new(10);
|
||||||
let address = mailbox.sender();
|
let address = mailbox.sender();
|
||||||
let sender = Box::new(QuerySender::new(Counter::fetch_inc, address));
|
let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address));
|
||||||
|
|
||||||
broadcaster.add(sender, LineId(id as u64));
|
broadcaster.add(sender);
|
||||||
mailboxes.push(mailbox);
|
mailboxes.push(mailbox);
|
||||||
}
|
}
|
||||||
|
|
||||||
let th_broadcast = thread::spawn(move || {
|
let th_broadcast = thread::spawn(move || {
|
||||||
let iter = block_on(broadcaster.broadcast_query(1)).unwrap();
|
let iter = block_on(broadcaster.broadcast(1)).unwrap();
|
||||||
let sum = iter.fold(0, |acc, val| acc + val);
|
let sum = iter.fold(0, |acc, val| acc + val);
|
||||||
|
|
||||||
assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
|
assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
|
||||||
@ -516,9 +669,13 @@ mod tests {
|
|||||||
let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
|
let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
|
||||||
let dummy_time =
|
let dummy_time =
|
||||||
SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
|
SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
|
||||||
let dummy_scheduler =
|
let dummy_context = Context::new(
|
||||||
Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
|
String::new(),
|
||||||
block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
|
dummy_address,
|
||||||
|
dummy_priority_queue,
|
||||||
|
dummy_time,
|
||||||
|
);
|
||||||
|
block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap();
|
||||||
thread::sleep(std::time::Duration::from_millis(100));
|
thread::sleep(std::time::Duration::from_millis(100));
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
@ -566,6 +723,12 @@ mod tests {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<R> Clone for TestEvent<R> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
unreachable!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// An object that can wake a `TestEvent`.
|
// An object that can wake a `TestEvent`.
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
struct TestEventWaker<R> {
|
struct TestEventWaker<R> {
|
||||||
@ -606,12 +769,12 @@ mod tests {
|
|||||||
let (test_event2, waker2) = test_event::<usize>();
|
let (test_event2, waker2) = test_event::<usize>();
|
||||||
let (test_event3, waker3) = test_event::<usize>();
|
let (test_event3, waker3) = test_event::<usize>();
|
||||||
|
|
||||||
let mut broadcaster = Broadcaster::default();
|
let mut broadcaster = QueryBroadcaster::default();
|
||||||
broadcaster.add(Box::new(test_event1), LineId(1));
|
broadcaster.add(Box::new(test_event1));
|
||||||
broadcaster.add(Box::new(test_event2), LineId(2));
|
broadcaster.add(Box::new(test_event2));
|
||||||
broadcaster.add(Box::new(test_event3), LineId(3));
|
broadcaster.add(Box::new(test_event3));
|
||||||
|
|
||||||
let mut fut = Box::pin(broadcaster.broadcast_query(()));
|
let mut fut = Box::pin(broadcaster.broadcast(()));
|
||||||
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
|
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
|
||||||
let is_scheduled_waker = is_scheduled.clone();
|
let is_scheduled_waker = is_scheduled.clone();
|
||||||
|
|
||||||
@ -626,7 +789,6 @@ mod tests {
|
|||||||
let th2 = thread::spawn(move || waker2.wake_final(7));
|
let th2 = thread::spawn(move || waker2.wake_final(7));
|
||||||
let th3 = thread::spawn(move || waker3.wake_final(42));
|
let th3 = thread::spawn(move || waker3.wake_final(42));
|
||||||
|
|
||||||
let mut schedule_count = 0;
|
|
||||||
loop {
|
loop {
|
||||||
match fut.as_mut().poll(&mut cx) {
|
match fut.as_mut().poll(&mut cx) {
|
||||||
Poll::Ready(Ok(mut res)) => {
|
Poll::Ready(Ok(mut res)) => {
|
||||||
@ -645,8 +807,6 @@ mod tests {
|
|||||||
if !is_scheduled.swap(false, Ordering::Acquire) {
|
if !is_scheduled.swap(false, Ordering::Acquire) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
schedule_count += 1;
|
|
||||||
assert!(schedule_count <= 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
th1.join().unwrap();
|
th1.join().unwrap();
|
||||||
@ -681,11 +841,11 @@ mod tests {
|
|||||||
let (test_event1, waker1) = test_event::<usize>();
|
let (test_event1, waker1) = test_event::<usize>();
|
||||||
let (test_event2, waker2) = test_event::<usize>();
|
let (test_event2, waker2) = test_event::<usize>();
|
||||||
|
|
||||||
let mut broadcaster = Broadcaster::default();
|
let mut broadcaster = QueryBroadcaster::default();
|
||||||
broadcaster.add(Box::new(test_event1), LineId(1));
|
broadcaster.add(Box::new(test_event1));
|
||||||
broadcaster.add(Box::new(test_event2), LineId(2));
|
broadcaster.add(Box::new(test_event2));
|
||||||
|
|
||||||
let mut fut = Box::pin(broadcaster.broadcast_query(()));
|
let mut fut = Box::pin(broadcaster.broadcast(()));
|
||||||
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
|
let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
|
||||||
let is_scheduled_waker = is_scheduled.clone();
|
let is_scheduled_waker = is_scheduled.clone();
|
||||||
|
|
||||||
@ -701,7 +861,6 @@ mod tests {
|
|||||||
let th2 = thread::spawn(move || waker2.wake_final(7));
|
let th2 = thread::spawn(move || waker2.wake_final(7));
|
||||||
let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());
|
let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());
|
||||||
|
|
||||||
let mut schedule_count = 0;
|
|
||||||
loop {
|
loop {
|
||||||
match fut.as_mut().poll(&mut cx) {
|
match fut.as_mut().poll(&mut cx) {
|
||||||
Poll::Ready(Ok(mut res)) => {
|
Poll::Ready(Ok(mut res)) => {
|
||||||
@ -719,8 +878,6 @@ mod tests {
|
|||||||
if !is_scheduled.swap(false, Ordering::Acquire) {
|
if !is_scheduled.swap(false, Ordering::Acquire) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
schedule_count += 1;
|
|
||||||
assert!(schedule_count <= 2);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
th1.join().unwrap();
|
th1.join().unwrap();
|
@ -4,22 +4,31 @@ use std::future::Future;
|
|||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::mem::ManuallyDrop;
|
use std::mem::ManuallyDrop;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
|
use dyn_clone::DynClone;
|
||||||
use recycle_box::{coerce_box, RecycleBox};
|
use recycle_box::{coerce_box, RecycleBox};
|
||||||
|
|
||||||
use crate::channel;
|
use crate::channel;
|
||||||
use crate::model::{InputFn, Model, ReplierFn};
|
use crate::model::Model;
|
||||||
use crate::util::spsc_queue;
|
use crate::ports::{EventSinkWriter, InputFn, ReplierFn};
|
||||||
|
|
||||||
/// Abstraction over `EventSender` and `QuerySender`.
|
/// An event or query sender abstracting over the target model and input or
|
||||||
pub(super) trait Sender<T, R>: Send {
|
/// replier method.
|
||||||
|
pub(super) trait Sender<T, R>: DynClone + Send {
|
||||||
|
/// Asynchronously send the event or request.
|
||||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>>;
|
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An object that can send a payload to a model.
|
dyn_clone::clone_trait_object!(<T, R> Sender<T, R>);
|
||||||
pub(super) struct EventSender<M: 'static, F, T, S> {
|
|
||||||
|
/// An object that can send events to an input port.
|
||||||
|
pub(super) struct InputSender<M: 'static, F, T, S>
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + 'static,
|
||||||
|
{
|
||||||
func: F,
|
func: F,
|
||||||
sender: channel::Sender<M>,
|
sender: channel::Sender<M>,
|
||||||
fut_storage: Option<RecycleBox<()>>,
|
fut_storage: Option<RecycleBox<()>>,
|
||||||
@ -27,7 +36,7 @@ pub(super) struct EventSender<M: 'static, F, T, S> {
|
|||||||
_phantom_closure_marker: PhantomData<S>,
|
_phantom_closure_marker: PhantomData<S>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<M: Send, F, T, S> EventSender<M, F, T, S>
|
impl<M: Send, F, T, S> InputSender<M, F, T, S>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
@ -44,15 +53,15 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<M: Send, F, T, S> Sender<T, ()> for EventSender<M, F, T, S>
|
impl<M: Send, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Copy,
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
T: Send + 'static,
|
T: Send + 'static,
|
||||||
S: Send,
|
S: Send + 'static,
|
||||||
{
|
{
|
||||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
||||||
let func = self.func;
|
let func = self.func.clone();
|
||||||
|
|
||||||
let fut = self.sender.send(move |model, scheduler, recycle_box| {
|
let fut = self.sender.send(move |model, scheduler, recycle_box| {
|
||||||
let fut = func.call(model, arg, scheduler);
|
let fut = func.call(model, arg, scheduler);
|
||||||
@ -66,8 +75,26 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An object that can send a payload to a model and retrieve a response.
|
impl<M: Send, F, T, S> Clone for InputSender<M, F, T, S>
|
||||||
pub(super) struct QuerySender<M: 'static, F, T, R, S> {
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
T: Send + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
func: self.func.clone(),
|
||||||
|
sender: self.sender.clone(),
|
||||||
|
fut_storage: None,
|
||||||
|
_phantom_closure: PhantomData,
|
||||||
|
_phantom_closure_marker: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can send a request to a replier port and retrieve a response.
|
||||||
|
pub(super) struct ReplierSender<M: 'static, F, T, R, S> {
|
||||||
func: F,
|
func: F,
|
||||||
sender: channel::Sender<M>,
|
sender: channel::Sender<M>,
|
||||||
receiver: multishot::Receiver<R>,
|
receiver: multishot::Receiver<R>,
|
||||||
@ -76,7 +103,7 @@ pub(super) struct QuerySender<M: 'static, F, T, R, S> {
|
|||||||
_phantom_closure_marker: PhantomData<S>,
|
_phantom_closure_marker: PhantomData<S>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<M, F, T, R, S> QuerySender<M, F, T, R, S>
|
impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> ReplierFn<'a, M, T, R, S>,
|
F: for<'a> ReplierFn<'a, M, T, R, S>,
|
||||||
@ -95,16 +122,16 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<M, F, T, R, S> Sender<T, R> for QuerySender<M, F, T, R, S>
|
impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
|
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
|
||||||
T: Send + 'static,
|
T: Send + 'static,
|
||||||
R: Send + 'static,
|
R: Send + 'static,
|
||||||
S: Send,
|
S: Send,
|
||||||
{
|
{
|
||||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>> {
|
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>> {
|
||||||
let func = self.func;
|
let func = self.func.clone();
|
||||||
let sender = &mut self.sender;
|
let sender = &mut self.sender;
|
||||||
let reply_receiver = &mut self.receiver;
|
let reply_receiver = &mut self.receiver;
|
||||||
let fut_storage = &mut self.fut_storage;
|
let fut_storage = &mut self.fut_storage;
|
||||||
@ -134,67 +161,75 @@ where
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An object that can send a payload to an unbounded queue.
|
impl<M, F, T, R, S> Clone for ReplierSender<M, F, T, R, S>
|
||||||
pub(super) struct EventStreamSender<T> {
|
where
|
||||||
producer: spsc_queue::Producer<T>,
|
M: Model,
|
||||||
fut_storage: Option<RecycleBox<()>>,
|
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
|
||||||
}
|
T: Send + 'static,
|
||||||
|
R: Send + 'static,
|
||||||
impl<T> EventStreamSender<T> {
|
S: Send,
|
||||||
pub(super) fn new(producer: spsc_queue::Producer<T>) -> Self {
|
{
|
||||||
|
fn clone(&self) -> Self {
|
||||||
Self {
|
Self {
|
||||||
producer,
|
func: self.func.clone(),
|
||||||
|
sender: self.sender.clone(),
|
||||||
|
receiver: multishot::Receiver::new(),
|
||||||
fut_storage: None,
|
fut_storage: None,
|
||||||
|
_phantom_closure: PhantomData,
|
||||||
|
_phantom_closure_marker: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> Sender<T, ()> for EventStreamSender<T>
|
/// An object that can send a payload to an event sink.
|
||||||
where
|
pub(super) struct EventSinkSender<T: Send + 'static, W: EventSinkWriter<T>> {
|
||||||
T: Send + 'static,
|
writer: W,
|
||||||
{
|
|
||||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
|
||||||
let producer = &mut self.producer;
|
|
||||||
|
|
||||||
RecycledFuture::new(&mut self.fut_storage, async move {
|
|
||||||
producer.push(arg).map_err(|_| SendError {})
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An object that can send a payload to a mutex-protected slot.
|
|
||||||
pub(super) struct EventSlotSender<T> {
|
|
||||||
slot: Arc<Mutex<Option<T>>>,
|
|
||||||
fut_storage: Option<RecycleBox<()>>,
|
fut_storage: Option<RecycleBox<()>>,
|
||||||
|
_phantom_event: PhantomData<T>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> EventSlotSender<T> {
|
impl<T: Send + 'static, W: EventSinkWriter<T>> EventSinkSender<T, W> {
|
||||||
pub(super) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
|
pub(super) fn new(writer: W) -> Self {
|
||||||
Self {
|
Self {
|
||||||
slot,
|
writer,
|
||||||
fut_storage: None,
|
fut_storage: None,
|
||||||
|
_phantom_event: PhantomData,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T> Sender<T, ()> for EventSlotSender<T>
|
impl<T, W> Sender<T, ()> for EventSinkSender<T, W>
|
||||||
where
|
where
|
||||||
T: Send + 'static,
|
T: Send + 'static,
|
||||||
|
W: EventSinkWriter<T>,
|
||||||
{
|
{
|
||||||
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
|
||||||
let slot = &*self.slot;
|
let writer = &mut self.writer;
|
||||||
|
|
||||||
RecycledFuture::new(&mut self.fut_storage, async move {
|
RecycledFuture::new(&mut self.fut_storage, async move {
|
||||||
let mut slot = slot.lock().unwrap();
|
writer.write(arg);
|
||||||
*slot = Some(arg);
|
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
impl<T, W> Clone for EventSinkSender<T, W>
|
||||||
|
where
|
||||||
|
T: Send + 'static,
|
||||||
|
W: EventSinkWriter<T>,
|
||||||
|
{
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
writer: self.writer.clone(),
|
||||||
|
fut_storage: None,
|
||||||
|
_phantom_event: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// Error returned when the mailbox was closed or dropped.
|
/// Error returned when the mailbox was closed or dropped.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||||
pub(super) struct SendError {}
|
pub(super) struct SendError {}
|
||||||
|
|
||||||
impl fmt::Display for SendError {
|
impl fmt::Display for SendError {
|
57
asynchronix/src/ports/sink.rs
Normal file
57
asynchronix/src/ports/sink.rs
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
pub(crate) mod event_buffer;
|
||||||
|
pub(crate) mod event_slot;
|
||||||
|
|
||||||
|
/// A simulation endpoint that can receive events sent by model outputs.
|
||||||
|
///
|
||||||
|
/// An `EventSink` can be thought of as a self-standing input meant to
|
||||||
|
/// externally monitor the simulated system.
|
||||||
|
pub trait EventSink<T> {
|
||||||
|
/// Writer handle to an event sink.
|
||||||
|
type Writer: EventSinkWriter<T>;
|
||||||
|
|
||||||
|
/// Returns the writer handle associated to this sink.
|
||||||
|
fn writer(&self) -> Self::Writer;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A writer handle to an event sink.
|
||||||
|
pub trait EventSinkWriter<T>: Clone + Send + Sync + 'static {
|
||||||
|
/// Writes a value to the associated sink.
|
||||||
|
fn write(&self, event: T);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An iterator over collected events with the ability to pause and resume event
|
||||||
|
/// collection.
|
||||||
|
///
|
||||||
|
/// An `EventSinkStream` will typically be implemented on an `EventSink` for
|
||||||
|
/// which it will constitute a draining iterator.
|
||||||
|
pub trait EventSinkStream: Iterator {
|
||||||
|
/// Starts or resumes the collection of new events.
|
||||||
|
fn open(&mut self);
|
||||||
|
|
||||||
|
/// Pauses the collection of new events.
|
||||||
|
///
|
||||||
|
/// Events that were previously in the stream remain available.
|
||||||
|
fn close(&mut self);
|
||||||
|
|
||||||
|
/// This is a stop-gap method that serves the exact same purpose as
|
||||||
|
/// `Iterator::try_fold` but is specialized for `Result` rather than the
|
||||||
|
/// `Try` trait so it can be implemented on stable Rust.
|
||||||
|
///
|
||||||
|
/// It makes it possible to provide a faster implementation when the event
|
||||||
|
/// sink stream can be iterated over more rapidly than by repeatably calling
|
||||||
|
/// `Iterator::next`, for instance if the implementation of the stream
|
||||||
|
/// relies on a mutex that must be locked on each call.
|
||||||
|
///
|
||||||
|
/// It is not publicly implementable because it may be removed at any time
|
||||||
|
/// once the `Try` trait is stabilized, without regard for backward
|
||||||
|
/// compatibility.
|
||||||
|
#[doc(hidden)]
|
||||||
|
#[allow(private_interfaces)]
|
||||||
|
fn __try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
|
||||||
|
where
|
||||||
|
Self: Sized,
|
||||||
|
F: FnMut(B, Self::Item) -> Result<B, E>,
|
||||||
|
{
|
||||||
|
Iterator::try_fold(self, init, f)
|
||||||
|
}
|
||||||
|
}
|
148
asynchronix/src/ports/sink/event_buffer.rs
Normal file
148
asynchronix/src/ports/sink/event_buffer.rs
Normal file
@ -0,0 +1,148 @@
|
|||||||
|
use std::collections::VecDeque;
|
||||||
|
use std::fmt;
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
|
||||||
|
use super::{EventSink, EventSinkStream, EventSinkWriter};
|
||||||
|
|
||||||
|
/// The shared data of an `EventBuffer`.
|
||||||
|
struct Inner<T> {
|
||||||
|
capacity: usize,
|
||||||
|
is_open: AtomicBool,
|
||||||
|
buffer: Mutex<VecDeque<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An [`EventSink`] and [`EventSinkStream`] with a bounded size.
|
||||||
|
///
|
||||||
|
/// If the maximum capacity is exceeded, older events are overwritten. Events
|
||||||
|
/// are returned in first-in-first-out order. Note that even if the iterator
|
||||||
|
/// returns `None`, it may still produce more items in the future (in other
|
||||||
|
/// words, it is not a [`FusedIterator`](std::iter::FusedIterator)).
|
||||||
|
pub struct EventBuffer<T> {
|
||||||
|
inner: Arc<Inner<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> EventBuffer<T> {
|
||||||
|
/// Default capacity when constructed with `new`.
|
||||||
|
pub const DEFAULT_CAPACITY: usize = 16;
|
||||||
|
|
||||||
|
/// Creates an open `EventBuffer` with the default capacity.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::with_capacity(Self::DEFAULT_CAPACITY)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a closed `EventBuffer` with the default capacity.
|
||||||
|
pub fn new_closed() -> Self {
|
||||||
|
Self::with_capacity_closed(Self::DEFAULT_CAPACITY)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates an open `EventBuffer` with the specified capacity.
|
||||||
|
pub fn with_capacity(capacity: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Arc::new(Inner {
|
||||||
|
capacity,
|
||||||
|
is_open: AtomicBool::new(true),
|
||||||
|
buffer: Mutex::new(VecDeque::new()),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a closed `EventBuffer` with the specified capacity.
|
||||||
|
pub fn with_capacity_closed(capacity: usize) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Arc::new(Inner {
|
||||||
|
capacity,
|
||||||
|
is_open: AtomicBool::new(false),
|
||||||
|
buffer: Mutex::new(VecDeque::new()),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSink<T> for EventBuffer<T> {
|
||||||
|
type Writer = EventBufferWriter<T>;
|
||||||
|
|
||||||
|
fn writer(&self) -> Self::Writer {
|
||||||
|
EventBufferWriter {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Iterator for EventBuffer<T> {
|
||||||
|
type Item = T;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
self.inner.buffer.lock().unwrap().pop_front()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSinkStream for EventBuffer<T> {
|
||||||
|
fn open(&mut self) {
|
||||||
|
self.inner.is_open.store(true, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
fn close(&mut self) {
|
||||||
|
self.inner.is_open.store(false, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[doc(hidden)]
|
||||||
|
#[allow(private_interfaces)]
|
||||||
|
fn __try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
|
||||||
|
where
|
||||||
|
Self: Sized,
|
||||||
|
F: FnMut(B, Self::Item) -> Result<B, E>,
|
||||||
|
{
|
||||||
|
let mut inner = self.inner.buffer.lock().unwrap();
|
||||||
|
let mut drain = inner.drain(..);
|
||||||
|
|
||||||
|
drain.try_fold(init, f)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Default for EventBuffer<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> fmt::Debug for EventBuffer<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("EventBuffer").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A producer handle of an `EventStream`.
|
||||||
|
pub struct EventBufferWriter<T> {
|
||||||
|
inner: Arc<Inner<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSinkWriter<T> for EventBufferWriter<T> {
|
||||||
|
/// Pushes an event onto the queue.
|
||||||
|
fn write(&self, event: T) {
|
||||||
|
if !self.inner.is_open.load(Ordering::Relaxed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut buffer = self.inner.buffer.lock().unwrap();
|
||||||
|
if buffer.len() == self.inner.capacity {
|
||||||
|
buffer.pop_front();
|
||||||
|
}
|
||||||
|
|
||||||
|
buffer.push_back(event);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Clone for EventBufferWriter<T> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> fmt::Debug for EventBufferWriter<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("EventBufferWriter").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
128
asynchronix/src/ports/sink/event_slot.rs
Normal file
128
asynchronix/src/ports/sink/event_slot.rs
Normal file
@ -0,0 +1,128 @@
|
|||||||
|
use std::fmt;
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
|
||||||
|
|
||||||
|
use super::{EventSink, EventSinkStream, EventSinkWriter};
|
||||||
|
|
||||||
|
/// The shared data of an `EventBuffer`.
|
||||||
|
struct Inner<T> {
|
||||||
|
is_open: AtomicBool,
|
||||||
|
slot: Mutex<Option<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An [`EventSink`] and [`EventSinkStream`] that only keeps the last event.
|
||||||
|
///
|
||||||
|
/// Once the value is read, the iterator will return `None` until a new value is
|
||||||
|
/// received. If the slot contains a value when a new value is received, the
|
||||||
|
/// previous value is overwritten.
|
||||||
|
pub struct EventSlot<T> {
|
||||||
|
inner: Arc<Inner<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> EventSlot<T> {
|
||||||
|
/// Creates an open `EventSlot`.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Arc::new(Inner {
|
||||||
|
is_open: AtomicBool::new(true),
|
||||||
|
slot: Mutex::new(None),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a closed `EventSlot`.
|
||||||
|
pub fn new_closed() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: Arc::new(Inner {
|
||||||
|
is_open: AtomicBool::new(false),
|
||||||
|
slot: Mutex::new(None),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSink<T> for EventSlot<T> {
|
||||||
|
type Writer = EventSlotWriter<T>;
|
||||||
|
|
||||||
|
/// Returns a writer handle.
|
||||||
|
fn writer(&self) -> EventSlotWriter<T> {
|
||||||
|
EventSlotWriter {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Iterator for EventSlot<T> {
|
||||||
|
type Item = T;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
match self.inner.slot.try_lock() {
|
||||||
|
TryLockResult::Ok(mut v) => v.take(),
|
||||||
|
TryLockResult::Err(TryLockError::WouldBlock) => None,
|
||||||
|
TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSinkStream for EventSlot<T> {
|
||||||
|
fn open(&mut self) {
|
||||||
|
self.inner.is_open.store(true, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
fn close(&mut self) {
|
||||||
|
self.inner.is_open.store(false, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Default for EventSlot<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> fmt::Debug for EventSlot<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("EventSlot").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A writer handle of an `EventSlot`.
|
||||||
|
pub struct EventSlotWriter<T> {
|
||||||
|
inner: Arc<Inner<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Send + 'static> EventSinkWriter<T> for EventSlotWriter<T> {
|
||||||
|
/// Write an event into the slot.
|
||||||
|
fn write(&self, event: T) {
|
||||||
|
// Ignore if the sink is closed.
|
||||||
|
if !self.inner.is_open.load(Ordering::Relaxed) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Why do we just use `try_lock` and abandon if the lock is taken? The
|
||||||
|
// reason is that (i) the reader is never supposed to access the slot
|
||||||
|
// when the simulation runs and (ii) as a rule the simulator does not
|
||||||
|
// warrant fairness when concurrently writing to an input. Therefore, if
|
||||||
|
// the mutex is already locked when this writer attempts to lock it, it
|
||||||
|
// means another writer is concurrently writing an event, and that event
|
||||||
|
// is just as legitimate as ours so there is not need to overwrite it.
|
||||||
|
match self.inner.slot.try_lock() {
|
||||||
|
TryLockResult::Ok(mut v) => *v = Some(event),
|
||||||
|
TryLockResult::Err(TryLockError::WouldBlock) => {}
|
||||||
|
TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Clone for EventSlotWriter<T> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> fmt::Debug for EventSlotWriter<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("EventStreamWriter").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
280
asynchronix/src/ports/source.rs
Normal file
280
asynchronix/src/ports/source.rs
Normal file
@ -0,0 +1,280 @@
|
|||||||
|
mod broadcaster;
|
||||||
|
mod sender;
|
||||||
|
|
||||||
|
use std::fmt;
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use crate::model::Model;
|
||||||
|
use crate::ports::InputFn;
|
||||||
|
use crate::ports::{LineError, LineId};
|
||||||
|
use crate::simulation::{
|
||||||
|
Action, ActionKey, Address, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction,
|
||||||
|
};
|
||||||
|
use crate::util::slot;
|
||||||
|
|
||||||
|
use broadcaster::ReplyIterator;
|
||||||
|
use broadcaster::{EventBroadcaster, QueryBroadcaster};
|
||||||
|
use sender::{InputSender, ReplierSender};
|
||||||
|
|
||||||
|
use super::ReplierFn;
|
||||||
|
|
||||||
|
/// An event source port.
|
||||||
|
///
|
||||||
|
/// The `EventSource` port is similar to an [`Output`](crate::ports::Output)
|
||||||
|
/// port in that it can send events to connected input ports. It is not meant,
|
||||||
|
/// however, to be instantiated as a member of a model, but rather as a
|
||||||
|
/// simulation monitoring endpoint instantiated during bench assembly.
|
||||||
|
pub struct EventSource<T: Clone + Send + 'static> {
|
||||||
|
broadcaster: Arc<Mutex<EventBroadcaster<T>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> EventSource<T> {
|
||||||
|
/// Creates a new, disconnected `EventSource` port.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a connection to an input port of the model specified by the
|
||||||
|
/// address.
|
||||||
|
///
|
||||||
|
/// The input port must be an asynchronous method of a model of type `M`
|
||||||
|
/// taking as argument a value of type `T` plus, optionally, a scheduler
|
||||||
|
/// reference.
|
||||||
|
pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let sender = Box::new(InputSender::new(input, address.into().0));
|
||||||
|
self.broadcaster.lock().unwrap().add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the connection specified by the `LineId` parameter.
|
||||||
|
///
|
||||||
|
/// It is a logic error to specify a line identifier from another
|
||||||
|
/// [`EventSource`], [`QuerySource`], [`Output`](crate::ports::Output) or
|
||||||
|
/// [`Requestor`](crate::ports::Requestor) instance and may result in the
|
||||||
|
/// disconnection of an arbitrary endpoint.
|
||||||
|
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||||
|
if self.broadcaster.lock().unwrap().remove(line_id) {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(LineError {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all connections.
|
||||||
|
pub fn disconnect_all(&mut self) {
|
||||||
|
self.broadcaster.lock().unwrap().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an action which, when processed, broadcasts an event to all
|
||||||
|
/// connected input ports.
|
||||||
|
///
|
||||||
|
/// Note that the action broadcasts the event to those models that are
|
||||||
|
/// connected to the event source at the time the action is processed.
|
||||||
|
pub fn event(&mut self, arg: T) -> Action {
|
||||||
|
let fut = self.broadcaster.lock().unwrap().broadcast(arg);
|
||||||
|
let fut = async {
|
||||||
|
fut.await.unwrap();
|
||||||
|
};
|
||||||
|
|
||||||
|
Action::new(OnceAction::new(fut))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a cancellable action and a cancellation key; when processed, the
|
||||||
|
/// action broadcasts an event to all connected input ports.
|
||||||
|
///
|
||||||
|
/// Note that the action broadcasts the event to those models that are
|
||||||
|
/// connected to the event source at the time the action is processed.
|
||||||
|
pub fn keyed_event(&mut self, arg: T) -> (Action, ActionKey) {
|
||||||
|
let action_key = ActionKey::new();
|
||||||
|
let fut = self.broadcaster.lock().unwrap().broadcast(arg);
|
||||||
|
|
||||||
|
let action = Action::new(KeyedOnceAction::new(
|
||||||
|
// Cancellation is ignored once the action is already spawned on the
|
||||||
|
// executor. This means the action cannot be cancelled while the
|
||||||
|
// simulation is running, but since an event source is meant to be
|
||||||
|
// used outside the simulator, this shouldn't be an issue in
|
||||||
|
// practice.
|
||||||
|
|_| async {
|
||||||
|
fut.await.unwrap();
|
||||||
|
},
|
||||||
|
action_key.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
(action, action_key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a periodically recurring action which, when processed,
|
||||||
|
/// broadcasts an event to all connected input ports.
|
||||||
|
///
|
||||||
|
/// Note that the action broadcasts the event to those models that are
|
||||||
|
/// connected to the event source at the time the action is processed.
|
||||||
|
pub fn periodic_event(&mut self, period: Duration, arg: T) -> Action {
|
||||||
|
let broadcaster = self.broadcaster.clone();
|
||||||
|
|
||||||
|
Action::new(PeriodicAction::new(
|
||||||
|
|| async move {
|
||||||
|
let fut = broadcaster.lock().unwrap().broadcast(arg);
|
||||||
|
fut.await.unwrap();
|
||||||
|
},
|
||||||
|
period,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a cancellable, periodically recurring action and a cancellation
|
||||||
|
/// key; when processed, the action broadcasts an event to all connected
|
||||||
|
/// input ports.
|
||||||
|
///
|
||||||
|
/// Note that the action broadcasts the event to those models that are
|
||||||
|
/// connected to the event source at the time the action is processed.
|
||||||
|
pub fn keyed_periodic_event(&mut self, period: Duration, arg: T) -> (Action, ActionKey) {
|
||||||
|
let action_key = ActionKey::new();
|
||||||
|
let broadcaster = self.broadcaster.clone();
|
||||||
|
|
||||||
|
let action = Action::new(KeyedPeriodicAction::new(
|
||||||
|
// Cancellation is ignored once the action is already spawned on the
|
||||||
|
// executor. This means the action cannot be cancelled while the
|
||||||
|
// simulation is running, but since an event source is meant to be
|
||||||
|
// used outside the simulator, this shouldn't be an issue in
|
||||||
|
// practice.
|
||||||
|
|_| async move {
|
||||||
|
let fut = broadcaster.lock().unwrap().broadcast(arg);
|
||||||
|
fut.await.unwrap();
|
||||||
|
},
|
||||||
|
period,
|
||||||
|
action_key.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
(action, action_key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> Default for EventSource<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
broadcaster: Arc::new(Mutex::new(EventBroadcaster::default())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static> fmt::Debug for EventSource<T> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Event source ({} connected ports)",
|
||||||
|
self.broadcaster.lock().unwrap().len()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A request source port.
|
||||||
|
///
|
||||||
|
/// The `QuerySource` port is similar to an
|
||||||
|
/// [`Requestor`](crate::ports::Requestor) port in that it can send events to
|
||||||
|
/// connected input ports. It is not meant, however, to be instantiated as a
|
||||||
|
/// member of a model, but rather as a simulation monitoring endpoint
|
||||||
|
/// instantiated during bench assembly.
|
||||||
|
pub struct QuerySource<T: Clone + Send + 'static, R: Send + 'static> {
|
||||||
|
broadcaster: Arc<Mutex<QueryBroadcaster<T, R>>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> QuerySource<T, R> {
|
||||||
|
/// Creates a new, disconnected `EventSource` port.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a connection to a replier port of the model specified by the
|
||||||
|
/// address.
|
||||||
|
///
|
||||||
|
/// The replier port must be an asynchronous method of a model of type `M`
|
||||||
|
/// returning a value of type `R` and taking as argument a value of type `T`
|
||||||
|
/// plus, optionally, a scheduler reference.
|
||||||
|
pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let sender = Box::new(ReplierSender::new(replier, address.into().0));
|
||||||
|
self.broadcaster.lock().unwrap().add(sender)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes the connection specified by the `LineId` parameter.
|
||||||
|
///
|
||||||
|
/// It is a logic error to specify a line identifier from another
|
||||||
|
/// [`QuerySource`], [`EventSource`], [`Output`](crate::ports::Output) or
|
||||||
|
/// [`Requestor`](crate::ports::Requestor) instance and may result in the
|
||||||
|
/// disconnection of an arbitrary endpoint.
|
||||||
|
pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
|
||||||
|
if self.broadcaster.lock().unwrap().remove(line_id) {
|
||||||
|
Ok(())
|
||||||
|
} else {
|
||||||
|
Err(LineError {})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes all connections.
|
||||||
|
pub fn disconnect_all(&mut self) {
|
||||||
|
self.broadcaster.lock().unwrap().clear();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an action which, when processed, broadcasts a query to all
|
||||||
|
/// connected replier ports.
|
||||||
|
///
|
||||||
|
/// Note that the action broadcasts the query to those models that are
|
||||||
|
/// connected to the query source at the time the action is processed.
|
||||||
|
pub fn query(&mut self, arg: T) -> (Action, ReplyReceiver<R>) {
|
||||||
|
let (writer, reader) = slot::slot();
|
||||||
|
let fut = self.broadcaster.lock().unwrap().broadcast(arg);
|
||||||
|
let fut = async move {
|
||||||
|
let replies = fut.await.unwrap();
|
||||||
|
let _ = writer.write(replies);
|
||||||
|
};
|
||||||
|
|
||||||
|
let action = Action::new(OnceAction::new(fut));
|
||||||
|
|
||||||
|
(action, ReplyReceiver::<R>(reader))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> Default for QuerySource<T, R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
broadcaster: Arc::new(Mutex::new(QueryBroadcaster::default())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for QuerySource<T, R> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"Query source ({} connected ports)",
|
||||||
|
self.broadcaster.lock().unwrap().len()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A receiver for all replies collected from a single query broadcast.
|
||||||
|
pub struct ReplyReceiver<R>(slot::SlotReader<ReplyIterator<R>>);
|
||||||
|
|
||||||
|
impl<R> ReplyReceiver<R> {
|
||||||
|
/// Returns all replies to a query.
|
||||||
|
///
|
||||||
|
/// Returns `None` if the replies are not yet available or if they were
|
||||||
|
/// already taken in a previous call to `take`.
|
||||||
|
pub fn take(&mut self) -> Option<impl Iterator<Item = R>> {
|
||||||
|
self.0.try_read().ok()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R> fmt::Debug for ReplyReceiver<R> {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(f, "Replies")
|
||||||
|
}
|
||||||
|
}
|
776
asynchronix/src/ports/source/broadcaster.rs
Normal file
776
asynchronix/src/ports/source/broadcaster.rs
Normal file
@ -0,0 +1,776 @@
|
|||||||
|
use std::future::Future;
|
||||||
|
use std::mem;
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::task::{Context, Poll};
|
||||||
|
use std::vec;
|
||||||
|
|
||||||
|
use pin_project_lite::pin_project;
|
||||||
|
|
||||||
|
use diatomic_waker::WakeSink;
|
||||||
|
|
||||||
|
use super::sender::{Sender, SenderFuture};
|
||||||
|
|
||||||
|
use crate::ports::LineId;
|
||||||
|
use crate::util::task_set::TaskSet;
|
||||||
|
|
||||||
|
/// An object that can efficiently broadcast messages to several addresses.
|
||||||
|
///
|
||||||
|
/// This is very similar to `output::broadcaster::BroadcasterInner`, but
|
||||||
|
/// generates owned futures instead.
|
||||||
|
///
|
||||||
|
/// This object maintains a list of senders associated to each target address.
|
||||||
|
/// When a message is broadcast, the sender futures are awaited in parallel.
|
||||||
|
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
|
||||||
|
/// does, but the outputs of all sender futures are returned all at once rather
|
||||||
|
/// than with an asynchronous iterator (a.k.a. async stream).
|
||||||
|
pub(super) struct BroadcasterInner<T: Clone, R> {
|
||||||
|
/// Line identifier for the next port to be connected.
|
||||||
|
next_line_id: u64,
|
||||||
|
/// The list of senders with their associated line identifier.
|
||||||
|
senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> BroadcasterInner<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total number of line identifiers ever
    /// assigned reaches `u64::MAX`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) -> LineId {
        // Guard against line identifier exhaustion.
        assert!(self.next_line_id != u64::MAX);
        let line_id = LineId(self.next_line_id);
        self.next_line_id += 1;

        self.senders.push((line_id, sender));

        line_id
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        if let Some(pos) = self.senders.iter().position(|s| s.0 == id) {
            // Sender order is not part of the contract, so the O(1)
            // `swap_remove` can be used instead of `remove`.
            self.senders.swap_remove(pos);

            return true;
        }

        false
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.senders.clear();
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.senders.len()
    }

    /// Efficiently broadcasts a message or a query to multiple addresses.
    ///
    /// The returned future resolves, once all senders have completed, to an
    /// iterator over their outputs (see `BroadcastFuture`).
    fn broadcast(&mut self, arg: T) -> BroadcastFuture<R> {
        let mut future_states = Vec::with_capacity(self.senders.len());

        // Broadcast the message and collect all futures.
        let mut iter = self.senders.iter_mut();
        while let Some(sender) = iter.next() {
            // Move the argument rather than clone it for the last future.
            if iter.len() == 0 {
                future_states.push(SenderFutureState::Pending(sender.1.send(arg)));
                break;
            }

            future_states.push(SenderFutureState::Pending(sender.1.send(arg.clone())));
        }

        // Generate the global future.
        BroadcastFuture::new(future_states)
    }
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> Default for BroadcasterInner<T, R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
next_line_id: 0,
|
||||||
|
senders: Vec::new(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can efficiently broadcast events to several input ports.
///
/// This is very similar to `output::broadcaster::EventBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct EventBroadcaster<T: Clone> {
    /// The broadcaster core object; the reply type is `()` since events carry
    /// no response.
    inner: BroadcasterInner<T, ()>,
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send> EventBroadcaster<T> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total number of line identifiers ever
    /// assigned reaches `u64::MAX`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, ()>>) -> LineId {
        self.inner.add(sender)
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear();
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Broadcasts an event to all addresses.
    pub(super) fn broadcast(
        &mut self,
        arg: T,
    ) -> impl Future<Output = Result<(), BroadcastError>> + Send {
        // Fast paths for the empty and single-sender cases avoid the
        // task-management machinery of `BroadcastFuture` altogether.
        enum Fut<F1, F2> {
            Empty,
            Single(F1),
            Multiple(F2),
        }

        let fut = match self.inner.senders.as_mut_slice() {
            // No sender.
            [] => Fut::Empty,
            // One sender.
            [sender] => Fut::Single(sender.1.send(arg)),
            // Multiple senders.
            _ => Fut::Multiple(self.inner.broadcast(arg)),
        };

        async {
            match fut {
                Fut::Empty => Ok(()),
                Fut::Single(fut) => fut.await.map_err(|_| BroadcastError {}),
                Fut::Multiple(fut) => fut.await.map(|_| ()),
            }
        }
    }
}
|
||||||
|
|
||||||
|
impl<T: Clone> Default for EventBroadcaster<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: BroadcasterInner::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can efficiently broadcast queries to several replier ports.
///
/// This is very similar to `output::broadcaster::QueryBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct QueryBroadcaster<T: Clone, R> {
    /// The broadcaster core object; `R` is the reply type produced by each
    /// replier port.
    inner: BroadcasterInner<T, R>,
}
|
||||||
|
|
||||||
|
impl<T: Clone + Send, R: Send> QueryBroadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total number of line identifiers ever
    /// assigned reaches `u64::MAX`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) -> LineId {
        self.inner.add(sender)
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        self.inner.remove(id)
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.inner.clear();
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }

    /// Broadcasts a query to all addresses and collects the replies.
    pub(super) fn broadcast(
        &mut self,
        arg: T,
    ) -> impl Future<Output = Result<ReplyIterator<R>, BroadcastError>> + Send {
        // Fast paths for the empty and single-sender cases avoid the
        // task-management machinery of `BroadcastFuture` altogether.
        enum Fut<F1, F2> {
            Empty,
            Single(F1),
            Multiple(F2),
        }

        let fut = match self.inner.senders.as_mut_slice() {
            // No sender.
            [] => Fut::Empty,
            // One sender.
            [sender] => Fut::Single(sender.1.send(arg)),
            // Multiple senders.
            _ => Fut::Multiple(self.inner.broadcast(arg)),
        };

        async {
            match fut {
                Fut::Empty => Ok(ReplyIterator(Vec::new().into_iter())),
                Fut::Single(fut) => fut
                    .await
                    .map(|reply| ReplyIterator(vec![SenderFutureState::Ready(reply)].into_iter()))
                    .map_err(|_| BroadcastError {}),
                Fut::Multiple(fut) => fut.await.map_err(|_| BroadcastError {}),
            }
        }
    }
}
|
||||||
|
|
||||||
|
impl<T: Clone, R> Default for QueryBroadcaster<T, R> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
inner: BroadcasterInner::default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pin_project! {
    /// A future aggregating the outputs of a collection of sender futures.
    ///
    /// The idea is to join all sender futures as efficiently as possible, meaning:
    ///
    /// - the sender futures are polled simultaneously rather than waiting for their
    ///   completion in a sequential manner,
    /// - the happy path (all futures immediately ready) is very fast.
    pub(super) struct BroadcastFuture<R> {
        // Thread-safe waker handle.
        wake_sink: WakeSink,
        // Tasks associated to the sender futures.
        task_set: TaskSet,
        // List of all sender futures or their outputs.
        //
        // Entries transition from `Pending` to `Ready` as the sub-futures
        // complete; the whole list is handed over to a `ReplyIterator` when
        // the broadcast completes.
        future_states: Vec<SenderFutureState<R>>,
        // The total count of futures that have not yet been polled to completion.
        pending_futures_count: usize,
        // State of completion of the future.
        state: FutureState,
    }
}
|
||||||
|
|
||||||
|
impl<R> BroadcastFuture<R> {
|
||||||
|
/// Creates a new `BroadcastFuture`.
|
||||||
|
fn new(future_states: Vec<SenderFutureState<R>>) -> Self {
|
||||||
|
let wake_sink = WakeSink::new();
|
||||||
|
let wake_src = wake_sink.source();
|
||||||
|
let pending_futures_count = future_states.len();
|
||||||
|
|
||||||
|
BroadcastFuture {
|
||||||
|
wake_sink,
|
||||||
|
task_set: TaskSet::with_len(wake_src, pending_futures_count),
|
||||||
|
future_states,
|
||||||
|
pending_futures_count,
|
||||||
|
state: FutureState::Uninit,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R> Future for BroadcastFuture<R> {
    type Output = Result<ReplyIterator<R>, BroadcastError>;

    /// Polls all pending sender futures, each through its own `TaskSet` waker
    /// so that only the sub-futures that actually made progress are re-polled
    /// on subsequent wake-ups.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;

        // Polling after completion is a contract violation by the caller.
        assert_ne!(this.state, FutureState::Completed);

        // Poll all sender futures once if this is the first time the broadcast
        // future is polled.
        if this.state == FutureState::Uninit {
            for task_idx in 0..this.future_states.len() {
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(_)) => {
                            // Any sender failure fails the whole broadcast.
                            this.state = FutureState::Completed;

                            return Poll::Ready(Err(BroadcastError {}));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            // Happy path: every sub-future was immediately ready.
            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }

            this.state = FutureState::Pending;
        }

        // Repeatedly poll the futures of all scheduled tasks until there are no
        // more scheduled tasks.
        loop {
            // No need to register the waker if some tasks have been scheduled.
            if !this.task_set.has_scheduled() {
                this.wake_sink.register(cx.waker());
            }

            // Retrieve the indices of the scheduled tasks if any. If there are
            // no scheduled tasks, `Poll::Pending` is returned and this future
            // will be awaken again when enough tasks have been scheduled.
            //
            // NOTE: the current implementation requires a notification to be
            // sent each time a sub-future has made progress. We may try at some
            // point to benchmark an alternative strategy where a notification
            // is requested only when all pending sub-futures have made progress,
            // using `take_scheduled(this.pending_futures_count)`. This would
            // reduce the cost of context switch but could hurt latency.
            let scheduled_tasks = match this.task_set.take_scheduled(1) {
                Some(st) => st,
                None => return Poll::Pending,
            };

            for task_idx in scheduled_tasks {
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(_)) => {
                            // Any sender failure fails the whole broadcast.
                            this.state = FutureState::Completed;

                            return Poll::Ready(Err(BroadcastError {}));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }
        }
    }
}
|
||||||
|
|
||||||
|
/// Error returned when a message could not be delivered.
///
/// Carries no payload: delivery failure of any one sender is the only failure
/// mode reported by a broadcast.
#[derive(Debug)]
pub(super) struct BroadcastError {}
|
||||||
|
|
||||||
|
/// Completion state of a `BroadcastFuture`.
#[derive(Debug, PartialEq)]
enum FutureState {
    /// The sender futures have not been polled yet.
    Uninit,
    /// At least one sender future has been polled but is not yet complete.
    Pending,
    /// The broadcast future has resolved, successfully or with an error.
    Completed,
}
|
||||||
|
|
||||||
|
/// The state of a `SenderFuture`.
enum SenderFutureState<R> {
    /// The sender future has not yet completed.
    Pending(SenderFuture<R>),
    /// The sender future has completed and its output was stored in place.
    Ready(R),
}
|
||||||
|
|
||||||
|
/// An iterator over the replies to a broadcasted request.
///
/// All items are expected to be in the `Ready` state by the time this iterator
/// is constructed (see `BroadcastFuture`); `next` panics otherwise.
pub(crate) struct ReplyIterator<R>(vec::IntoIter<SenderFutureState<R>>);
|
||||||
|
|
||||||
|
impl<R> Iterator for ReplyIterator<R> {
|
||||||
|
type Item = R;
|
||||||
|
|
||||||
|
fn next(&mut self) -> Option<Self::Item> {
|
||||||
|
self.0.next().map(|state| match state {
|
||||||
|
SenderFutureState::Ready(reply) => reply,
|
||||||
|
_ => panic!("reply missing in replies iterator"),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn size_hint(&self) -> (usize, Option<usize>) {
|
||||||
|
self.0.size_hint()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::{Arc, Mutex};
    use std::thread;

    use futures_executor::block_on;

    use crate::channel::Receiver;
    use crate::model::Context;
    use crate::time::{MonotonicTime, TearableAtomicTime};
    use crate::util::priority_queue::PriorityQueue;
    use crate::util::sync_cell::SyncCell;

    use super::super::sender::{InputSender, ReplierSender};
    use super::*;
    use crate::model::Model;

    /// Test model wrapping a shared atomic counter.
    struct Counter {
        inner: Arc<AtomicUsize>,
    }
    impl Counter {
        fn new(counter: Arc<AtomicUsize>) -> Self {
            Self { inner: counter }
        }
        /// Input port: increments the counter by `by`.
        async fn inc(&mut self, by: usize) {
            self.inner.fetch_add(by, Ordering::Relaxed);
        }
        /// Replier port: increments the counter by `by` and replies with the
        /// previous value.
        async fn fetch_inc(&mut self, by: usize) -> usize {
            self.inner.fetch_add(by, Ordering::Relaxed)
        }
    }
    impl Model for Counter {}

    #[test]
    fn broadcast_event_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = EventBroadcaster::default();
        for _ in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(InputSender::new(Counter::inc, address));

            broadcaster.add(sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(broadcaster.broadcast(1)).unwrap();
        });

        let counter = Arc::new(AtomicUsize::new(0));

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        // A minimal context is enough since the model does not
                        // use scheduling or simulation time.
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_context = Context::new(
                            String::new(),
                            dummy_address,
                            dummy_priority_queue,
                            dummy_time,
                        );
                        block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap();
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }

    #[test]
    fn broadcast_query_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = QueryBroadcaster::default();
        for _ in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(ReplierSender::new(Counter::fetch_inc, address));

            broadcaster.add(sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            let iter = block_on(broadcaster.broadcast(1)).unwrap();
            let sum: usize = iter.sum();

            assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
        });

        let counter = Arc::new(AtomicUsize::new(0));

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        // A minimal context is enough since the model does not
                        // use scheduling or simulation time.
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_context = Context::new(
                            String::new(),
                            dummy_address,
                            dummy_priority_queue,
                            dummy_time,
                        );
                        block_on(mailbox.recv(&mut counter, &dummy_context)).unwrap();
                        // NOTE(review): keeps the receiver thread (and its
                        // channel endpoint) alive a bit longer — presumably to
                        // let the reply be consumed before teardown; confirm.
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }
}
|
||||||
|
|
||||||
|
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use futures_channel::mpsc;
    use futures_util::StreamExt;

    use loom::model::Builder;
    use loom::sync::atomic::{AtomicBool, Ordering};
    use loom::thread;

    use waker_fn::waker_fn;

    use super::super::sender::SendError;
    use super::*;

    // An event that may be waken spuriously.
    struct TestEvent<R> {
        // The receiver is actually used only once in tests, so it is moved out
        // of the `Option` on first use.
        receiver: Option<mpsc::UnboundedReceiver<Option<R>>>,
    }
    impl<R: Send + 'static> Sender<(), R> for TestEvent<R> {
        fn send(&mut self, _arg: ()) -> Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>> {
            let receiver = self.receiver.take().unwrap();

            Box::pin(async move {
                // `None` items model spurious wake-ups and are filtered out;
                // the first `Some` item is the final value.
                let mut stream = Box::pin(receiver.filter_map(|item| async { item }));

                Ok(stream.next().await.unwrap())
            })
        }
    }

    // An object that can wake a `TestEvent`.
    #[derive(Clone)]
    struct TestEventWaker<R> {
        sender: mpsc::UnboundedSender<Option<R>>,
    }
    impl<R> TestEventWaker<R> {
        // Wakes the event without making it ready.
        fn wake_spurious(&self) {
            let _ = self.sender.unbounded_send(None);
        }
        // Wakes the event and makes it resolve to `value`.
        fn wake_final(&self, value: R) {
            let _ = self.sender.unbounded_send(Some(value));
        }
    }

    // Creates a test event together with its waking handle.
    fn test_event<R>() -> (TestEvent<R>, TestEventWaker<R>) {
        let (sender, receiver) = mpsc::unbounded();

        (
            TestEvent {
                receiver: Some(receiver),
            },
            TestEventWaker { sender },
        )
    }

    #[test]
    fn loom_broadcast_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();
            let (test_event3, waker3) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1));
            broadcaster.add(Box::new(test_event2));
            broadcaster.add(Box::new(test_event3));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th3 = thread::spawn(move || waker3.wake_final(42));

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), Some(42));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th3.join().unwrap();

            // After all wakers fired, the broadcast task must have been
            // scheduled at least once.
            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), Some(42));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }

    #[test]
    fn loom_broadcast_spurious() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1));
            broadcaster.add(Box::new(test_event2));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            // The first event gets an extra, spurious wake-up.
            let spurious_waker = waker1.clone();
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th_spurious.join().unwrap();

            // After all wakers fired, the broadcast task must have been
            // scheduled at least once.
            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }
}
|
136
asynchronix/src/ports/source/sender.rs
Normal file
136
asynchronix/src/ports/source/sender.rs
Normal file
@ -0,0 +1,136 @@
|
|||||||
|
use std::error::Error;
|
||||||
|
use std::fmt;
|
||||||
|
use std::future::Future;
|
||||||
|
use std::marker::PhantomData;
|
||||||
|
use std::pin::Pin;
|
||||||
|
|
||||||
|
use futures_channel::oneshot;
|
||||||
|
use recycle_box::{coerce_box, RecycleBox};
|
||||||
|
|
||||||
|
use crate::channel;
|
||||||
|
use crate::model::Model;
|
||||||
|
use crate::ports::{InputFn, ReplierFn};
|
||||||
|
|
||||||
|
/// Boxed future returned by [`Sender::send`], resolving to the reply (or `()`
/// for events) or to a `SendError`.
pub(super) type SenderFuture<R> = Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>>;

/// An event or query sender abstracting over the target model and input method.
pub(super) trait Sender<T, R>: Send {
    /// Asynchronously send the event or request.
    fn send(&mut self, arg: T) -> SenderFuture<R>;
}
|
||||||
|
|
||||||
|
/// An object that can send events to an input port.
pub(super) struct InputSender<M: 'static, F, T, S> {
    /// The input method to be invoked on the target model.
    func: F,
    /// Channel endpoint of the target model's mailbox.
    sender: channel::Sender<M>,
    // Ties the sender to the input signature `fn(&mut M, T)` without storing
    // a closure.
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    // Marker for the closure kind `S` of the input method.
    _phantom_closure_marker: PhantomData<S>,
}
|
||||||
|
|
||||||
|
impl<M: Send, F, T, S> InputSender<M, F, T, S>
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + 'static,
|
||||||
|
{
|
||||||
|
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
|
||||||
|
Self {
|
||||||
|
func,
|
||||||
|
sender,
|
||||||
|
_phantom_closure: PhantomData,
|
||||||
|
_phantom_closure_marker: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M: Send, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + 'static,
    S: Send + 'static,
{
    /// Sends the event into the model's channel; fails with `SendError` if the
    /// channel send fails (e.g. closed mailbox).
    fn send(&mut self, arg: T) -> SenderFuture<()> {
        let func = self.func.clone();
        let sender = self.sender.clone();

        Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    let fut = func.call(model, arg, scheduler);

                    // Reuse the channel's recyclable allocation for the
                    // input-processing future instead of allocating anew.
                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await
                .map_err(|_| SendError {})
        })
    }
}
|
||||||
|
|
||||||
|
/// An object that can send a request to a replier port and retrieve a response.
pub(super) struct ReplierSender<M: 'static, F, T, R, S> {
    /// The replier method to be invoked on the target model.
    func: F,
    /// Channel endpoint of the target model's mailbox.
    sender: channel::Sender<M>,
    // Ties the sender to the replier signature `fn(&mut M, T) -> R` without
    // storing a closure.
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    // Marker for the closure kind `S` of the replier method.
    _phantom_closure_marker: PhantomData<S>,
}
|
||||||
|
|
||||||
|
impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> ReplierFn<'a, M, T, R, S>,
|
||||||
|
T: Send + 'static,
|
||||||
|
R: Send + 'static,
|
||||||
|
{
|
||||||
|
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
|
||||||
|
Self {
|
||||||
|
func,
|
||||||
|
sender,
|
||||||
|
_phantom_closure: PhantomData,
|
||||||
|
_phantom_closure_marker: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
    T: Send + 'static,
    R: Send + 'static,
    S: Send,
{
    /// Sends the request into the model's channel and awaits the reply, which
    /// is routed back through a one-shot channel. Fails with `SendError` if
    /// either the channel send fails or the reply sender is dropped.
    fn send(&mut self, arg: T) -> SenderFuture<R> {
        let func = self.func.clone();
        let sender = self.sender.clone();
        let (reply_sender, reply_receiver) = oneshot::channel();

        Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    let fut = async move {
                        let reply = func.call(model, arg, scheduler).await;
                        // Ignore the error case: the requester may have been
                        // dropped before receiving the reply.
                        let _ = reply_sender.send(reply);
                    };

                    // Reuse the channel's recyclable allocation for the
                    // request-processing future instead of allocating anew.
                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await
                .map_err(|_| SendError {})?;

            reply_receiver.await.map_err(|_| SendError {})
        })
    }
}
|
||||||
|
|
||||||
|
/// Error returned when the mailbox was closed or dropped.
///
/// This is the only failure mode reported by a `Sender`.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(super) struct SendError {}
|
||||||
|
|
||||||
|
impl fmt::Display for SendError {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
write!(f, "sending message into a closed mailbox")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for SendError {}
|
13
asynchronix/src/rpc.rs
Normal file
13
asynchronix/src/rpc.rs
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
//! Simulation management through remote procedure calls.

mod codegen;
mod endpoint_registry;
// gRPC front-end, only compiled with the `grpc-service` feature.
#[cfg(feature = "grpc-service")]
pub mod grpc;
mod key_registry;
mod simulation_service;
// WASM front-end, only compiled with the `wasm-service` feature.
#[cfg(feature = "wasm-service")]
pub mod wasm;

pub use endpoint_registry::EndpointRegistry;
pub use simulation_service::SimulationService;
|
178
asynchronix/src/rpc/api/simulation.proto
Normal file
178
asynchronix/src/rpc/api/simulation.proto
Normal file
@ -0,0 +1,178 @@
|
|||||||
|
// The main simulation protocol.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
package simulation;
|
||||||
|
|
||||||
|
import "google/protobuf/duration.proto";
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
import "google/protobuf/empty.proto";
|
||||||
|
|
||||||
|
enum ErrorCode {
|
||||||
|
INTERNAL_ERROR = 0;
|
||||||
|
SIMULATION_NOT_STARTED = 1;
|
||||||
|
MISSING_ARGUMENT = 2;
|
||||||
|
INVALID_TIME = 3;
|
||||||
|
INVALID_DURATION = 4;
|
||||||
|
INVALID_MESSAGE = 5;
|
||||||
|
INVALID_KEY = 6;
|
||||||
|
SOURCE_NOT_FOUND = 10;
|
||||||
|
SINK_NOT_FOUND = 11;
|
||||||
|
KEY_NOT_FOUND = 12;
|
||||||
|
SIMULATION_TIME_OUT_OF_RANGE = 13;
|
||||||
|
}
|
||||||
|
|
||||||
|
message Error {
|
||||||
|
ErrorCode code = 1;
|
||||||
|
string message = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EventKey {
|
||||||
|
uint64 subkey1 = 1;
|
||||||
|
uint64 subkey2 = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message InitRequest { google.protobuf.Timestamp time = 1; }
|
||||||
|
message InitReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message TimeRequest {}
|
||||||
|
message TimeReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Timestamp time = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message StepRequest {}
|
||||||
|
message StepReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Timestamp time = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message StepUntilRequest {
|
||||||
|
oneof deadline { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Timestamp time = 1;
|
||||||
|
google.protobuf.Duration duration = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
message StepUntilReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Timestamp time = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ScheduleEventRequest {
|
||||||
|
oneof deadline { // Expects exactly 1 variant.
|
||||||
|
google.protobuf.Timestamp time = 1;
|
||||||
|
google.protobuf.Duration duration = 2;
|
||||||
|
}
|
||||||
|
string source_name = 3;
|
||||||
|
bytes event = 4;
|
||||||
|
google.protobuf.Duration period = 5;
|
||||||
|
bool with_key = 6;
|
||||||
|
}
|
||||||
|
message ScheduleEventReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 1;
|
||||||
|
EventKey key = 2;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message CancelEventRequest { EventKey key = 1; }
|
||||||
|
message CancelEventReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProcessEventRequest {
|
||||||
|
string source_name = 1;
|
||||||
|
bytes event = 2;
|
||||||
|
}
|
||||||
|
message ProcessEventReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 1;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ProcessQueryRequest {
|
||||||
|
string source_name = 1;
|
||||||
|
bytes request = 2;
|
||||||
|
}
|
||||||
|
message ProcessQueryReply {
|
||||||
|
// This field is hoisted because protobuf3 does not support `repeated` within
|
||||||
|
// a `oneof`. It is Always empty if an error is returned
|
||||||
|
repeated bytes replies = 1;
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 10;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ReadEventsRequest { string sink_name = 1; }
|
||||||
|
message ReadEventsReply {
|
||||||
|
// This field is hoisted because protobuf3 does not support `repeated` within
|
||||||
|
// a `oneof`. It is Always empty if an error is returned
|
||||||
|
repeated bytes events = 1;
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 10;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message OpenSinkRequest { string sink_name = 1; }
|
||||||
|
message OpenSinkReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 10;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message CloseSinkRequest { string sink_name = 1; }
|
||||||
|
message CloseSinkReply {
|
||||||
|
oneof result { // Always returns exactly 1 variant.
|
||||||
|
google.protobuf.Empty empty = 10;
|
||||||
|
Error error = 100;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A convenience message type for custom transport implementation.
|
||||||
|
message AnyRequest {
|
||||||
|
oneof request { // Expects exactly 1 variant.
|
||||||
|
InitRequest init_request = 1;
|
||||||
|
TimeRequest time_request = 2;
|
||||||
|
StepRequest step_request = 3;
|
||||||
|
StepUntilRequest step_until_request = 4;
|
||||||
|
ScheduleEventRequest schedule_event_request = 5;
|
||||||
|
CancelEventRequest cancel_event_request = 6;
|
||||||
|
ProcessEventRequest process_event_request = 7;
|
||||||
|
ProcessQueryRequest process_query_request = 8;
|
||||||
|
ReadEventsRequest read_events_request = 9;
|
||||||
|
OpenSinkRequest open_sink_request = 10;
|
||||||
|
CloseSinkRequest close_sink_request = 11;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
service Simulation {
|
||||||
|
rpc Init(InitRequest) returns (InitReply);
|
||||||
|
rpc Time(TimeRequest) returns (TimeReply);
|
||||||
|
rpc Step(StepRequest) returns (StepReply);
|
||||||
|
rpc StepUntil(StepUntilRequest) returns (StepUntilReply);
|
||||||
|
rpc ScheduleEvent(ScheduleEventRequest) returns (ScheduleEventReply);
|
||||||
|
rpc CancelEvent(CancelEventRequest) returns (CancelEventReply);
|
||||||
|
rpc ProcessEvent(ProcessEventRequest) returns (ProcessEventReply);
|
||||||
|
rpc ProcessQuery(ProcessQueryRequest) returns (ProcessQueryReply);
|
||||||
|
rpc ReadEvents(ReadEventsRequest) returns (ReadEventsReply);
|
||||||
|
rpc OpenSink(OpenSinkRequest) returns (OpenSinkReply);
|
||||||
|
rpc CloseSink(CloseSinkRequest) returns (CloseSinkReply);
|
||||||
|
}
|
6
asynchronix/src/rpc/codegen.rs
Normal file
6
asynchronix/src/rpc/codegen.rs
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
#![allow(unreachable_pub)]
|
||||||
|
#![allow(clippy::enum_variant_names)]
|
||||||
|
#![allow(missing_docs)]
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
pub(crate) mod simulation;
|
0
asynchronix/src/rpc/codegen/.gitkeep
Normal file
0
asynchronix/src/rpc/codegen/.gitkeep
Normal file
1109
asynchronix/src/rpc/codegen/simulation.rs
Normal file
1109
asynchronix/src/rpc/codegen/simulation.rs
Normal file
File diff suppressed because it is too large
Load Diff
307
asynchronix/src/rpc/endpoint_registry.rs
Normal file
307
asynchronix/src/rpc/endpoint_registry.rs
Normal file
@ -0,0 +1,307 @@
|
|||||||
|
use std::collections::hash_map::Entry;
|
||||||
|
use std::collections::HashMap;
|
||||||
|
use std::fmt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use rmp_serde::decode::Error as RmpDecodeError;
|
||||||
|
use rmp_serde::encode::Error as RmpEncodeError;
|
||||||
|
use serde::de::DeserializeOwned;
|
||||||
|
use serde::Serialize;
|
||||||
|
|
||||||
|
use crate::ports::{EventSinkStream, EventSource, QuerySource, ReplyReceiver};
|
||||||
|
use crate::simulation::{Action, ActionKey};
|
||||||
|
|
||||||
|
/// A registry that holds all sources and sinks meant to be accessed through
|
||||||
|
/// remote procedure calls.
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct EndpointRegistry {
|
||||||
|
event_sources: HashMap<String, Box<dyn EventSourceAny>>,
|
||||||
|
query_sources: HashMap<String, Box<dyn QuerySourceAny>>,
|
||||||
|
sinks: HashMap<String, Box<dyn EventSinkStreamAny>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EndpointRegistry {
|
||||||
|
/// Creates an empty `EndpointRegistry`.
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self::default()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds an event source to the registry.
|
||||||
|
///
|
||||||
|
/// If the specified name is already in use for another event source, the source
|
||||||
|
/// provided as argument is returned in the error.
|
||||||
|
pub fn add_event_source<T>(
|
||||||
|
&mut self,
|
||||||
|
source: EventSource<T>,
|
||||||
|
name: impl Into<String>,
|
||||||
|
) -> Result<(), EventSource<T>>
|
||||||
|
where
|
||||||
|
T: DeserializeOwned + Clone + Send + 'static,
|
||||||
|
{
|
||||||
|
match self.event_sources.entry(name.into()) {
|
||||||
|
Entry::Vacant(s) => {
|
||||||
|
s.insert(Box::new(source));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Entry::Occupied(_) => Err(source),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to the specified event source if it is in
|
||||||
|
/// the registry.
|
||||||
|
pub(crate) fn get_event_source_mut(&mut self, name: &str) -> Option<&mut dyn EventSourceAny> {
|
||||||
|
self.event_sources.get_mut(name).map(|s| s.as_mut())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a query source to the registry.
|
||||||
|
///
|
||||||
|
/// If the specified name is already in use for another query source, the
|
||||||
|
/// source provided as argument is returned in the error.
|
||||||
|
pub fn add_query_source<T, R>(
|
||||||
|
&mut self,
|
||||||
|
source: QuerySource<T, R>,
|
||||||
|
name: impl Into<String>,
|
||||||
|
) -> Result<(), QuerySource<T, R>>
|
||||||
|
where
|
||||||
|
T: DeserializeOwned + Clone + Send + 'static,
|
||||||
|
R: Serialize + Send + 'static,
|
||||||
|
{
|
||||||
|
match self.query_sources.entry(name.into()) {
|
||||||
|
Entry::Vacant(s) => {
|
||||||
|
s.insert(Box::new(source));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Entry::Occupied(_) => Err(source),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to the specified query source if it is in
|
||||||
|
/// the registry.
|
||||||
|
pub(crate) fn get_query_source_mut(&mut self, name: &str) -> Option<&mut dyn QuerySourceAny> {
|
||||||
|
self.query_sources.get_mut(name).map(|s| s.as_mut())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Adds a sink to the registry.
|
||||||
|
///
|
||||||
|
/// If the specified name is already in use for another sink, the sink
|
||||||
|
/// provided as argument is returned in the error.
|
||||||
|
pub fn add_event_sink<S>(&mut self, sink: S, name: impl Into<String>) -> Result<(), S>
|
||||||
|
where
|
||||||
|
S: EventSinkStream + Send + 'static,
|
||||||
|
S::Item: Serialize,
|
||||||
|
{
|
||||||
|
match self.sinks.entry(name.into()) {
|
||||||
|
Entry::Vacant(s) => {
|
||||||
|
s.insert(Box::new(sink));
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
Entry::Occupied(_) => Err(sink),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a mutable reference to the specified sink if it is in the
|
||||||
|
/// registry.
|
||||||
|
pub(crate) fn get_event_sink_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> {
|
||||||
|
self.sinks.get_mut(name).map(|s| s.as_mut())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for EndpointRegistry {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
write!(
|
||||||
|
f,
|
||||||
|
"EndpointRegistry ({} sources, {} sinks)",
|
||||||
|
self.event_sources.len(),
|
||||||
|
self.sinks.len()
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A type-erased `EventSource` that operates on MessagePack-encoded serialized
|
||||||
|
/// events.
|
||||||
|
pub(crate) trait EventSourceAny: Send + 'static {
|
||||||
|
/// Returns an action which, when processed, broadcasts an event to all
|
||||||
|
/// connected input ports.
|
||||||
|
///
|
||||||
|
/// The argument is expected to conform to the serde MessagePack encoding.
|
||||||
|
fn event(&mut self, msgpack_arg: &[u8]) -> Result<Action, RmpDecodeError>;
|
||||||
|
|
||||||
|
/// Returns a cancellable action and a cancellation key; when processed, the
|
||||||
|
/// action broadcasts an event to all connected input ports.
|
||||||
|
///
|
||||||
|
/// The argument is expected to conform to the serde MessagePack encoding.
|
||||||
|
fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError>;
|
||||||
|
|
||||||
|
/// Returns a periodically recurring action which, when processed,
|
||||||
|
/// broadcasts an event to all connected input ports.
|
||||||
|
///
|
||||||
|
/// The argument is expected to conform to the serde MessagePack encoding.
|
||||||
|
fn periodic_event(
|
||||||
|
&mut self,
|
||||||
|
period: Duration,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<Action, RmpDecodeError>;
|
||||||
|
|
||||||
|
/// Returns a cancellable, periodically recurring action and a cancellation
|
||||||
|
/// key; when processed, the action broadcasts an event to all connected
|
||||||
|
/// input ports.
|
||||||
|
///
|
||||||
|
/// The argument is expected to conform to the serde MessagePack encoding.
|
||||||
|
fn keyed_periodic_event(
|
||||||
|
&mut self,
|
||||||
|
period: Duration,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<(Action, ActionKey), RmpDecodeError>;
|
||||||
|
|
||||||
|
/// Human-readable name of the event type, as returned by
|
||||||
|
/// `any::type_name()`.
|
||||||
|
fn event_type_name(&self) -> &'static str;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> EventSourceAny for EventSource<T>
|
||||||
|
where
|
||||||
|
T: DeserializeOwned + Clone + Send + 'static,
|
||||||
|
{
|
||||||
|
fn event(&mut self, msgpack_arg: &[u8]) -> Result<Action, RmpDecodeError> {
|
||||||
|
rmp_serde::from_read(msgpack_arg).map(|arg| self.event(arg))
|
||||||
|
}
|
||||||
|
fn keyed_event(&mut self, msgpack_arg: &[u8]) -> Result<(Action, ActionKey), RmpDecodeError> {
|
||||||
|
rmp_serde::from_read(msgpack_arg).map(|arg| self.keyed_event(arg))
|
||||||
|
}
|
||||||
|
fn periodic_event(
|
||||||
|
&mut self,
|
||||||
|
period: Duration,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<Action, RmpDecodeError> {
|
||||||
|
rmp_serde::from_read(msgpack_arg).map(|arg| self.periodic_event(period, arg))
|
||||||
|
}
|
||||||
|
fn keyed_periodic_event(
|
||||||
|
&mut self,
|
||||||
|
period: Duration,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<(Action, ActionKey), RmpDecodeError> {
|
||||||
|
rmp_serde::from_read(msgpack_arg).map(|arg| self.keyed_periodic_event(period, arg))
|
||||||
|
}
|
||||||
|
fn event_type_name(&self) -> &'static str {
|
||||||
|
std::any::type_name::<T>()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A type-erased `QuerySource` that operates on MessagePack-encoded serialized
|
||||||
|
/// queries and returns MessagePack-encoded replies.
|
||||||
|
pub(crate) trait QuerySourceAny: Send + 'static {
|
||||||
|
/// Returns an action which, when processed, broadcasts a query to all
|
||||||
|
/// connected replier ports.
|
||||||
|
///
|
||||||
|
///
|
||||||
|
/// The argument is expected to conform to the serde MessagePack encoding.
|
||||||
|
fn query(
|
||||||
|
&mut self,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<(Action, Box<dyn ReplyReceiverAny>), RmpDecodeError>;
|
||||||
|
|
||||||
|
/// Human-readable name of the request type, as returned by
|
||||||
|
/// `any::type_name()`.
|
||||||
|
fn request_type_name(&self) -> &'static str;
|
||||||
|
|
||||||
|
/// Human-readable name of the reply type, as returned by
|
||||||
|
/// `any::type_name()`.
|
||||||
|
fn reply_type_name(&self) -> &'static str;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, R> QuerySourceAny for QuerySource<T, R>
|
||||||
|
where
|
||||||
|
T: DeserializeOwned + Clone + Send + 'static,
|
||||||
|
R: Serialize + Send + 'static,
|
||||||
|
{
|
||||||
|
fn query(
|
||||||
|
&mut self,
|
||||||
|
msgpack_arg: &[u8],
|
||||||
|
) -> Result<(Action, Box<dyn ReplyReceiverAny>), RmpDecodeError> {
|
||||||
|
rmp_serde::from_read(msgpack_arg).map(|arg| {
|
||||||
|
let (action, reply_recv) = self.query(arg);
|
||||||
|
let reply_recv: Box<dyn ReplyReceiverAny> = Box::new(reply_recv);
|
||||||
|
|
||||||
|
(action, reply_recv)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn request_type_name(&self) -> &'static str {
|
||||||
|
std::any::type_name::<T>()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn reply_type_name(&self) -> &'static str {
|
||||||
|
std::any::type_name::<R>()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A type-erased `EventSinkStream`.
|
||||||
|
pub(crate) trait EventSinkStreamAny: Send + 'static {
|
||||||
|
/// Human-readable name of the event type, as returned by
|
||||||
|
/// `any::type_name()`.
|
||||||
|
fn event_type_name(&self) -> &'static str;
|
||||||
|
|
||||||
|
/// Starts or resumes the collection of new events.
|
||||||
|
fn open(&mut self);
|
||||||
|
|
||||||
|
/// Pauses the collection of new events.
|
||||||
|
fn close(&mut self);
|
||||||
|
|
||||||
|
/// Encode and collect all events in a vector.
|
||||||
|
fn collect(&mut self) -> Result<Vec<Vec<u8>>, RmpEncodeError>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<E> EventSinkStreamAny for E
|
||||||
|
where
|
||||||
|
E: EventSinkStream + Send + 'static,
|
||||||
|
E::Item: Serialize,
|
||||||
|
{
|
||||||
|
fn event_type_name(&self) -> &'static str {
|
||||||
|
std::any::type_name::<E::Item>()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn open(&mut self) {
|
||||||
|
self.open();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn close(&mut self) {
|
||||||
|
self.close();
|
||||||
|
}
|
||||||
|
|
||||||
|
fn collect(&mut self) -> Result<Vec<Vec<u8>>, RmpEncodeError> {
|
||||||
|
self.__try_fold(Vec::new(), |mut encoded_events, event| {
|
||||||
|
rmp_serde::to_vec_named(&event).map(|encoded_event| {
|
||||||
|
encoded_events.push(encoded_event);
|
||||||
|
|
||||||
|
encoded_events
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A type-erased `ReplyReceiver` that returns MessagePack-encoded replies..
|
||||||
|
pub(crate) trait ReplyReceiverAny {
|
||||||
|
/// Take the replies, if any, encode them and collect them in a vector.
|
||||||
|
fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, RmpEncodeError>>;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<R: Serialize + 'static> ReplyReceiverAny for ReplyReceiver<R> {
|
||||||
|
fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, RmpEncodeError>> {
|
||||||
|
let replies = self.take()?;
|
||||||
|
|
||||||
|
let encoded_replies = (move || {
|
||||||
|
let mut encoded_replies = Vec::new();
|
||||||
|
for reply in replies {
|
||||||
|
let encoded_reply = rmp_serde::to_vec_named(&reply)?;
|
||||||
|
encoded_replies.push(encoded_reply);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(encoded_replies)
|
||||||
|
})();
|
||||||
|
|
||||||
|
Some(encoded_replies)
|
||||||
|
}
|
||||||
|
}
|
142
asynchronix/src/rpc/grpc.rs
Normal file
142
asynchronix/src/rpc/grpc.rs
Normal file
@ -0,0 +1,142 @@
|
|||||||
|
//! gRPC simulation service.
|
||||||
|
|
||||||
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Mutex;
|
||||||
|
use std::sync::MutexGuard;
|
||||||
|
|
||||||
|
use tonic::{transport::Server, Request, Response, Status};
|
||||||
|
|
||||||
|
use crate::rpc::EndpointRegistry;
|
||||||
|
use crate::simulation::SimInit;
|
||||||
|
|
||||||
|
use super::codegen::simulation::*;
|
||||||
|
use super::simulation_service::SimulationService;
|
||||||
|
|
||||||
|
/// Runs a gRPC simulation server.
|
||||||
|
///
|
||||||
|
/// The first argument is a closure that is called every time the simulation is
|
||||||
|
/// (re)started by the remote client. It must create a new `SimInit` object
|
||||||
|
/// complemented by a registry that exposes the public event and query
|
||||||
|
/// interface.
|
||||||
|
pub fn run<F>(sim_gen: F, addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>>
|
||||||
|
where
|
||||||
|
F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
|
||||||
|
{
|
||||||
|
// Use a single-threaded server.
|
||||||
|
let rt = tokio::runtime::Builder::new_current_thread()
|
||||||
|
.enable_io()
|
||||||
|
.build()?;
|
||||||
|
|
||||||
|
let sim_manager = GrpcSimulationService::new(sim_gen);
|
||||||
|
|
||||||
|
rt.block_on(async move {
|
||||||
|
Server::builder()
|
||||||
|
.add_service(simulation_server::SimulationServer::new(sim_manager))
|
||||||
|
.serve(addr)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
struct GrpcSimulationService {
|
||||||
|
inner: Mutex<SimulationService>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GrpcSimulationService {
|
||||||
|
fn new<F>(sim_gen: F) -> Self
|
||||||
|
where
|
||||||
|
F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
|
||||||
|
{
|
||||||
|
Self {
|
||||||
|
inner: Mutex::new(SimulationService::new(sim_gen)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn inner(&self) -> MutexGuard<'_, SimulationService> {
|
||||||
|
self.inner.lock().unwrap()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tonic::async_trait]
|
||||||
|
impl simulation_server::Simulation for GrpcSimulationService {
|
||||||
|
async fn init(&self, request: Request<InitRequest>) -> Result<Response<InitReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().init(request)))
|
||||||
|
}
|
||||||
|
async fn time(&self, request: Request<TimeRequest>) -> Result<Response<TimeReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().time(request)))
|
||||||
|
}
|
||||||
|
async fn step(&self, request: Request<StepRequest>) -> Result<Response<StepReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().step(request)))
|
||||||
|
}
|
||||||
|
async fn step_until(
|
||||||
|
&self,
|
||||||
|
request: Request<StepUntilRequest>,
|
||||||
|
) -> Result<Response<StepUntilReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().step_until(request)))
|
||||||
|
}
|
||||||
|
async fn schedule_event(
|
||||||
|
&self,
|
||||||
|
request: Request<ScheduleEventRequest>,
|
||||||
|
) -> Result<Response<ScheduleEventReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().schedule_event(request)))
|
||||||
|
}
|
||||||
|
async fn cancel_event(
|
||||||
|
&self,
|
||||||
|
request: Request<CancelEventRequest>,
|
||||||
|
) -> Result<Response<CancelEventReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().cancel_event(request)))
|
||||||
|
}
|
||||||
|
async fn process_event(
|
||||||
|
&self,
|
||||||
|
request: Request<ProcessEventRequest>,
|
||||||
|
) -> Result<Response<ProcessEventReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().process_event(request)))
|
||||||
|
}
|
||||||
|
async fn process_query(
|
||||||
|
&self,
|
||||||
|
request: Request<ProcessQueryRequest>,
|
||||||
|
) -> Result<Response<ProcessQueryReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().process_query(request)))
|
||||||
|
}
|
||||||
|
async fn read_events(
|
||||||
|
&self,
|
||||||
|
request: Request<ReadEventsRequest>,
|
||||||
|
) -> Result<Response<ReadEventsReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().read_events(request)))
|
||||||
|
}
|
||||||
|
async fn open_sink(
|
||||||
|
&self,
|
||||||
|
request: Request<OpenSinkRequest>,
|
||||||
|
) -> Result<Response<OpenSinkReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().open_sink(request)))
|
||||||
|
}
|
||||||
|
async fn close_sink(
|
||||||
|
&self,
|
||||||
|
request: Request<CloseSinkRequest>,
|
||||||
|
) -> Result<Response<CloseSinkReply>, Status> {
|
||||||
|
let request = request.into_inner();
|
||||||
|
|
||||||
|
Ok(Response::new(self.inner().close_sink(request)))
|
||||||
|
}
|
||||||
|
}
|
48
asynchronix/src/rpc/key_registry.rs
Normal file
48
asynchronix/src/rpc/key_registry.rs
Normal file
@ -0,0 +1,48 @@
|
|||||||
|
use crate::simulation::ActionKey;
|
||||||
|
use crate::time::MonotonicTime;
|
||||||
|
use crate::util::indexed_priority_queue::{IndexedPriorityQueue, InsertKey};
|
||||||
|
|
||||||
|
pub(crate) type KeyRegistryId = InsertKey;
|
||||||
|
|
||||||
|
/// A collection of `ActionKey`s indexed by a unique identifier.
|
||||||
|
#[derive(Default)]
|
||||||
|
pub(crate) struct KeyRegistry {
|
||||||
|
keys: IndexedPriorityQueue<MonotonicTime, ActionKey>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyRegistry {
|
||||||
|
/// Inserts an `ActionKey` into the registry.
|
||||||
|
///
|
||||||
|
/// The provided expiration deadline is the latest time at which the key may
|
||||||
|
/// still be active.
|
||||||
|
pub(crate) fn insert_key(
|
||||||
|
&mut self,
|
||||||
|
action_key: ActionKey,
|
||||||
|
expiration: MonotonicTime,
|
||||||
|
) -> KeyRegistryId {
|
||||||
|
self.keys.insert(expiration, action_key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Inserts a non-expiring `ActionKey` into the registry.
|
||||||
|
pub(crate) fn insert_eternal_key(&mut self, action_key: ActionKey) -> KeyRegistryId {
|
||||||
|
self.keys.insert(MonotonicTime::MAX, action_key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Removes an `ActionKey` from the registry and returns it.
|
||||||
|
///
|
||||||
|
/// Returns `None` if the key was not found in the registry.
|
||||||
|
pub(crate) fn extract_key(&mut self, key_id: KeyRegistryId) -> Option<ActionKey> {
|
||||||
|
self.keys.extract(key_id).map(|(_, key)| key)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Remove keys with an expiration deadline strictly predating the argument.
|
||||||
|
pub(crate) fn remove_expired_keys(&mut self, now: MonotonicTime) {
|
||||||
|
while let Some(expiration) = self.keys.peek_key() {
|
||||||
|
if *expiration >= now {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
self.keys.pull();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
695
asynchronix/src/rpc/simulation_service.rs
Normal file
695
asynchronix/src/rpc/simulation_service.rs
Normal file
@ -0,0 +1,695 @@
|
|||||||
|
use std::error;
|
||||||
|
use std::fmt;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use bytes::Buf;
|
||||||
|
use prost::Message;
|
||||||
|
use prost_types::Timestamp;
|
||||||
|
use tai_time::MonotonicTime;
|
||||||
|
|
||||||
|
use crate::rpc::key_registry::{KeyRegistry, KeyRegistryId};
|
||||||
|
use crate::rpc::EndpointRegistry;
|
||||||
|
use crate::simulation::{SimInit, Simulation};
|
||||||
|
|
||||||
|
use super::codegen::simulation::*;
|
||||||
|
|
||||||
|
/// Protobuf-based simulation manager.
|
||||||
|
///
|
||||||
|
/// A `SimulationService` enables the management of the lifecycle of a
|
||||||
|
/// simulation, including creating a
|
||||||
|
/// [`Simulation`](crate::simulation::Simulation), invoking its methods and
|
||||||
|
/// instantiating a new simulation.
|
||||||
|
///
|
||||||
|
/// Its methods map the various RPC service methods defined in
|
||||||
|
/// `simulation.proto`.
|
||||||
|
pub struct SimulationService {
|
||||||
|
sim_gen: Box<dyn FnMut() -> (SimInit, EndpointRegistry) + Send + 'static>,
|
||||||
|
sim_context: Option<(Simulation, EndpointRegistry, KeyRegistry)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SimulationService {
|
||||||
|
/// Creates a new `SimulationService` without any active simulation.
|
||||||
|
///
|
||||||
|
/// The argument is a closure that is called every time the simulation is
|
||||||
|
/// (re)started by the remote client. It must create a new `SimInit` object
|
||||||
|
/// complemented by a registry that exposes the public event and query
|
||||||
|
/// interface.
|
||||||
|
pub fn new<F>(sim_gen: F) -> Self
|
||||||
|
where
|
||||||
|
F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
|
||||||
|
{
|
||||||
|
Self {
|
||||||
|
sim_gen: Box::new(sim_gen),
|
||||||
|
sim_context: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Processes an encoded `AnyRequest` message and returns an encoded reply.
|
||||||
|
pub fn process_request<B>(&mut self, request_buf: B) -> Result<Vec<u8>, InvalidRequest>
|
||||||
|
where
|
||||||
|
B: Buf,
|
||||||
|
{
|
||||||
|
match AnyRequest::decode(request_buf) {
|
||||||
|
Ok(AnyRequest { request: Some(req) }) => match req {
|
||||||
|
any_request::Request::InitRequest(request) => {
|
||||||
|
Ok(self.init(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::TimeRequest(request) => {
|
||||||
|
Ok(self.time(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::StepRequest(request) => {
|
||||||
|
Ok(self.step(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::StepUntilRequest(request) => {
|
||||||
|
Ok(self.step_until(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::ScheduleEventRequest(request) => {
|
||||||
|
Ok(self.schedule_event(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::CancelEventRequest(request) => {
|
||||||
|
Ok(self.cancel_event(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::ProcessEventRequest(request) => {
|
||||||
|
Ok(self.process_event(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::ProcessQueryRequest(request) => {
|
||||||
|
Ok(self.process_query(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::ReadEventsRequest(request) => {
|
||||||
|
Ok(self.read_events(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::OpenSinkRequest(request) => {
|
||||||
|
Ok(self.open_sink(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
any_request::Request::CloseSinkRequest(request) => {
|
||||||
|
Ok(self.close_sink(request).encode_to_vec())
|
||||||
|
}
|
||||||
|
},
|
||||||
|
Ok(AnyRequest { request: None }) => Err(InvalidRequest {
|
||||||
|
description: "the message did not contain any request".to_string(),
|
||||||
|
}),
|
||||||
|
Err(err) => Err(InvalidRequest {
|
||||||
|
description: format!("bad request: {}", err),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Initialize a simulation with the provided time.
|
||||||
|
///
|
||||||
|
/// If a simulation is already active, it is destructed and replaced with a
|
||||||
|
/// new simulation.
|
||||||
|
///
|
||||||
|
/// If the initialization time is not provided, it is initialized with the
|
||||||
|
/// epoch of `MonotonicTime` (1970-01-01 00:00:00 TAI).
|
||||||
|
pub(crate) fn init(&mut self, request: InitRequest) -> InitReply {
|
||||||
|
let start_time = request.time.unwrap_or_default();
|
||||||
|
let reply = if let Some(start_time) = timestamp_to_monotonic(start_time) {
|
||||||
|
let (sim_init, endpoint_registry) = (self.sim_gen)();
|
||||||
|
let simulation = sim_init.init(start_time);
|
||||||
|
self.sim_context = Some((simulation, endpoint_registry, KeyRegistry::default()));
|
||||||
|
|
||||||
|
init_reply::Result::Empty(())
|
||||||
|
} else {
|
||||||
|
init_reply::Result::Error(Error {
|
||||||
|
code: ErrorCode::InvalidTime as i32,
|
||||||
|
message: "out-of-range nanosecond field".to_string(),
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
InitReply {
|
||||||
|
result: Some(reply),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns the current simulation time.
|
||||||
|
pub(crate) fn time(&mut self, _request: TimeRequest) -> TimeReply {
|
||||||
|
let reply = match &self.sim_context {
|
||||||
|
Some((simulation, ..)) => {
|
||||||
|
if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) {
|
||||||
|
time_reply::Result::Time(timestamp)
|
||||||
|
} else {
|
||||||
|
time_reply::Result::Error(Error {
|
||||||
|
code: ErrorCode::SimulationTimeOutOfRange as i32,
|
||||||
|
message: "the final simulation time is out of range".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => time_reply::Result::Error(Error {
|
||||||
|
code: ErrorCode::SimulationNotStarted as i32,
|
||||||
|
message: "the simulation was not started".to_string(),
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
TimeReply {
|
||||||
|
result: Some(reply),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Advances simulation time to that of the next scheduled event, processing
|
||||||
|
/// that event as well as all other event scheduled for the same time.
|
||||||
|
///
|
||||||
|
/// Processing is gated by a (possibly blocking) call to
|
||||||
|
/// [`Clock::synchronize()`](crate::time::Clock::synchronize) on the
|
||||||
|
/// configured simulation clock. This method blocks until all newly
|
||||||
|
/// processed events have completed.
|
||||||
|
pub(crate) fn step(&mut self, _request: StepRequest) -> StepReply {
|
||||||
|
let reply = match &mut self.sim_context {
|
||||||
|
Some((simulation, ..)) => {
|
||||||
|
simulation.step();
|
||||||
|
if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) {
|
||||||
|
step_reply::Result::Time(timestamp)
|
||||||
|
} else {
|
||||||
|
step_reply::Result::Error(Error {
|
||||||
|
code: ErrorCode::SimulationTimeOutOfRange as i32,
|
||||||
|
message: "the final simulation time is out of range".to_string(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => step_reply::Result::Error(Error {
|
||||||
|
code: ErrorCode::SimulationNotStarted as i32,
|
||||||
|
message: "the simulation was not started".to_string(),
|
||||||
|
}),
|
||||||
|
};
|
||||||
|
|
||||||
|
StepReply {
|
||||||
|
result: Some(reply),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// Iteratively advances the simulation time until the specified deadline,
    /// as if by calling
    /// [`Simulation::step()`](crate::simulation::Simulation::step) repeatedly.
    ///
    /// This method blocks until all events scheduled up to the specified target
    /// time have completed. The simulation time upon completion is equal to the
    /// specified target time, whether or not an event was scheduled for that
    /// time.
    pub(crate) fn step_until(&mut self, request: StepUntilRequest) -> StepUntilReply {
        // Immediately-invoked closure so `?` can be used to funnel every
        // failure into a single `(ErrorCode, &str)` error path.
        let reply = move || -> Result<Timestamp, (ErrorCode, &str)> {
            let deadline = request
                .deadline
                .ok_or((ErrorCode::MissingArgument, "missing deadline argument"))?;

            // The deadline is either an absolute timestamp or a duration
            // relative to the current simulation time.
            let simulation = match deadline {
                step_until_request::Deadline::Time(time) => {
                    // Validate the target time before checking for an active
                    // simulation so the more specific error takes precedence.
                    let time = timestamp_to_monotonic(time)
                        .ok_or((ErrorCode::InvalidTime, "out-of-range nanosecond field"))?;

                    let (simulation, ..) = self.sim_context.as_mut().ok_or((
                        ErrorCode::SimulationNotStarted,
                        "the simulation was not started",
                    ))?;

                    // `step_until` fails only if the target lies in the past.
                    simulation.step_until(time).map_err(|_| {
                        (
                            ErrorCode::InvalidTime,
                            "the specified deadline lies in the past",
                        )
                    })?;

                    simulation
                }

                step_until_request::Deadline::Duration(duration) => {
                    // A zero duration is accepted and is a no-op.
                    let duration = to_positive_duration(duration).ok_or((
                        ErrorCode::InvalidDuration,
                        "the specified deadline lies in the past",
                    ))?;

                    let (simulation, ..) = self.sim_context.as_mut().ok_or((
                        ErrorCode::SimulationNotStarted,
                        "the simulation was not started",
                    ))?;

                    simulation.step_by(duration);

                    simulation
                }
            };

            // Report the final simulation time back to the client.
            let timestamp = monotonic_to_timestamp(simulation.time()).ok_or((
                ErrorCode::SimulationTimeOutOfRange,
                "the final simulation time is out of range",
            ))?;

            Ok(timestamp)
        }();

        StepUntilReply {
            result: Some(match reply {
                Ok(timestamp) => step_until_reply::Result::Time(timestamp),
                Err((code, message)) => step_until_reply::Result::Error(Error {
                    code: code as i32,
                    message: message.to_string(),
                }),
            }),
        }
    }
|
||||||
|
|
||||||
|
/// Schedules an event at a future time.
|
||||||
|
pub(crate) fn schedule_event(&mut self, request: ScheduleEventRequest) -> ScheduleEventReply {
|
||||||
|
let reply = move || -> Result<Option<KeyRegistryId>, (ErrorCode, String)> {
|
||||||
|
let source_name = &request.source_name;
|
||||||
|
let msgpack_event = &request.event;
|
||||||
|
let with_key = request.with_key;
|
||||||
|
let period = request
|
||||||
|
.period
|
||||||
|
.map(|period| {
|
||||||
|
to_strictly_positive_duration(period).ok_or((
|
||||||
|
ErrorCode::InvalidDuration,
|
||||||
|
"the specified event period is not strictly positive".to_string(),
|
||||||
|
))
|
||||||
|
})
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
let (simulation, endpoint_registry, key_registry) =
|
||||||
|
self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let deadline = request.deadline.ok_or((
|
||||||
|
ErrorCode::MissingArgument,
|
||||||
|
"missing deadline argument".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let deadline = match deadline {
|
||||||
|
schedule_event_request::Deadline::Time(time) => timestamp_to_monotonic(time)
|
||||||
|
.ok_or((
|
||||||
|
ErrorCode::InvalidTime,
|
||||||
|
"out-of-range nanosecond field".to_string(),
|
||||||
|
))?,
|
||||||
|
schedule_event_request::Deadline::Duration(duration) => {
|
||||||
|
let duration = to_strictly_positive_duration(duration).ok_or((
|
||||||
|
ErrorCode::InvalidDuration,
|
||||||
|
"the specified scheduling deadline is not in the future".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
simulation.time() + duration
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let source = endpoint_registry.get_event_source_mut(source_name).ok_or((
|
||||||
|
ErrorCode::SourceNotFound,
|
||||||
|
"no event source is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let (action, action_key) = match (with_key, period) {
|
||||||
|
(false, None) => source.event(msgpack_event).map(|action| (action, None)),
|
||||||
|
(false, Some(period)) => source
|
||||||
|
.periodic_event(period, msgpack_event)
|
||||||
|
.map(|action| (action, None)),
|
||||||
|
(true, None) => source
|
||||||
|
.keyed_event(msgpack_event)
|
||||||
|
.map(|(action, key)| (action, Some(key))),
|
||||||
|
(true, Some(period)) => source
|
||||||
|
.keyed_periodic_event(period, msgpack_event)
|
||||||
|
.map(|(action, key)| (action, Some(key))),
|
||||||
|
}
|
||||||
|
.map_err(|_| {
|
||||||
|
(
|
||||||
|
ErrorCode::InvalidMessage,
|
||||||
|
format!(
|
||||||
|
"the event could not be deserialized as type '{}'",
|
||||||
|
source.event_type_name()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let key_id = action_key.map(|action_key| {
|
||||||
|
// Free stale keys from the registry.
|
||||||
|
key_registry.remove_expired_keys(simulation.time());
|
||||||
|
|
||||||
|
if period.is_some() {
|
||||||
|
key_registry.insert_eternal_key(action_key)
|
||||||
|
} else {
|
||||||
|
key_registry.insert_key(action_key, deadline)
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
simulation.process(action);
|
||||||
|
|
||||||
|
Ok(key_id)
|
||||||
|
}();
|
||||||
|
|
||||||
|
ScheduleEventReply {
|
||||||
|
result: Some(match reply {
|
||||||
|
Ok(Some(key_id)) => {
|
||||||
|
let (subkey1, subkey2) = key_id.into_raw_parts();
|
||||||
|
schedule_event_reply::Result::Key(EventKey {
|
||||||
|
subkey1: subkey1
|
||||||
|
.try_into()
|
||||||
|
.expect("action key index is too large to be serialized"),
|
||||||
|
subkey2,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
Ok(None) => schedule_event_reply::Result::Empty(()),
|
||||||
|
Err((code, message)) => schedule_event_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// Cancels a keyed event.
    pub(crate) fn cancel_event(&mut self, request: CancelEventRequest) -> CancelEventReply {
        // Immediately-invoked closure so `?` can funnel every failure into a
        // single `(ErrorCode, String)` error path.
        let reply = move || -> Result<(), (ErrorCode, String)> {
            let key = request.key.ok_or((
                ErrorCode::MissingArgument,
                "missing key argument".to_string(),
            ))?;
            // The wire format carries the first subkey as a fixed-width
            // integer; reject values that do not fit a `usize`.
            let subkey1: usize = key
                .subkey1
                .try_into()
                .map_err(|_| (ErrorCode::InvalidKey, "invalid event key".to_string()))?;
            let subkey2 = key.subkey2;

            let (simulation, _, key_registry) = self.sim_context.as_mut().ok_or((
                ErrorCode::SimulationNotStarted,
                "the simulation was not started".to_string(),
            ))?;

            let key_id = KeyRegistryId::from_raw_parts(subkey1, subkey2);

            // Drop keys whose deadline has passed before looking this one up,
            // so an expired key is reported as invalid rather than cancelled.
            key_registry.remove_expired_keys(simulation.time());
            let key = key_registry.extract_key(key_id).ok_or((
                ErrorCode::InvalidKey,
                "invalid or expired event key".to_string(),
            ))?;

            key.cancel();

            Ok(())
        }();

        CancelEventReply {
            result: Some(match reply {
                Ok(()) => cancel_event_reply::Result::Empty(()),
                Err((code, message)) => cancel_event_reply::Result::Error(Error {
                    code: code as i32,
                    message,
                }),
            }),
        }
    }
|
||||||
|
|
||||||
|
/// Broadcasts an event from an event source immediately, blocking until
|
||||||
|
/// completion.
|
||||||
|
///
|
||||||
|
/// Simulation time remains unchanged.
|
||||||
|
pub(crate) fn process_event(&mut self, request: ProcessEventRequest) -> ProcessEventReply {
|
||||||
|
let reply = move || -> Result<(), (ErrorCode, String)> {
|
||||||
|
let source_name = &request.source_name;
|
||||||
|
let msgpack_event = &request.event;
|
||||||
|
|
||||||
|
let (simulation, registry, _) = self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let source = registry.get_event_source_mut(source_name).ok_or((
|
||||||
|
ErrorCode::SourceNotFound,
|
||||||
|
"no source is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let event = source.event(msgpack_event).map_err(|_| {
|
||||||
|
(
|
||||||
|
ErrorCode::InvalidMessage,
|
||||||
|
format!(
|
||||||
|
"the event could not be deserialized as type '{}'",
|
||||||
|
source.event_type_name()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
simulation.process(event);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}();
|
||||||
|
|
||||||
|
ProcessEventReply {
|
||||||
|
result: Some(match reply {
|
||||||
|
Ok(()) => process_event_reply::Result::Empty(()),
|
||||||
|
Err((code, message)) => process_event_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
}),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Broadcasts an event from an event source immediately, blocking until
|
||||||
|
/// completion.
|
||||||
|
///
|
||||||
|
/// Simulation time remains unchanged.
|
||||||
|
pub(crate) fn process_query(&mut self, request: ProcessQueryRequest) -> ProcessQueryReply {
|
||||||
|
let reply = move || -> Result<Vec<Vec<u8>>, (ErrorCode, String)> {
|
||||||
|
let source_name = &request.source_name;
|
||||||
|
let msgpack_request = &request.request;
|
||||||
|
|
||||||
|
let (simulation, registry, _) = self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let source = registry.get_query_source_mut(source_name).ok_or((
|
||||||
|
ErrorCode::SourceNotFound,
|
||||||
|
"no source is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let (query, mut promise) = source.query(msgpack_request).map_err(|_| {
|
||||||
|
(
|
||||||
|
ErrorCode::InvalidMessage,
|
||||||
|
format!(
|
||||||
|
"the request could not be deserialized as type '{}'",
|
||||||
|
source.request_type_name()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
simulation.process(query);
|
||||||
|
|
||||||
|
let replies = promise.take_collect().ok_or((
|
||||||
|
ErrorCode::InternalError,
|
||||||
|
"a reply to the query was expected but none was available".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
replies.map_err(|_| {
|
||||||
|
(
|
||||||
|
ErrorCode::InvalidMessage,
|
||||||
|
format!(
|
||||||
|
"the reply could not be serialized as type '{}'",
|
||||||
|
source.reply_type_name()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}();
|
||||||
|
|
||||||
|
match reply {
|
||||||
|
Ok(replies) => ProcessQueryReply {
|
||||||
|
replies,
|
||||||
|
result: Some(process_query_reply::Result::Empty(())),
|
||||||
|
},
|
||||||
|
Err((code, message)) => ProcessQueryReply {
|
||||||
|
replies: Vec::new(),
|
||||||
|
result: Some(process_query_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
})),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Read all events from an event sink.
|
||||||
|
pub(crate) fn read_events(&mut self, request: ReadEventsRequest) -> ReadEventsReply {
|
||||||
|
let reply = move || -> Result<Vec<Vec<u8>>, (ErrorCode, String)> {
|
||||||
|
let sink_name = &request.sink_name;
|
||||||
|
|
||||||
|
let (_, registry, _) = self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let sink = registry.get_event_sink_mut(sink_name).ok_or((
|
||||||
|
ErrorCode::SinkNotFound,
|
||||||
|
"no sink is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
sink.collect().map_err(|_| {
|
||||||
|
(
|
||||||
|
ErrorCode::InvalidMessage,
|
||||||
|
format!(
|
||||||
|
"the event could not be serialized from type '{}'",
|
||||||
|
sink.event_type_name()
|
||||||
|
),
|
||||||
|
)
|
||||||
|
})
|
||||||
|
}();
|
||||||
|
|
||||||
|
match reply {
|
||||||
|
Ok(events) => ReadEventsReply {
|
||||||
|
events,
|
||||||
|
result: Some(read_events_reply::Result::Empty(())),
|
||||||
|
},
|
||||||
|
Err((code, message)) => ReadEventsReply {
|
||||||
|
events: Vec::new(),
|
||||||
|
result: Some(read_events_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
})),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Opens an event sink.
|
||||||
|
pub(crate) fn open_sink(&mut self, request: OpenSinkRequest) -> OpenSinkReply {
|
||||||
|
let reply = move || -> Result<(), (ErrorCode, String)> {
|
||||||
|
let sink_name = &request.sink_name;
|
||||||
|
|
||||||
|
let (_, registry, _) = self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let sink = registry.get_event_sink_mut(sink_name).ok_or((
|
||||||
|
ErrorCode::SinkNotFound,
|
||||||
|
"no sink is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
sink.open();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}();
|
||||||
|
|
||||||
|
match reply {
|
||||||
|
Ok(()) => OpenSinkReply {
|
||||||
|
result: Some(open_sink_reply::Result::Empty(())),
|
||||||
|
},
|
||||||
|
Err((code, message)) => OpenSinkReply {
|
||||||
|
result: Some(open_sink_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
})),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Closes an event sink.
|
||||||
|
pub(crate) fn close_sink(&mut self, request: CloseSinkRequest) -> CloseSinkReply {
|
||||||
|
let reply = move || -> Result<(), (ErrorCode, String)> {
|
||||||
|
let sink_name = &request.sink_name;
|
||||||
|
|
||||||
|
let (_, registry, _) = self.sim_context.as_mut().ok_or((
|
||||||
|
ErrorCode::SimulationNotStarted,
|
||||||
|
"the simulation was not started".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let sink = registry.get_event_sink_mut(sink_name).ok_or((
|
||||||
|
ErrorCode::SinkNotFound,
|
||||||
|
"no sink is registered with the name '{}'".to_string(),
|
||||||
|
))?;
|
||||||
|
|
||||||
|
sink.close();
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}();
|
||||||
|
|
||||||
|
match reply {
|
||||||
|
Ok(()) => CloseSinkReply {
|
||||||
|
result: Some(close_sink_reply::Result::Empty(())),
|
||||||
|
},
|
||||||
|
Err((code, message)) => CloseSinkReply {
|
||||||
|
result: Some(close_sink_reply::Result::Error(Error {
|
||||||
|
code: code as i32,
|
||||||
|
message,
|
||||||
|
})),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for SimulationService {
    // Opaque debug output: the service holds a bench-generating closure and
    // simulation state that are not themselves `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SimulationService").finish_non_exhaustive()
    }
}
|
||||||
|
|
||||||
|
/// Error returned when an incoming message could not be decoded or did not
/// contain a valid request.
#[derive(Clone, Debug)]
pub struct InvalidRequest {
    // Human-readable description of why the request was rejected.
    description: String,
}
|
||||||
|
|
||||||
|
impl fmt::Display for InvalidRequest {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
f.write_str(&self.description)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// `InvalidRequest` has no underlying source error, so the default trait
// methods are sufficient.
impl error::Error for InvalidRequest {}
|
||||||
|
|
||||||
|
/// Attempts a cast from a `MonotonicTime` to a protobuf `Timestamp`.
|
||||||
|
///
|
||||||
|
/// This will fail if the time is outside the protobuf-specified range for
|
||||||
|
/// timestamps (0001-01-01 00:00:00 to 9999-12-31 23:59:59).
|
||||||
|
fn monotonic_to_timestamp(monotonic_time: MonotonicTime) -> Option<Timestamp> {
|
||||||
|
// Unix timestamp for 0001-01-01 00:00:00, the minimum accepted by
|
||||||
|
// protobuf's specification for the `Timestamp` type.
|
||||||
|
const MIN_SECS: i64 = -62135596800;
|
||||||
|
// Unix timestamp for 9999-12-31 23:59:59, the maximum accepted by
|
||||||
|
// protobuf's specification for the `Timestamp` type.
|
||||||
|
const MAX_SECS: i64 = 253402300799;
|
||||||
|
|
||||||
|
let secs = monotonic_time.as_secs();
|
||||||
|
if !(MIN_SECS..=MAX_SECS).contains(&secs) {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Timestamp {
|
||||||
|
seconds: secs,
|
||||||
|
nanos: monotonic_time.subsec_nanos() as i32,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts a cast from a protobuf `Timestamp` to a `MonotonicTime`.
|
||||||
|
///
|
||||||
|
/// This should never fail provided that the `Timestamp` complies with the
|
||||||
|
/// protobuf specification. It can only fail if the nanosecond part is negative
|
||||||
|
/// or greater than 999'999'999.
|
||||||
|
fn timestamp_to_monotonic(timestamp: Timestamp) -> Option<MonotonicTime> {
|
||||||
|
let nanos: u32 = timestamp.nanos.try_into().ok()?;
|
||||||
|
|
||||||
|
MonotonicTime::new(timestamp.seconds, nanos)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts a cast from a protobuf `Duration` to a `std::time::Duration`.
|
||||||
|
///
|
||||||
|
/// If the `Duration` complies with the protobuf specification, this can only
|
||||||
|
/// fail if the duration is negative.
|
||||||
|
fn to_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
|
||||||
|
if duration.seconds < 0 || duration.nanos < 0 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Duration::new(
|
||||||
|
duration.seconds as u64,
|
||||||
|
duration.nanos as u32,
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Attempts a cast from a protobuf `Duration` to a strictly positive
|
||||||
|
/// `std::time::Duration`.
|
||||||
|
///
|
||||||
|
/// If the `Duration` complies with the protobuf specification, this can only
|
||||||
|
/// fail if the duration is negative or null.
|
||||||
|
fn to_strictly_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
|
||||||
|
if duration.seconds < 0 || duration.nanos < 0 || (duration.seconds == 0 && duration.nanos == 0)
|
||||||
|
{
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Duration::new(
|
||||||
|
duration.seconds as u64,
|
||||||
|
duration.nanos as u32,
|
||||||
|
))
|
||||||
|
}
|
82
asynchronix/src/rpc/wasm.rs
Normal file
82
asynchronix/src/rpc/wasm.rs
Normal file
@ -0,0 +1,82 @@
|
|||||||
|
//! WASM simulation service.
|
||||||
|
//!
|
||||||
|
//! This module provides [`WasmSimulationService`], a thin wrapper over a
|
||||||
|
//! [`SimulationService`] that can be used from JavaScript.
|
||||||
|
//!
|
||||||
|
//! Although it is readily possible to use a
|
||||||
|
//! [`Simulation`](crate::simulation::Simulation) object from WASM,
|
||||||
|
//! [`WasmSimulationService`] goes further by exposing the complete simulation
|
||||||
|
//! API to JavaScript through protobuf.
|
||||||
|
//!
|
||||||
|
//! Keep in mind that WASM only supports single-threaded execution and therefore
|
||||||
|
//! any simulation bench compiled to WASM should instantiate simulations with
|
||||||
|
//! either [`SimInit::new()`](crate::simulation::SimInit::new) or
|
||||||
|
//! [`SimInit::with_num_threads(1)`](crate::simulation::SimInit::with_num_threads),
|
||||||
|
//! failing which the simulation will panic upon initialization.
|
||||||
|
//!
|
||||||
|
//! [`WasmSimulationService`] is exported to the JavaScript namespace as
|
||||||
|
//! `SimulationService`, and [`WasmSimulationService::process_request`] as
|
||||||
|
//! `SimulationService.processRequest`.
|
||||||
|
|
||||||
|
use wasm_bindgen::prelude::*;
|
||||||
|
|
||||||
|
use super::{EndpointRegistry, SimulationService};
|
||||||
|
use crate::simulation::SimInit;
|
||||||
|
|
||||||
|
/// A simulation service that can be used from JavaScript.
|
||||||
|
///
|
||||||
|
/// This would typically be used by implementing a `run` function in Rust and
|
||||||
|
/// export it to WASM:
|
||||||
|
///
|
||||||
|
/// ```no_run
|
||||||
|
/// #[wasm_bindgen]
|
||||||
|
/// pub fn run() -> WasmSimulationService {
|
||||||
|
/// WasmSimulationService::new(my_custom_bench_generator)
|
||||||
|
/// }
|
||||||
|
/// ```
|
||||||
|
///
|
||||||
|
/// which can then be used on the JS side to create a `SimulationService` as a
|
||||||
|
/// JS object, e.g. with:
|
||||||
|
///
|
||||||
|
/// ```js
|
||||||
|
/// const simu = run();
|
||||||
|
///
|
||||||
|
/// // ...build a protobuf request and encode it as a `Uint8Array`...
|
||||||
|
///
|
||||||
|
/// const reply = simu.processRequest(myRequest);
|
||||||
|
///
|
||||||
|
/// // ...decode the protobuf reply...
|
||||||
|
/// ```
|
||||||
|
#[wasm_bindgen(js_name = SimulationService)]
#[derive(Debug)]
// Newtype wrapper delegating to the transport-agnostic `SimulationService`.
pub struct WasmSimulationService(SimulationService);
|
||||||
|
|
||||||
|
#[wasm_bindgen(js_class = SimulationService)]
|
||||||
|
impl WasmSimulationService {
|
||||||
|
/// Processes a protobuf-encoded `AnyRequest` message and returns a
|
||||||
|
/// protobuf-encoded reply.
|
||||||
|
///
|
||||||
|
/// For the Protocol Buffer definitions, see the `simulation.proto` file.
|
||||||
|
#[wasm_bindgen(js_name = processRequest)]
|
||||||
|
pub fn process_request(&mut self, request: &[u8]) -> Result<Box<[u8]>, JsError> {
|
||||||
|
self.0
|
||||||
|
.process_request(request)
|
||||||
|
.map(|reply| reply.into_boxed_slice())
|
||||||
|
.map_err(|e| JsError::new(&e.to_string()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl WasmSimulationService {
    /// Creates a new `SimulationService` without any active simulation.
    ///
    /// The argument is a closure that is called every time the simulation is
    /// (re)started by the remote client. It must create a new `SimInit` object
    /// complemented by a registry that exposes the public event and query
    /// interface.
    // Not exported through wasm-bindgen: the generator closure is a Rust-side
    // construct, so benches call this from Rust and export the result.
    pub fn new<F>(sim_gen: F) -> Self
    where
        F: FnMut() -> (SimInit, EndpointRegistry) + Send + 'static,
    {
        Self(SimulationService::new(sim_gen))
    }
}
|
@ -14,8 +14,9 @@
|
|||||||
//! using the [`Address`]es of the target models,
|
//! using the [`Address`]es of the target models,
|
||||||
//! 3. instantiation of a [`SimInit`] simulation builder and migration of all
|
//! 3. instantiation of a [`SimInit`] simulation builder and migration of all
|
||||||
//! models and mailboxes to the builder with [`SimInit::add_model()`],
|
//! models and mailboxes to the builder with [`SimInit::add_model()`],
|
||||||
//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`] or
|
//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`],
|
||||||
//! [`SimInit::init_with_clock()`],
|
//! possibly preceded by the setup of a custom clock with
|
||||||
|
//! [`SimInit::set_clock()`],
|
||||||
//! 5. discrete-time simulation, which typically involves scheduling events and
|
//! 5. discrete-time simulation, which typically involves scheduling events and
|
||||||
//! incrementing simulation time while observing the models outputs.
|
//! incrementing simulation time while observing the models outputs.
|
||||||
//!
|
//!
|
||||||
@ -76,7 +77,7 @@
|
|||||||
//! such pathological deadlocks and the "expected" deadlock that occurs when all
|
//! such pathological deadlocks and the "expected" deadlock that occurs when all
|
||||||
//! events in a given time slice have completed and all models are starved on an
|
//! events in a given time slice have completed and all models are starved on an
|
||||||
//! empty mailbox. Consequently, blocking method such as [`SimInit::init()`],
|
//! empty mailbox. Consequently, blocking method such as [`SimInit::init()`],
|
||||||
//! [`Simulation::step()`], [`Simulation::send_event()`], etc., will return
|
//! [`Simulation::step()`], [`Simulation::process_event()`], etc., will return
|
||||||
//! without error after a pathological deadlock, leaving the user responsible
|
//! without error after a pathological deadlock, leaving the user responsible
|
||||||
//! for inferring the deadlock from the behavior of the simulation in the next
|
//! for inferring the deadlock from the behavior of the simulation in the next
|
||||||
//! steps. This is obviously not ideal, but is hopefully only a temporary state
|
//! steps. This is obviously not ideal, but is hopefully only a temporary state
|
||||||
@ -86,18 +87,20 @@
|
|||||||
//!
|
//!
|
||||||
//! Although uncommon, there is sometimes a need for connecting and/or
|
//! Although uncommon, there is sometimes a need for connecting and/or
|
||||||
//! disconnecting models after they have been migrated to the simulation.
|
//! disconnecting models after they have been migrated to the simulation.
|
||||||
//! Likewise, one may want to connect or disconnect an [`EventSlot`] or
|
//! Likewise, one may want to connect or disconnect an
|
||||||
//! [`EventStream`] after the simulation has been instantiated.
|
//! [`EventSlot`](crate::ports::EventSlot) or
|
||||||
|
//! [`EventBuffer`](crate::ports::EventBuffer) after the simulation has been
|
||||||
|
//! instantiated.
|
||||||
//!
|
//!
|
||||||
//! There is actually a very simple solution to this problem: since the
|
//! There is actually a very simple solution to this problem: since the
|
||||||
//! [`InputFn`](crate::model::InputFn) trait also matches closures of type
|
//! [`InputFn`] trait also matches closures of type `FnOnce(&mut impl Model)`,
|
||||||
//! `FnOnce(&mut impl Model)`, it is enough to invoke
|
//! it is enough to invoke [`Simulation::process_event()`] with a closure that
|
||||||
//! [`Simulation::send_event()`] with a closure that connects or disconnects a
|
//! connects or disconnects a port, such as:
|
||||||
//! port, such as:
|
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! # use asynchronix::model::{Model, Output};
|
//! # use asynchronix::model::{Context, Model};
|
||||||
//! # use asynchronix::time::{MonotonicTime, Scheduler};
|
//! # use asynchronix::ports::Output;
|
||||||
|
//! # use asynchronix::time::MonotonicTime;
|
||||||
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
//! # use asynchronix::simulation::{Mailbox, SimInit};
|
||||||
//! # pub struct ModelA {
|
//! # pub struct ModelA {
|
||||||
//! # pub output: Output<i32>,
|
//! # pub output: Output<i32>,
|
||||||
@ -111,7 +114,7 @@
|
|||||||
//! # let modelA_addr = Mailbox::<ModelA>::new().address();
|
//! # let modelA_addr = Mailbox::<ModelA>::new().address();
|
||||||
//! # let modelB_addr = Mailbox::<ModelB>::new().address();
|
//! # let modelB_addr = Mailbox::<ModelB>::new().address();
|
||||||
//! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH);
|
//! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH);
|
||||||
//! simu.send_event(
|
//! simu.process_event(
|
||||||
//! |m: &mut ModelA| {
|
//! |m: &mut ModelA| {
|
||||||
//! m.output.connect(ModelB::input, modelB_addr);
|
//! m.output.connect(ModelB::input, modelB_addr);
|
||||||
//! },
|
//! },
|
||||||
@ -119,12 +122,17 @@
|
|||||||
//! &modelA_addr
|
//! &modelA_addr
|
||||||
//! );
|
//! );
|
||||||
//! ```
|
//! ```
|
||||||
mod endpoints;
|
|
||||||
mod mailbox;
|
mod mailbox;
|
||||||
|
mod scheduler;
|
||||||
mod sim_init;
|
mod sim_init;
|
||||||
|
|
||||||
pub use endpoints::{EventSlot, EventStream};
|
|
||||||
pub use mailbox::{Address, Mailbox};
|
pub use mailbox::{Address, Mailbox};
|
||||||
|
pub(crate) use scheduler::{
|
||||||
|
schedule_event_at_unchecked, schedule_keyed_event_at_unchecked,
|
||||||
|
schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked,
|
||||||
|
KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction, SchedulerQueue,
|
||||||
|
};
|
||||||
|
pub use scheduler::{Action, ActionKey, Deadline, SchedulingError};
|
||||||
pub use sim_init::SimInit;
|
pub use sim_init::SimInit;
|
||||||
|
|
||||||
use std::error::Error;
|
use std::error::Error;
|
||||||
@ -136,37 +144,33 @@ use std::time::Duration;
|
|||||||
use recycle_box::{coerce_box, RecycleBox};
|
use recycle_box::{coerce_box, RecycleBox};
|
||||||
|
|
||||||
use crate::executor::Executor;
|
use crate::executor::Executor;
|
||||||
use crate::model::{InputFn, Model, ReplierFn};
|
use crate::model::{Context, Model, SetupContext};
|
||||||
use crate::time::{
|
use crate::ports::{InputFn, ReplierFn};
|
||||||
self, Clock, Deadline, EventKey, MonotonicTime, NoClock, ScheduledEvent, SchedulerQueue,
|
use crate::time::{Clock, MonotonicTime, TearableAtomicTime};
|
||||||
SchedulingError, TearableAtomicTime,
|
use crate::util::seq_futures::SeqFuture;
|
||||||
};
|
|
||||||
use crate::util::futures::SeqFuture;
|
|
||||||
use crate::util::slot;
|
use crate::util::slot;
|
||||||
use crate::util::sync_cell::SyncCell;
|
use crate::util::sync_cell::{SyncCell, SyncCellReader};
|
||||||
|
|
||||||
/// Simulation environment.
|
/// Simulation environment.
|
||||||
///
|
///
|
||||||
/// A `Simulation` is created by calling
|
/// A `Simulation` is created by calling
|
||||||
/// [`SimInit::init()`](crate::simulation::SimInit::init) or
|
/// [`SimInit::init()`](crate::simulation::SimInit::init) on a simulation
|
||||||
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock)
|
/// initializer. It contains an asynchronous executor that runs all simulation
|
||||||
/// method on a simulation initializer. It contains an asynchronous executor
|
/// models added beforehand to [`SimInit`].
|
||||||
/// that runs all simulation models added beforehand to
|
|
||||||
/// [`SimInit`](crate::simulation::SimInit).
|
|
||||||
///
|
///
|
||||||
/// A [`Simulation`] object also manages an event scheduling queue and
|
/// A [`Simulation`] object also manages an event scheduling queue and
|
||||||
/// simulation time. The scheduling queue can be accessed from the simulation
|
/// simulation time. The scheduling queue can be accessed from the simulation
|
||||||
/// itself, but also from models via the optional
|
/// itself, but also from models via the optional
|
||||||
/// [`&Scheduler`](time::Scheduler) argument of input and replier port methods.
|
/// [`&Context`](crate::model::Context) argument of input and replier port methods.
|
||||||
/// Likewise, simulation time can be accessed with the [`Simulation::time()`]
|
/// Likewise, simulation time can be accessed with the [`Simulation::time()`]
|
||||||
/// method, or from models with the [`Scheduler::time()`](time::Scheduler::time)
|
/// method, or from models with the [`Context::time()`](crate::model::Context::time)
|
||||||
/// method.
|
/// method.
|
||||||
///
|
///
|
||||||
/// Events and queries can be scheduled immediately, *i.e.* for the current
|
/// Events and queries can be scheduled immediately, *i.e.* for the current
|
||||||
/// simulation time, using [`send_event()`](Simulation::send_event) and
|
/// simulation time, using [`process_event()`](Simulation::process_event) and
|
||||||
/// [`send_query()`](Simulation::send_query). Calling these methods will block
|
/// [`send_query()`](Simulation::process_query). Calling these methods will
|
||||||
/// until all computations triggered by such event or query have completed. In
|
/// block until all computations triggered by such event or query have
|
||||||
/// the case of queries, the response is returned.
|
/// completed. In the case of queries, the response is returned.
|
||||||
///
|
///
|
||||||
/// Events can also be scheduled at a future simulation time using one of the
|
/// Events can also be scheduled at a future simulation time using one of the
|
||||||
/// [`schedule_*()`](Simulation::schedule_event) method. These methods queue an
|
/// [`schedule_*()`](Simulation::schedule_event) method. These methods queue an
|
||||||
@ -177,7 +181,7 @@ use crate::util::sync_cell::SyncCell;
|
|||||||
///
|
///
|
||||||
/// 1. increment simulation time until that of the next scheduled event in
|
/// 1. increment simulation time until that of the next scheduled event in
|
||||||
/// chronological order, then
|
/// chronological order, then
|
||||||
/// 2. call [`Clock::synchronize()`](time::Clock::synchronize) which, unless the
|
/// 2. call [`Clock::synchronize()`](crate::time::Clock::synchronize) which, unless the
|
||||||
/// simulation is configured to run as fast as possible, blocks until the
|
/// simulation is configured to run as fast as possible, blocks until the
|
||||||
/// desired wall clock time, and finally
|
/// desired wall clock time, and finally
|
||||||
/// 3. run all computations scheduled for the new simulation time.
|
/// 3. run all computations scheduled for the new simulation time.
|
||||||
@ -193,32 +197,18 @@ pub struct Simulation {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Simulation {
|
impl Simulation {
|
||||||
/// Creates a new `Simulation`.
|
/// Creates a new `Simulation` with the specified clock.
|
||||||
pub(crate) fn new(
|
pub(crate) fn new(
|
||||||
executor: Executor,
|
executor: Executor,
|
||||||
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
||||||
time: SyncCell<TearableAtomicTime>,
|
time: SyncCell<TearableAtomicTime>,
|
||||||
|
clock: Box<dyn Clock + 'static>,
|
||||||
) -> Self {
|
) -> Self {
|
||||||
Self {
|
Self {
|
||||||
executor,
|
executor,
|
||||||
scheduler_queue,
|
scheduler_queue,
|
||||||
time,
|
time,
|
||||||
clock: Box::new(NoClock::new()),
|
clock,
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates a new `Simulation` with the specified clock.
|
|
||||||
pub(crate) fn with_clock(
|
|
||||||
executor: Executor,
|
|
||||||
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
|
||||||
time: SyncCell<TearableAtomicTime>,
|
|
||||||
clock: impl Clock + 'static,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
executor,
|
|
||||||
scheduler_queue,
|
|
||||||
time,
|
|
||||||
clock: Box::new(clock),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -231,7 +221,7 @@ impl Simulation {
|
|||||||
/// that event as well as all other event scheduled for the same time.
|
/// that event as well as all other event scheduled for the same time.
|
||||||
///
|
///
|
||||||
/// Processing is gated by a (possibly blocking) call to
|
/// Processing is gated by a (possibly blocking) call to
|
||||||
/// [`Clock::synchronize()`](time::Clock::synchronize) on the configured
|
/// [`Clock::synchronize()`](crate::time::Clock::synchronize) on the configured
|
||||||
/// simulation clock. This method blocks until all newly processed events
|
/// simulation clock. This method blocks until all newly processed events
|
||||||
/// have completed.
|
/// have completed.
|
||||||
pub fn step(&mut self) {
|
pub fn step(&mut self) {
|
||||||
@ -267,6 +257,37 @@ impl Simulation {
|
|||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Schedules an action at a future time.
|
||||||
|
///
|
||||||
|
/// An error is returned if the specified time is not in the future of the
|
||||||
|
/// current simulation time.
|
||||||
|
///
|
||||||
|
/// If multiple actions send events at the same simulation time to the same
|
||||||
|
/// model, these events are guaranteed to be processed according to the
|
||||||
|
/// scheduling order of the actions.
|
||||||
|
pub fn schedule(
|
||||||
|
&mut self,
|
||||||
|
deadline: impl Deadline,
|
||||||
|
action: Action,
|
||||||
|
) -> Result<(), SchedulingError> {
|
||||||
|
let now = self.time();
|
||||||
|
let time = deadline.into_time(now);
|
||||||
|
if now >= time {
|
||||||
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut scheduler_queue = self.scheduler_queue.lock().unwrap();
|
||||||
|
|
||||||
|
// The channel ID is set to the same value for all actions. This
|
||||||
|
// ensures that the relative scheduling order of all source events is
|
||||||
|
// preserved, which is important if some of them target the same models.
|
||||||
|
// The value 0 was chosen as it prevents collisions with channel IDs as
|
||||||
|
// the latter are always non-zero.
|
||||||
|
scheduler_queue.insert((time, 0), action);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
/// Schedules an event at a future time.
|
/// Schedules an event at a future time.
|
||||||
///
|
///
|
||||||
/// An error is returned if the specified time is not in the future of the
|
/// An error is returned if the specified time is not in the future of the
|
||||||
@ -275,7 +296,7 @@ impl Simulation {
|
|||||||
/// Events scheduled for the same time and targeting the same model are
|
/// Events scheduled for the same time and targeting the same model are
|
||||||
/// guaranteed to be processed according to the scheduling order.
|
/// guaranteed to be processed according to the scheduling order.
|
||||||
///
|
///
|
||||||
/// See also: [`time::Scheduler::schedule_event`].
|
/// See also: [`Context::schedule_event`](crate::model::Context::schedule_event).
|
||||||
pub fn schedule_event<M, F, T, S>(
|
pub fn schedule_event<M, F, T, S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
deadline: impl Deadline,
|
deadline: impl Deadline,
|
||||||
@ -294,7 +315,7 @@ impl Simulation {
|
|||||||
if now >= time {
|
if now >= time {
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
}
|
}
|
||||||
time::schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue);
|
schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue);
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -307,14 +328,14 @@ impl Simulation {
|
|||||||
/// Events scheduled for the same time and targeting the same model are
|
/// Events scheduled for the same time and targeting the same model are
|
||||||
/// guaranteed to be processed according to the scheduling order.
|
/// guaranteed to be processed according to the scheduling order.
|
||||||
///
|
///
|
||||||
/// See also: [`time::Scheduler::schedule_keyed_event`].
|
/// See also: [`Context::schedule_keyed_event`](crate::model::Context::schedule_keyed_event).
|
||||||
pub fn schedule_keyed_event<M, F, T, S>(
|
pub fn schedule_keyed_event<M, F, T, S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
deadline: impl Deadline,
|
deadline: impl Deadline,
|
||||||
func: F,
|
func: F,
|
||||||
arg: T,
|
arg: T,
|
||||||
address: impl Into<Address<M>>,
|
address: impl Into<Address<M>>,
|
||||||
) -> Result<EventKey, SchedulingError>
|
) -> Result<ActionKey, SchedulingError>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
@ -326,7 +347,7 @@ impl Simulation {
|
|||||||
if now >= time {
|
if now >= time {
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
return Err(SchedulingError::InvalidScheduledTime);
|
||||||
}
|
}
|
||||||
let event_key = time::schedule_keyed_event_at_unchecked(
|
let event_key = schedule_keyed_event_at_unchecked(
|
||||||
time,
|
time,
|
||||||
func,
|
func,
|
||||||
arg,
|
arg,
|
||||||
@ -345,7 +366,7 @@ impl Simulation {
|
|||||||
/// Events scheduled for the same time and targeting the same model are
|
/// Events scheduled for the same time and targeting the same model are
|
||||||
/// guaranteed to be processed according to the scheduling order.
|
/// guaranteed to be processed according to the scheduling order.
|
||||||
///
|
///
|
||||||
/// See also: [`time::Scheduler::schedule_periodic_event`].
|
/// See also: [`Context::schedule_periodic_event`](crate::model::Context::schedule_periodic_event).
|
||||||
pub fn schedule_periodic_event<M, F, T, S>(
|
pub fn schedule_periodic_event<M, F, T, S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
deadline: impl Deadline,
|
deadline: impl Deadline,
|
||||||
@ -368,7 +389,7 @@ impl Simulation {
|
|||||||
if period.is_zero() {
|
if period.is_zero() {
|
||||||
return Err(SchedulingError::NullRepetitionPeriod);
|
return Err(SchedulingError::NullRepetitionPeriod);
|
||||||
}
|
}
|
||||||
time::schedule_periodic_event_at_unchecked(
|
schedule_periodic_event_at_unchecked(
|
||||||
time,
|
time,
|
||||||
period,
|
period,
|
||||||
func,
|
func,
|
||||||
@ -389,7 +410,7 @@ impl Simulation {
|
|||||||
/// Events scheduled for the same time and targeting the same model are
|
/// Events scheduled for the same time and targeting the same model are
|
||||||
/// guaranteed to be processed according to the scheduling order.
|
/// guaranteed to be processed according to the scheduling order.
|
||||||
///
|
///
|
||||||
/// See also: [`time::Scheduler::schedule_keyed_periodic_event`].
|
/// See also: [`Context::schedule_keyed_periodic_event`](crate::model::Context::schedule_keyed_periodic_event).
|
||||||
pub fn schedule_keyed_periodic_event<M, F, T, S>(
|
pub fn schedule_keyed_periodic_event<M, F, T, S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
deadline: impl Deadline,
|
deadline: impl Deadline,
|
||||||
@ -397,7 +418,7 @@ impl Simulation {
|
|||||||
func: F,
|
func: F,
|
||||||
arg: T,
|
arg: T,
|
||||||
address: impl Into<Address<M>>,
|
address: impl Into<Address<M>>,
|
||||||
) -> Result<EventKey, SchedulingError>
|
) -> Result<ActionKey, SchedulingError>
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
@ -412,7 +433,7 @@ impl Simulation {
|
|||||||
if period.is_zero() {
|
if period.is_zero() {
|
||||||
return Err(SchedulingError::NullRepetitionPeriod);
|
return Err(SchedulingError::NullRepetitionPeriod);
|
||||||
}
|
}
|
||||||
let event_key = time::schedule_periodic_keyed_event_at_unchecked(
|
let event_key = schedule_periodic_keyed_event_at_unchecked(
|
||||||
time,
|
time,
|
||||||
period,
|
period,
|
||||||
func,
|
func,
|
||||||
@ -424,10 +445,19 @@ impl Simulation {
|
|||||||
Ok(event_key)
|
Ok(event_key)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sends and processes an event, blocking until completion.
|
/// Processes an action immediately, blocking until completion.
|
||||||
|
///
|
||||||
|
/// Simulation time remains unchanged. The periodicity of the action, if
|
||||||
|
/// any, is ignored.
|
||||||
|
pub fn process(&mut self, action: Action) {
|
||||||
|
action.spawn_and_forget(&self.executor);
|
||||||
|
self.executor.run();
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Processes an event immediately, blocking until completion.
|
||||||
///
|
///
|
||||||
/// Simulation time remains unchanged.
|
/// Simulation time remains unchanged.
|
||||||
pub fn send_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
|
pub fn process_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
|
||||||
where
|
where
|
||||||
M: Model,
|
M: Model,
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
@ -454,10 +484,10 @@ impl Simulation {
|
|||||||
self.executor.run();
|
self.executor.run();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sends and processes a query, blocking until completion.
|
/// Processes a query immediately, blocking until completion.
|
||||||
///
|
///
|
||||||
/// Simulation time remains unchanged.
|
/// Simulation time remains unchanged.
|
||||||
pub fn send_query<M, F, T, R, S>(
|
pub fn process_query<M, F, T, R, S>(
|
||||||
&mut self,
|
&mut self,
|
||||||
func: F,
|
func: F,
|
||||||
arg: T,
|
arg: T,
|
||||||
@ -497,36 +527,34 @@ impl Simulation {
|
|||||||
reply_reader.try_read().map_err(|_| QueryError {})
|
reply_reader.try_read().map_err(|_| QueryError {})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Advances simulation time to that of the next scheduled event if its
|
/// Advances simulation time to that of the next scheduled action if its
|
||||||
/// scheduling time does not exceed the specified bound, processing that
|
/// scheduling time does not exceed the specified bound, processing that
|
||||||
/// event as well as all other events scheduled for the same time.
|
/// action as well as all other actions scheduled for the same time.
|
||||||
///
|
///
|
||||||
/// If at least one event was found that satisfied the time bound, the
|
/// If at least one action was found that satisfied the time bound, the
|
||||||
/// corresponding new simulation time is returned.
|
/// corresponding new simulation time is returned.
|
||||||
fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option<MonotonicTime> {
|
fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option<MonotonicTime> {
|
||||||
// Function pulling the next event. If the event is periodic, it is
|
// Function pulling the next action. If the action is periodic, it is
|
||||||
// immediately re-scheduled.
|
// immediately re-scheduled.
|
||||||
fn pull_next_event(
|
fn pull_next_action(scheduler_queue: &mut MutexGuard<SchedulerQueue>) -> Action {
|
||||||
scheduler_queue: &mut MutexGuard<SchedulerQueue>,
|
let ((time, channel_id), action) = scheduler_queue.pull().unwrap();
|
||||||
) -> Box<dyn ScheduledEvent> {
|
if let Some((action_clone, period)) = action.next() {
|
||||||
let ((time, channel_id), event) = scheduler_queue.pull().unwrap();
|
scheduler_queue.insert((time + period, channel_id), action_clone);
|
||||||
if let Some((event_clone, period)) = event.next() {
|
|
||||||
scheduler_queue.insert((time + period, channel_id), event_clone);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
event
|
action
|
||||||
}
|
}
|
||||||
|
|
||||||
// Closure returning the next key which time stamp is no older than the
|
// Closure returning the next key which time stamp is no older than the
|
||||||
// upper bound, if any. Cancelled events are pulled and discarded.
|
// upper bound, if any. Cancelled actions are pulled and discarded.
|
||||||
let peek_next_key = |scheduler_queue: &mut MutexGuard<SchedulerQueue>| {
|
let peek_next_key = |scheduler_queue: &mut MutexGuard<SchedulerQueue>| {
|
||||||
loop {
|
loop {
|
||||||
match scheduler_queue.peek() {
|
match scheduler_queue.peek() {
|
||||||
Some((&k, t)) if k.0 <= upper_time_bound => {
|
Some((&key, action)) if key.0 <= upper_time_bound => {
|
||||||
if !t.is_cancelled() {
|
if !action.is_cancelled() {
|
||||||
break Some(k);
|
break Some(key);
|
||||||
}
|
}
|
||||||
// Discard cancelled events.
|
// Discard cancelled actions.
|
||||||
scheduler_queue.pull();
|
scheduler_queue.pull();
|
||||||
}
|
}
|
||||||
_ => break None,
|
_ => break None,
|
||||||
@ -540,37 +568,37 @@ impl Simulation {
|
|||||||
self.time.write(current_key.0);
|
self.time.write(current_key.0);
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let event = pull_next_event(&mut scheduler_queue);
|
let action = pull_next_action(&mut scheduler_queue);
|
||||||
let mut next_key = peek_next_key(&mut scheduler_queue);
|
let mut next_key = peek_next_key(&mut scheduler_queue);
|
||||||
if next_key != Some(current_key) {
|
if next_key != Some(current_key) {
|
||||||
// Since there are no other events targeting the same mailbox
|
// Since there are no other actions targeting the same mailbox
|
||||||
// and the same time, the event is spawned immediately.
|
// and the same time, the action is spawned immediately.
|
||||||
event.spawn_and_forget(&self.executor);
|
action.spawn_and_forget(&self.executor);
|
||||||
} else {
|
} else {
|
||||||
// To ensure that their relative order of execution is
|
// To ensure that their relative order of execution is
|
||||||
// preserved, all event targeting the same mailbox are executed
|
// preserved, all actions targeting the same mailbox are
|
||||||
// sequentially within a single compound future.
|
// executed sequentially within a single compound future.
|
||||||
let mut event_sequence = SeqFuture::new();
|
let mut action_sequence = SeqFuture::new();
|
||||||
event_sequence.push(event.into_future());
|
action_sequence.push(action.into_future());
|
||||||
loop {
|
loop {
|
||||||
let event = pull_next_event(&mut scheduler_queue);
|
let action = pull_next_action(&mut scheduler_queue);
|
||||||
event_sequence.push(event.into_future());
|
action_sequence.push(action.into_future());
|
||||||
next_key = peek_next_key(&mut scheduler_queue);
|
next_key = peek_next_key(&mut scheduler_queue);
|
||||||
if next_key != Some(current_key) {
|
if next_key != Some(current_key) {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Spawn a compound future that sequentially polls all events
|
// Spawn a compound future that sequentially polls all actions
|
||||||
// targeting the same mailbox.
|
// targeting the same mailbox.
|
||||||
self.executor.spawn_and_forget(event_sequence);
|
self.executor.spawn_and_forget(action_sequence);
|
||||||
}
|
}
|
||||||
|
|
||||||
current_key = match next_key {
|
current_key = match next_key {
|
||||||
// If the next event is scheduled at the same time, update the
|
// If the next action is scheduled at the same time, update the
|
||||||
// key and continue.
|
// key and continue.
|
||||||
Some(k) if k.0 == current_key.0 => k,
|
Some(k) if k.0 == current_key.0 => k,
|
||||||
// Otherwise wait until all events have completed and return.
|
// Otherwise wait until all actions have completed and return.
|
||||||
_ => {
|
_ => {
|
||||||
drop(scheduler_queue); // make sure the queue's mutex is released.
|
drop(scheduler_queue); // make sure the queue's mutex is released.
|
||||||
let current_time = current_key.0;
|
let current_time = current_key.0;
|
||||||
@ -584,10 +612,10 @@ impl Simulation {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Iteratively advances simulation time and processes all events scheduled
|
/// Iteratively advances simulation time and processes all actions scheduled
|
||||||
/// up to the specified target time.
|
/// up to the specified target time.
|
||||||
///
|
///
|
||||||
/// Once the method returns it is guaranteed that (i) all events scheduled
|
/// Once the method returns it is guaranteed that (i) all actions scheduled
|
||||||
/// up to the specified target time have completed and (ii) the final
|
/// up to the specified target time have completed and (ii) the final
|
||||||
/// simulation time matches the target time.
|
/// simulation time matches the target time.
|
||||||
///
|
///
|
||||||
@ -598,7 +626,7 @@ impl Simulation {
|
|||||||
match self.step_to_next_bounded(target_time) {
|
match self.step_to_next_bounded(target_time) {
|
||||||
// The target time was reached exactly.
|
// The target time was reached exactly.
|
||||||
Some(t) if t == target_time => return,
|
Some(t) if t == target_time => return,
|
||||||
// No events are scheduled before or at the target time.
|
// No actions are scheduled before or at the target time.
|
||||||
None => {
|
None => {
|
||||||
// Update the simulation time.
|
// Update the simulation time.
|
||||||
self.time.write(target_time);
|
self.time.write(target_time);
|
||||||
@ -633,3 +661,26 @@ impl fmt::Display for QueryError {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl Error for QueryError {}
|
impl Error for QueryError {}
|
||||||
|
|
||||||
|
/// Adds a model and its mailbox to the simulation bench.
|
||||||
|
pub(crate) fn add_model<M: Model>(
|
||||||
|
mut model: M,
|
||||||
|
mailbox: Mailbox<M>,
|
||||||
|
name: String,
|
||||||
|
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
||||||
|
time: SyncCellReader<TearableAtomicTime>,
|
||||||
|
executor: &Executor,
|
||||||
|
) {
|
||||||
|
let sender = mailbox.0.sender();
|
||||||
|
|
||||||
|
let context = Context::new(name, sender, scheduler_queue, time);
|
||||||
|
let setup_context = SetupContext::new(&mailbox, &context, executor);
|
||||||
|
|
||||||
|
model.setup(&setup_context);
|
||||||
|
|
||||||
|
let mut receiver = mailbox.0;
|
||||||
|
executor.spawn_and_forget(async move {
|
||||||
|
let mut model = model.init(&context).await.0;
|
||||||
|
while receiver.recv(&mut model, &context).await.is_ok() {}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
@ -1,69 +0,0 @@
|
|||||||
use std::fmt;
|
|
||||||
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
|
|
||||||
|
|
||||||
use crate::util::spsc_queue;
|
|
||||||
|
|
||||||
/// An iterator that returns all events that were broadcast by an output port.
|
|
||||||
///
|
|
||||||
/// Events are returned in first-in-first-out order. Note that even if the
|
|
||||||
/// iterator returns `None`, it may still produce more items after simulation
|
|
||||||
/// time is incremented.
|
|
||||||
pub struct EventStream<T> {
|
|
||||||
consumer: spsc_queue::Consumer<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> EventStream<T> {
|
|
||||||
/// Creates a new `EventStream`.
|
|
||||||
pub(crate) fn new(consumer: spsc_queue::Consumer<T>) -> Self {
|
|
||||||
Self { consumer }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Iterator for EventStream<T> {
|
|
||||||
type Item = T;
|
|
||||||
|
|
||||||
fn next(&mut self) -> Option<Self::Item> {
|
|
||||||
self.consumer.pop()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> fmt::Debug for EventStream<T> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
f.debug_struct("EventStream").finish_non_exhaustive()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A single-value slot that holds the last event that was broadcast by an
|
|
||||||
/// output port.
|
|
||||||
pub struct EventSlot<T> {
|
|
||||||
slot: Arc<Mutex<Option<T>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> EventSlot<T> {
|
|
||||||
/// Creates a new `EventSlot`.
|
|
||||||
pub(crate) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
|
|
||||||
Self { slot }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Take the last event, if any, leaving the slot empty.
|
|
||||||
///
|
|
||||||
/// Note that even after the event is taken, it may become populated anew
|
|
||||||
/// after simulation time is incremented.
|
|
||||||
pub fn take(&mut self) -> Option<T> {
|
|
||||||
// We don't actually need to take self by mutable reference, but this
|
|
||||||
// signature is probably less surprising for the user and more
|
|
||||||
// consistent with `EventStream`. It also prevents multi-threaded
|
|
||||||
// access, which would be likely to be misused.
|
|
||||||
match self.slot.try_lock() {
|
|
||||||
TryLockResult::Ok(mut v) => v.take(),
|
|
||||||
TryLockResult::Err(TryLockError::WouldBlock) => None,
|
|
||||||
TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> fmt::Debug for EventSlot<T> {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
|
||||||
f.debug_struct("EventSlot").finish_non_exhaustive()
|
|
||||||
}
|
|
||||||
}
|
|
555
asynchronix/src/simulation/scheduler.rs
Normal file
555
asynchronix/src/simulation/scheduler.rs
Normal file
@ -0,0 +1,555 @@
|
|||||||
|
//! Scheduling functions and types.
|
||||||
|
|
||||||
|
use std::error::Error;
|
||||||
|
use std::future::Future;
|
||||||
|
use std::hash::{Hash, Hasher};
|
||||||
|
use std::pin::Pin;
|
||||||
|
use std::sync::atomic::{AtomicBool, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex};
|
||||||
|
use std::task::{Context, Poll};
|
||||||
|
use std::time::Duration;
|
||||||
|
use std::{fmt, ptr};
|
||||||
|
|
||||||
|
use pin_project_lite::pin_project;
|
||||||
|
use recycle_box::{coerce_box, RecycleBox};
|
||||||
|
|
||||||
|
use crate::channel::Sender;
|
||||||
|
use crate::executor::Executor;
|
||||||
|
use crate::model::Model;
|
||||||
|
use crate::ports::InputFn;
|
||||||
|
use crate::time::MonotonicTime;
|
||||||
|
use crate::util::priority_queue::PriorityQueue;
|
||||||
|
|
||||||
|
/// Shorthand for the scheduler queue type.
|
||||||
|
|
||||||
|
// Why use both time and channel ID as the key? The short answer is that this
|
||||||
|
// ensures that events targeting the same model are sent in the order they were
|
||||||
|
// scheduled. More precisely, this ensures that events targeting the same model
|
||||||
|
// are ordered contiguously in the priority queue, which in turns allows the
|
||||||
|
// event loop to easily aggregate such events into single futures and thus
|
||||||
|
// control their relative order of execution.
|
||||||
|
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, usize), Action>;
|
||||||
|
|
||||||
|
/// Trait abstracting over time-absolute and time-relative deadlines.
|
||||||
|
///
|
||||||
|
/// This trait is implemented by [`std::time::Duration`] and
|
||||||
|
/// [`MonotonicTime`].
|
||||||
|
pub trait Deadline {
|
||||||
|
/// Make this deadline into an absolute timestamp, using the provided
|
||||||
|
/// current time as a reference.
|
||||||
|
fn into_time(self, now: MonotonicTime) -> MonotonicTime;
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deadline for Duration {
|
||||||
|
#[inline(always)]
|
||||||
|
fn into_time(self, now: MonotonicTime) -> MonotonicTime {
|
||||||
|
now + self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Deadline for MonotonicTime {
|
||||||
|
#[inline(always)]
|
||||||
|
fn into_time(self, _: MonotonicTime) -> MonotonicTime {
|
||||||
|
self
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Handle to a scheduled action.
|
||||||
|
///
|
||||||
|
/// An `ActionKey` can be used to cancel a scheduled action.
|
||||||
|
#[derive(Clone, Debug)]
|
||||||
|
#[must_use = "prefer unkeyed scheduling methods if the action is never cancelled"]
|
||||||
|
pub struct ActionKey {
|
||||||
|
is_cancelled: Arc<AtomicBool>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ActionKey {
|
||||||
|
/// Creates a key for a pending action.
|
||||||
|
pub(crate) fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
is_cancelled: Arc::new(AtomicBool::new(false)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks whether the action was cancelled.
|
||||||
|
pub(crate) fn is_cancelled(&self) -> bool {
|
||||||
|
self.is_cancelled.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cancels the associated action.
|
||||||
|
pub fn cancel(self) {
|
||||||
|
self.is_cancelled.store(true, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl PartialEq for ActionKey {
|
||||||
|
/// Implements equality by considering clones to be equivalent, rather than
|
||||||
|
/// keys with the same `is_cancelled` value.
|
||||||
|
fn eq(&self, other: &Self) -> bool {
|
||||||
|
ptr::eq(&*self.is_cancelled, &*other.is_cancelled)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Eq for ActionKey {}
|
||||||
|
|
||||||
|
impl Hash for ActionKey {
|
||||||
|
/// Implements `Hash`` by considering clones to be equivalent, rather than
|
||||||
|
/// keys with the same `is_cancelled` value.
|
||||||
|
fn hash<H>(&self, state: &mut H)
|
||||||
|
where
|
||||||
|
H: Hasher,
|
||||||
|
{
|
||||||
|
ptr::hash(&*self.is_cancelled, state)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Error returned when the scheduled time or the repetition period are invalid.
|
||||||
|
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
||||||
|
pub enum SchedulingError {
|
||||||
|
/// The scheduled time does not lie in the future of the current simulation
|
||||||
|
/// time.
|
||||||
|
InvalidScheduledTime,
|
||||||
|
/// The repetition period is zero.
|
||||||
|
NullRepetitionPeriod,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for SchedulingError {
|
||||||
|
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
match self {
|
||||||
|
Self::InvalidScheduledTime => write!(
|
||||||
|
fmt,
|
||||||
|
"the scheduled time should be in the future of the current simulation time"
|
||||||
|
),
|
||||||
|
Self::NullRepetitionPeriod => write!(fmt, "the repetition period cannot be zero"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Error for SchedulingError {}
|
||||||
|
|
||||||
|
/// A possibly periodic, possibly cancellable action that can be scheduled or
|
||||||
|
/// processed immediately.
|
||||||
|
pub struct Action {
|
||||||
|
inner: Box<dyn ActionInner>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Action {
|
||||||
|
/// Creates a new `Action` from an `ActionInner`.
|
||||||
|
pub(crate) fn new<S: ActionInner>(s: S) -> Self {
|
||||||
|
Self { inner: Box::new(s) }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Reports whether the action was cancelled.
|
||||||
|
pub(crate) fn is_cancelled(&self) -> bool {
|
||||||
|
self.inner.is_cancelled()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// If this is a periodic action, returns a boxed clone of this action and
|
||||||
|
/// its repetition period; otherwise returns `None`.
|
||||||
|
pub(crate) fn next(&self) -> Option<(Action, Duration)> {
|
||||||
|
self.inner
|
||||||
|
.next()
|
||||||
|
.map(|(inner, period)| (Self { inner }, period))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns a boxed future that performs the action.
|
||||||
|
pub(crate) fn into_future(self) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
||||||
|
self.inner.into_future()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawns the future that performs the action onto the provided executor.
|
||||||
|
///
|
||||||
|
/// This method is typically more efficient that spawning the boxed future
|
||||||
|
/// from `into_future` since it can directly spawn the unboxed future.
|
||||||
|
pub(crate) fn spawn_and_forget(self, executor: &Executor) {
|
||||||
|
self.inner.spawn_and_forget(executor)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Debug for Action {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
|
||||||
|
f.debug_struct("SchedulableEvent").finish_non_exhaustive()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Trait abstracting over the inner type of an action.
///
/// Implementors provide the action's cancellation status, its optional
/// periodic successor, and the future that performs the action.
pub(crate) trait ActionInner: Send + 'static {
    /// Reports whether the action was cancelled.
    fn is_cancelled(&self) -> bool;

    /// If this is a periodic action, returns a boxed clone of this action and
    /// its repetition period; otherwise returns `None`.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)>;

    /// Returns a boxed future that performs the action.
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;

    /// Spawns the future that performs the action onto the provided executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
|
||||||
|
|
||||||
|
/// Schedules an event at a future time.
|
||||||
|
///
|
||||||
|
/// This function does not check whether the specified time lies in the future
|
||||||
|
/// of the current simulation time.
|
||||||
|
pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
|
||||||
|
time: MonotonicTime,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: &Mutex<SchedulerQueue>,
|
||||||
|
) where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let channel_id = sender.channel_id();
|
||||||
|
|
||||||
|
let action = Action::new(OnceAction::new(process_event(func, arg, sender)));
|
||||||
|
|
||||||
|
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
||||||
|
scheduler_queue.insert((time, channel_id), action);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules an event at a future time, returning an action key.
|
||||||
|
///
|
||||||
|
/// This function does not check whether the specified time lies in the future
|
||||||
|
/// of the current simulation time.
|
||||||
|
pub(crate) fn schedule_keyed_event_at_unchecked<M, F, T, S>(
|
||||||
|
time: MonotonicTime,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: &Mutex<SchedulerQueue>,
|
||||||
|
) -> ActionKey
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S>,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let event_key = ActionKey::new();
|
||||||
|
let channel_id = sender.channel_id();
|
||||||
|
let action = Action::new(KeyedOnceAction::new(
|
||||||
|
|ek| send_keyed_event(ek, func, arg, sender),
|
||||||
|
event_key.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
||||||
|
scheduler_queue.insert((time, channel_id), action);
|
||||||
|
|
||||||
|
event_key
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules a periodic event at a future time.
|
||||||
|
///
|
||||||
|
/// This function does not check whether the specified time lies in the future
|
||||||
|
/// of the current simulation time.
|
||||||
|
pub(crate) fn schedule_periodic_event_at_unchecked<M, F, T, S>(
|
||||||
|
time: MonotonicTime,
|
||||||
|
period: Duration,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: &Mutex<SchedulerQueue>,
|
||||||
|
) where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let channel_id = sender.channel_id();
|
||||||
|
|
||||||
|
let action = Action::new(PeriodicAction::new(
|
||||||
|
|| process_event(func, arg, sender),
|
||||||
|
period,
|
||||||
|
));
|
||||||
|
|
||||||
|
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
||||||
|
scheduler_queue.insert((time, channel_id), action);
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Schedules an event at a future time, returning an action key.
|
||||||
|
///
|
||||||
|
/// This function does not check whether the specified time lies in the future
|
||||||
|
/// of the current simulation time.
|
||||||
|
pub(crate) fn schedule_periodic_keyed_event_at_unchecked<M, F, T, S>(
|
||||||
|
time: MonotonicTime,
|
||||||
|
period: Duration,
|
||||||
|
func: F,
|
||||||
|
arg: T,
|
||||||
|
sender: Sender<M>,
|
||||||
|
scheduler_queue: &Mutex<SchedulerQueue>,
|
||||||
|
) -> ActionKey
|
||||||
|
where
|
||||||
|
M: Model,
|
||||||
|
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
||||||
|
T: Send + Clone + 'static,
|
||||||
|
S: Send + 'static,
|
||||||
|
{
|
||||||
|
let event_key = ActionKey::new();
|
||||||
|
let channel_id = sender.channel_id();
|
||||||
|
let action = Action::new(KeyedPeriodicAction::new(
|
||||||
|
|ek| send_keyed_event(ek, func, arg, sender),
|
||||||
|
period,
|
||||||
|
event_key.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
||||||
|
scheduler_queue.insert((time, channel_id), action);
|
||||||
|
|
||||||
|
event_key
|
||||||
|
}
|
||||||
|
|
||||||
|
pin_project! {
    /// An object that can be converted to a future performing a single
    /// non-cancellable action.
    ///
    /// Note that this particular action is in fact already a future: since the
    /// future cannot be cancelled and the action does not need to be cloned,
    /// there is no need to defer the construction of the future. This makes
    /// `into_future` a trivial cast, which saves a boxing operation.
    pub(crate) struct OnceAction<F> {
        // The wrapped future, structurally pinned so `poll` can project to it.
        #[pin]
        fut: F,
    }
}
|
||||||
|
|
||||||
|
impl<F> OnceAction<F>
|
||||||
|
where
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
/// Constructs a new `OnceAction`.
|
||||||
|
pub(crate) fn new(fut: F) -> Self {
|
||||||
|
OnceAction { fut }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A `OnceAction` polls as its wrapped future.
impl<F> Future for OnceAction<F>
where
    F: Future,
{
    type Output = F::Output;

    #[inline(always)]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Forward the poll to the pinned inner future via the projection
        // generated by `pin_project!`.
        self.project().fut.poll(cx)
    }
}
|
||||||
|
|
||||||
|
impl<F> ActionInner for OnceAction<F>
where
    F: Future<Output = ()> + Send + 'static,
{
    // A `OnceAction` can never be cancelled.
    fn is_cancelled(&self) -> bool {
        false
    }
    // A `OnceAction` is not periodic, so it has no successor.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
        None
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // No need for boxing, type coercion is enough here.
        Box::into_pin(self)
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // `OnceAction` is itself a future, so the unboxed value can be spawned
        // directly.
        executor.spawn_and_forget(*self);
    }
}
|
||||||
|
|
||||||
|
/// An object that can be converted to a future performing a non-cancellable,
/// periodic action.
///
/// Each occurrence's future is produced by cloning and invoking the generator
/// (see the `ActionInner::next` implementation).
pub(crate) struct PeriodicAction<G, F>
where
    G: (FnOnce() -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A clonable generator for the associated future.
    gen: G,
    /// The action repetition period.
    period: Duration,
}
|
||||||
|
|
||||||
|
impl<G, F> PeriodicAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce() -> F) + Clone + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
/// Constructs a new `PeriodicAction`.
|
||||||
|
pub(crate) fn new(gen: G, period: Duration) -> Self {
|
||||||
|
Self { gen, period }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<G, F> ActionInner for PeriodicAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce() -> F) + Clone + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
fn is_cancelled(&self) -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
|
||||||
|
let event = Box::new(Self::new(self.gen.clone(), self.period));
|
||||||
|
|
||||||
|
Some((event, self.period))
|
||||||
|
}
|
||||||
|
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
||||||
|
Box::pin((self.gen)())
|
||||||
|
}
|
||||||
|
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
||||||
|
executor.spawn_and_forget((self.gen)());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can be converted to a future performing a single, cancellable
/// action.
pub(crate) struct KeyedOnceAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A generator for the associated future; it receives the cancellation key
    /// so the future can check it before executing.
    gen: G,
    /// The event cancellation key.
    event_key: ActionKey,
}
|
||||||
|
|
||||||
|
impl<G, F> KeyedOnceAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce(ActionKey) -> F) + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
/// Constructs a new `KeyedOnceAction`.
|
||||||
|
pub(crate) fn new(gen: G, event_key: ActionKey) -> Self {
|
||||||
|
Self { gen, event_key }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<G, F> ActionInner for KeyedOnceAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce(ActionKey) -> F) + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
fn is_cancelled(&self) -> bool {
|
||||||
|
self.event_key.is_cancelled()
|
||||||
|
}
|
||||||
|
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
||||||
|
Box::pin((self.gen)(self.event_key))
|
||||||
|
}
|
||||||
|
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
||||||
|
executor.spawn_and_forget((self.gen)(self.event_key));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object that can be converted to a future performing a periodic,
/// cancellable action.
pub(crate) struct KeyedPeriodicAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A clonable generator for the associated future; it receives the
    /// cancellation key so the future can check it before executing.
    gen: G,
    /// The repetition period.
    period: Duration,
    /// The event cancellation key.
    event_key: ActionKey,
}
|
||||||
|
|
||||||
|
impl<G, F> KeyedPeriodicAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
/// Constructs a new `KeyedPeriodicAction`.
|
||||||
|
pub(crate) fn new(gen: G, period: Duration, event_key: ActionKey) -> Self {
|
||||||
|
Self {
|
||||||
|
gen,
|
||||||
|
period,
|
||||||
|
event_key,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<G, F> ActionInner for KeyedPeriodicAction<G, F>
|
||||||
|
where
|
||||||
|
G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
|
||||||
|
F: Future<Output = ()> + Send + 'static,
|
||||||
|
{
|
||||||
|
fn is_cancelled(&self) -> bool {
|
||||||
|
self.event_key.is_cancelled()
|
||||||
|
}
|
||||||
|
fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
|
||||||
|
let event = Box::new(Self::new(
|
||||||
|
self.gen.clone(),
|
||||||
|
self.period,
|
||||||
|
self.event_key.clone(),
|
||||||
|
));
|
||||||
|
|
||||||
|
Some((event, self.period))
|
||||||
|
}
|
||||||
|
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
||||||
|
Box::pin((self.gen)(self.event_key))
|
||||||
|
}
|
||||||
|
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
||||||
|
executor.spawn_and_forget((self.gen)(self.event_key));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Asynchronously sends a non-cancellable event to a model input.
///
/// The event is delivered as a closure which, when executed by the receiving
/// channel, calls `func` on the model and stores the resulting future in the
/// recycled box.
pub(crate) async fn process_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + 'static,
{
    // The send result is deliberately discarded: a failed send means the
    // event is simply dropped.
    let _ = sender
        .send(
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = func.call(model, arg, scheduler);

                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
|
||||||
|
|
||||||
|
/// Asynchronously sends a cancellable event to a model input.
///
/// Same as `process_event`, except that the generated future first checks the
/// event's cancellation key and becomes a no-op if the event was cancelled.
pub(crate) async fn send_keyed_event<M, F, T, S>(
    event_key: ActionKey,
    func: F,
    arg: T,
    sender: Sender<M>,
) where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    // The send result is deliberately discarded: a failed send means the
    // event is simply dropped.
    let _ = sender
        .send(
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = async move {
                    // Only perform the call if the event wasn't cancelled.
                    if !event_key.is_cancelled() {
                        func.call(model, arg, scheduler).await;
                    }
                };

                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
|
@ -3,18 +3,19 @@ use std::sync::{Arc, Mutex};
|
|||||||
|
|
||||||
use crate::executor::Executor;
|
use crate::executor::Executor;
|
||||||
use crate::model::Model;
|
use crate::model::Model;
|
||||||
use crate::time::{Clock, Scheduler};
|
use crate::time::{Clock, NoClock};
|
||||||
use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime};
|
use crate::time::{MonotonicTime, TearableAtomicTime};
|
||||||
use crate::util::priority_queue::PriorityQueue;
|
use crate::util::priority_queue::PriorityQueue;
|
||||||
use crate::util::sync_cell::SyncCell;
|
use crate::util::sync_cell::SyncCell;
|
||||||
|
|
||||||
use super::{Mailbox, Simulation};
|
use super::{add_model, Mailbox, SchedulerQueue, Simulation};
|
||||||
|
|
||||||
/// Builder for a multi-threaded, discrete-event simulation.
|
/// Builder for a multi-threaded, discrete-event simulation.
|
||||||
pub struct SimInit {
|
pub struct SimInit {
|
||||||
executor: Executor,
|
executor: Executor,
|
||||||
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
||||||
time: SyncCell<TearableAtomicTime>,
|
time: SyncCell<TearableAtomicTime>,
|
||||||
|
clock: Box<dyn Clock + 'static>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl SimInit {
|
impl SimInit {
|
||||||
@ -24,33 +25,60 @@ impl SimInit {
|
|||||||
Self::with_num_threads(num_cpus::get())
|
Self::with_num_threads(num_cpus::get())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Creates a builder for a multithreaded simulation running on the
|
/// Creates a builder for a simulation running on the specified number of
|
||||||
/// specified number of threads.
|
/// threads.
|
||||||
|
///
|
||||||
|
/// Note that the number of worker threads is automatically constrained to
|
||||||
|
/// be between 1 and `usize::BITS` (inclusive).
|
||||||
pub fn with_num_threads(num_threads: usize) -> Self {
|
pub fn with_num_threads(num_threads: usize) -> Self {
|
||||||
// The current executor's implementation caps the number of thread to 64
|
let num_threads = num_threads.clamp(1, usize::BITS as usize);
|
||||||
// on 64-bit systems and 32 on 32-bit systems.
|
|
||||||
let num_threads = num_threads.min(usize::BITS as usize);
|
let executor = if num_threads == 1 {
|
||||||
|
Executor::new_single_threaded()
|
||||||
|
} else {
|
||||||
|
Executor::new_multi_threaded(num_threads)
|
||||||
|
};
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
executor: Executor::new(num_threads),
|
executor,
|
||||||
scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
|
scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
|
||||||
time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)),
|
time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)),
|
||||||
|
clock: Box::new(NoClock::new()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Adds a model and its mailbox to the simulation bench.
|
/// Adds a model and its mailbox to the simulation bench.
|
||||||
pub fn add_model<M: Model>(self, model: M, mailbox: Mailbox<M>) -> Self {
|
///
|
||||||
|
/// The `name` argument needs not be unique (it can be the empty string) and
|
||||||
|
/// is used for convenience for the model instance identification (e.g. for
|
||||||
|
/// logging purposes).
|
||||||
|
pub fn add_model<M: Model>(
|
||||||
|
self,
|
||||||
|
model: M,
|
||||||
|
mailbox: Mailbox<M>,
|
||||||
|
name: impl Into<String>,
|
||||||
|
) -> Self {
|
||||||
let scheduler_queue = self.scheduler_queue.clone();
|
let scheduler_queue = self.scheduler_queue.clone();
|
||||||
let time = self.time.reader();
|
let time = self.time.reader();
|
||||||
let mut receiver = mailbox.0;
|
|
||||||
|
|
||||||
self.executor.spawn_and_forget(async move {
|
add_model(
|
||||||
let sender = receiver.sender();
|
model,
|
||||||
let scheduler = Scheduler::new(sender, scheduler_queue, time);
|
mailbox,
|
||||||
let mut model = model.init(&scheduler).await.0;
|
name.into(),
|
||||||
|
scheduler_queue,
|
||||||
|
time,
|
||||||
|
&self.executor,
|
||||||
|
);
|
||||||
|
|
||||||
while receiver.recv(&mut model, &scheduler).await.is_ok() {}
|
self
|
||||||
});
|
}
|
||||||
|
|
||||||
|
/// Synchronize the simulation with the provided [`Clock`].
|
||||||
|
///
|
||||||
|
/// If the clock isn't explicitly set then the default [`NoClock`] is used,
|
||||||
|
/// resulting in the simulation running as fast as possible.
|
||||||
|
pub fn set_clock(mut self, clock: impl Clock + 'static) -> Self {
|
||||||
|
self.clock = Box::new(clock);
|
||||||
|
|
||||||
self
|
self
|
||||||
}
|
}
|
||||||
@ -58,32 +86,12 @@ impl SimInit {
|
|||||||
/// Builds a simulation initialized at the specified simulation time,
|
/// Builds a simulation initialized at the specified simulation time,
|
||||||
/// executing the [`Model::init()`](crate::model::Model::init) method on all
|
/// executing the [`Model::init()`](crate::model::Model::init) method on all
|
||||||
/// model initializers.
|
/// model initializers.
|
||||||
///
|
|
||||||
/// This is equivalent to calling [`SimInit::init_with_clock()`] with a
|
|
||||||
/// [`NoClock`](crate::time::NoClock) argument and effectively makes the
|
|
||||||
/// simulation run as fast as possible.
|
|
||||||
pub fn init(mut self, start_time: MonotonicTime) -> Simulation {
|
pub fn init(mut self, start_time: MonotonicTime) -> Simulation {
|
||||||
self.time.write(start_time);
|
self.time.write(start_time);
|
||||||
|
self.clock.synchronize(start_time);
|
||||||
self.executor.run();
|
self.executor.run();
|
||||||
|
|
||||||
Simulation::new(self.executor, self.scheduler_queue, self.time)
|
Simulation::new(self.executor, self.scheduler_queue, self.time, self.clock)
|
||||||
}
|
|
||||||
|
|
||||||
/// Builds a simulation synchronized with the provided
|
|
||||||
/// [`Clock`](crate::time::Clock) and initialized at the specified
|
|
||||||
/// simulation time, executing the
|
|
||||||
/// [`Model::init()`](crate::model::Model::init) method on all model
|
|
||||||
/// initializers.
|
|
||||||
pub fn init_with_clock(
|
|
||||||
mut self,
|
|
||||||
start_time: MonotonicTime,
|
|
||||||
mut clock: impl Clock + 'static,
|
|
||||||
) -> Simulation {
|
|
||||||
self.time.write(start_time);
|
|
||||||
clock.synchronize(start_time);
|
|
||||||
self.executor.run();
|
|
||||||
|
|
||||||
Simulation::with_clock(self.executor, self.scheduler_queue, self.time, clock)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -4,9 +4,7 @@
|
|||||||
//!
|
//!
|
||||||
//! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard,
|
//! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard,
|
||||||
//! * [`Clock`]: a trait for types that can synchronize a simulation,
|
//! * [`Clock`]: a trait for types that can synchronize a simulation,
|
||||||
//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`],
|
//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`].
|
||||||
//! * [`Scheduler`]: a model-local handle to the global scheduler that can be
|
|
||||||
//! used by models to schedule future actions onto themselves.
|
|
||||||
//!
|
//!
|
||||||
//! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
|
//! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
|
||||||
//!
|
//!
|
||||||
@ -17,8 +15,8 @@
|
|||||||
//! the specified timestamp.
|
//! the specified timestamp.
|
||||||
//!
|
//!
|
||||||
//! ```
|
//! ```
|
||||||
//! use asynchronix::model::Model;
|
//! use asynchronix::model::{Context, Model};
|
||||||
//! use asynchronix::time::{MonotonicTime, Scheduler};
|
//! use asynchronix::time::MonotonicTime;
|
||||||
//!
|
//!
|
||||||
//! // An alarm clock model.
|
//! // An alarm clock model.
|
||||||
//! pub struct AlarmClock {
|
//! pub struct AlarmClock {
|
||||||
@ -32,8 +30,8 @@
|
|||||||
//! }
|
//! }
|
||||||
//!
|
//!
|
||||||
//! // Sets an alarm [input port].
|
//! // Sets an alarm [input port].
|
||||||
//! pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
|
//! pub fn set(&mut self, setting: MonotonicTime, context: &Context<Self>) {
|
||||||
//! if scheduler.schedule_event(setting, Self::ring, ()).is_err() {
|
//! if context.schedule_event(setting, Self::ring, ()).is_err() {
|
||||||
//! println!("The alarm clock can only be set for a future time");
|
//! println!("The alarm clock can only be set for a future time");
|
||||||
//! }
|
//! }
|
||||||
//! }
|
//! }
|
||||||
@ -49,14 +47,8 @@
|
|||||||
|
|
||||||
mod clock;
|
mod clock;
|
||||||
mod monotonic_time;
|
mod monotonic_time;
|
||||||
mod scheduler;
|
|
||||||
|
pub use tai_time::MonotonicTime;
|
||||||
|
|
||||||
pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock};
|
pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock};
|
||||||
pub(crate) use monotonic_time::TearableAtomicTime;
|
pub(crate) use monotonic_time::TearableAtomicTime;
|
||||||
pub use monotonic_time::{MonotonicTime, SystemTimeError};
|
|
||||||
pub(crate) use scheduler::{
|
|
||||||
schedule_event_at_unchecked, schedule_keyed_event_at_unchecked,
|
|
||||||
schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked,
|
|
||||||
ScheduledEvent, SchedulerQueue,
|
|
||||||
};
|
|
||||||
pub use scheduler::{Deadline, EventKey, Scheduler, SchedulingError};
|
|
||||||
|
@ -1,14 +1,16 @@
|
|||||||
use std::time::{Duration, Instant, SystemTime};
|
use std::time::{Duration, Instant, SystemTime};
|
||||||
|
|
||||||
|
use tai_time::MonotonicClock;
|
||||||
|
|
||||||
use crate::time::MonotonicTime;
|
use crate::time::MonotonicTime;
|
||||||
|
|
||||||
/// A type that can be used to synchronize a simulation.
|
/// A type that can be used to synchronize a simulation.
|
||||||
///
|
///
|
||||||
/// This trait abstract over the different types of clocks, such as
|
/// This trait abstracts over different types of clocks, such as
|
||||||
/// as-fast-as-possible and real-time clocks.
|
/// as-fast-as-possible and real-time clocks.
|
||||||
///
|
///
|
||||||
/// A clock can be associated to a simulation at initialization time by calling
|
/// A clock can be associated to a simulation prior to initialization by calling
|
||||||
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock).
|
/// [`SimInit::set_clock()`](crate::simulation::SimInit::set_clock).
|
||||||
pub trait Clock: Send {
|
pub trait Clock: Send {
|
||||||
/// Blocks until the deadline.
|
/// Blocks until the deadline.
|
||||||
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus;
|
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus;
|
||||||
@ -49,10 +51,7 @@ impl Clock for NoClock {
|
|||||||
/// This clock accepts an arbitrary reference time and remains synchronized with
|
/// This clock accepts an arbitrary reference time and remains synchronized with
|
||||||
/// the system's monotonic clock.
|
/// the system's monotonic clock.
|
||||||
#[derive(Copy, Clone, Debug)]
|
#[derive(Copy, Clone, Debug)]
|
||||||
pub struct SystemClock {
|
pub struct SystemClock(MonotonicClock);
|
||||||
wall_clock_ref: Instant,
|
|
||||||
simulation_ref: MonotonicTime,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SystemClock {
|
impl SystemClock {
|
||||||
/// Constructs a `SystemClock` with an offset between simulation clock and
|
/// Constructs a `SystemClock` with an offset between simulation clock and
|
||||||
@ -69,7 +68,7 @@ impl SystemClock {
|
|||||||
/// use asynchronix::simulation::SimInit;
|
/// use asynchronix::simulation::SimInit;
|
||||||
/// use asynchronix::time::{MonotonicTime, SystemClock};
|
/// use asynchronix::time::{MonotonicTime, SystemClock};
|
||||||
///
|
///
|
||||||
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
|
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
|
||||||
///
|
///
|
||||||
/// // Make the simulation start in 1s.
|
/// // Make the simulation start in 1s.
|
||||||
/// let clock = SystemClock::from_instant(t0, Instant::now() + Duration::from_secs(1));
|
/// let clock = SystemClock::from_instant(t0, Instant::now() + Duration::from_secs(1));
|
||||||
@ -77,13 +76,14 @@ impl SystemClock {
|
|||||||
/// let simu = SimInit::new()
|
/// let simu = SimInit::new()
|
||||||
/// // .add_model(...)
|
/// // .add_model(...)
|
||||||
/// // .add_model(...)
|
/// // .add_model(...)
|
||||||
/// .init_with_clock(t0, clock);
|
/// .set_clock(clock)
|
||||||
|
/// .init(t0);
|
||||||
/// ```
|
/// ```
|
||||||
pub fn from_instant(simulation_ref: MonotonicTime, wall_clock_ref: Instant) -> Self {
|
pub fn from_instant(simulation_ref: MonotonicTime, wall_clock_ref: Instant) -> Self {
|
||||||
Self {
|
Self(MonotonicClock::init_from_instant(
|
||||||
wall_clock_ref,
|
|
||||||
simulation_ref,
|
simulation_ref,
|
||||||
}
|
wall_clock_ref,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Constructs a `SystemClock` with an offset between simulation clock and
|
/// Constructs a `SystemClock` with an offset between simulation clock and
|
||||||
@ -109,7 +109,7 @@ impl SystemClock {
|
|||||||
/// use asynchronix::simulation::SimInit;
|
/// use asynchronix::simulation::SimInit;
|
||||||
/// use asynchronix::time::{MonotonicTime, SystemClock};
|
/// use asynchronix::time::{MonotonicTime, SystemClock};
|
||||||
///
|
///
|
||||||
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
|
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
|
||||||
///
|
///
|
||||||
/// // Make the simulation start at the next full second boundary.
|
/// // Make the simulation start at the next full second boundary.
|
||||||
/// let now_secs = UNIX_EPOCH.elapsed().unwrap().as_secs();
|
/// let now_secs = UNIX_EPOCH.elapsed().unwrap().as_secs();
|
||||||
@ -120,58 +120,14 @@ impl SystemClock {
|
|||||||
/// let simu = SimInit::new()
|
/// let simu = SimInit::new()
|
||||||
/// // .add_model(...)
|
/// // .add_model(...)
|
||||||
/// // .add_model(...)
|
/// // .add_model(...)
|
||||||
/// .init_with_clock(t0, clock);
|
/// .set_clock(clock)
|
||||||
|
/// .init(t0);
|
||||||
/// ```
|
/// ```
|
||||||
pub fn from_system_time(simulation_ref: MonotonicTime, wall_clock_ref: SystemTime) -> Self {
|
pub fn from_system_time(simulation_ref: MonotonicTime, wall_clock_ref: SystemTime) -> Self {
|
||||||
// Select the best-correlated `Instant`/`SystemTime` pair from several
|
Self(MonotonicClock::init_from_system_time(
|
||||||
// samples to improve robustness towards possible thread suspension
|
|
||||||
// between the calls to `SystemTime::now()` and `Instant::now()`.
|
|
||||||
const SAMPLES: usize = 3;
|
|
||||||
|
|
||||||
let mut last_instant = Instant::now();
|
|
||||||
let mut min_delta = Duration::MAX;
|
|
||||||
let mut ref_time = None;
|
|
||||||
|
|
||||||
// Select the best-correlated instant/date pair.
|
|
||||||
for _ in 0..SAMPLES {
|
|
||||||
// The inner loop is to work around monotonic clock platform bugs
|
|
||||||
// that may cause `checked_duration_since` to fail.
|
|
||||||
let (date, instant, delta) = loop {
|
|
||||||
let date = SystemTime::now();
|
|
||||||
let instant = Instant::now();
|
|
||||||
let delta = instant.checked_duration_since(last_instant);
|
|
||||||
last_instant = instant;
|
|
||||||
|
|
||||||
if let Some(delta) = delta {
|
|
||||||
break (date, instant, delta);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
// Store the current instant/date if the time elapsed since the last
|
|
||||||
// measurement is shorter than the previous candidate.
|
|
||||||
if min_delta > delta {
|
|
||||||
min_delta = delta;
|
|
||||||
ref_time = Some((instant, date));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set the selected instant/date as the wall clock reference and adjust
|
|
||||||
// the simulation reference accordingly.
|
|
||||||
let (instant_ref, date_ref) = ref_time.unwrap();
|
|
||||||
let simulation_ref = if date_ref > wall_clock_ref {
|
|
||||||
let correction = date_ref.duration_since(wall_clock_ref).unwrap();
|
|
||||||
|
|
||||||
simulation_ref + correction
|
|
||||||
} else {
|
|
||||||
let correction = wall_clock_ref.duration_since(date_ref).unwrap();
|
|
||||||
|
|
||||||
simulation_ref - correction
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
wall_clock_ref: instant_ref,
|
|
||||||
simulation_ref,
|
simulation_ref,
|
||||||
}
|
wall_clock_ref,
|
||||||
|
))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -179,22 +135,14 @@ impl Clock for SystemClock {
|
|||||||
/// Blocks until the system time corresponds to the specified simulation
|
/// Blocks until the system time corresponds to the specified simulation
|
||||||
/// time.
|
/// time.
|
||||||
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
|
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
|
||||||
let target_time = if deadline >= self.simulation_ref {
|
let now = self.0.now();
|
||||||
self.wall_clock_ref + deadline.duration_since(self.simulation_ref)
|
if now <= deadline {
|
||||||
} else {
|
spin_sleep::sleep(deadline.duration_since(now));
|
||||||
self.wall_clock_ref - self.simulation_ref.duration_since(deadline)
|
|
||||||
};
|
|
||||||
|
|
||||||
let now = Instant::now();
|
return SyncStatus::Synchronized;
|
||||||
|
|
||||||
match target_time.checked_duration_since(now) {
|
|
||||||
Some(sleep_duration) => {
|
|
||||||
spin_sleep::sleep(sleep_duration);
|
|
||||||
|
|
||||||
SyncStatus::Synchronized
|
|
||||||
}
|
|
||||||
None => SyncStatus::OutOfSync(now.duration_since(target_time)),
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
SyncStatus::OutOfSync(now.duration_since(deadline))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -233,3 +181,29 @@ impl Clock for AutoSystemClock {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod test {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn smoke_system_clock() {
|
||||||
|
let t0 = MonotonicTime::EPOCH;
|
||||||
|
const TOLERANCE: f64 = 0.0005; // [s]
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
let mut clock = SystemClock::from_instant(t0, now);
|
||||||
|
let t1 = t0 + Duration::from_millis(200);
|
||||||
|
clock.synchronize(t1);
|
||||||
|
let elapsed = now.elapsed().as_secs_f64();
|
||||||
|
let dt = t1.duration_since(t0).as_secs_f64();
|
||||||
|
|
||||||
|
assert!(
|
||||||
|
(dt - elapsed) <= TOLERANCE,
|
||||||
|
"Expected t = {:.6}s +/- {:.6}s, measured t = {:.6}s",
|
||||||
|
dt,
|
||||||
|
TOLERANCE,
|
||||||
|
elapsed,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -1,483 +1,10 @@
|
|||||||
//! Monotonic simulation time.
|
//! Monotonic simulation time.
|
||||||
|
|
||||||
use std::error::Error;
|
|
||||||
use std::fmt;
|
|
||||||
use std::ops::{Add, AddAssign, Sub, SubAssign};
|
|
||||||
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
|
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
|
||||||
use std::time::{Duration, SystemTime};
|
|
||||||
|
use super::MonotonicTime;
|
||||||
|
|
||||||
use crate::util::sync_cell::TearableAtomic;
|
use crate::util::sync_cell::TearableAtomic;
|
||||||
|
|
||||||
const NANOS_PER_SEC: u32 = 1_000_000_000;
|
|
||||||
|
|
||||||
/// A nanosecond-precision monotonic clock timestamp.
|
|
||||||
///
|
|
||||||
/// A timestamp specifies a [TAI] point in time. It is represented as a 64-bit
|
|
||||||
/// signed number of seconds and a positive number of nanoseconds, counted with
|
|
||||||
/// reference to 1970-01-01 00:00:00 TAI. This timestamp format has a number of
|
|
||||||
/// desirable properties:
|
|
||||||
///
|
|
||||||
/// - it enables cheap inter-operation with the standard [`Duration`] type which
|
|
||||||
/// uses a very similar internal representation,
|
|
||||||
/// - it constitutes a strict 96-bit superset of 80-bit PTP IEEE-1588
|
|
||||||
/// timestamps, with the same epoch,
|
|
||||||
/// - if required, exact conversion to a Unix timestamp is trivial and only
|
|
||||||
/// requires subtracting from this timestamp the number of leap seconds
|
|
||||||
/// between TAI and UTC time (see also the
|
|
||||||
/// [`as_unix_secs()`](MonotonicTime::as_unix_secs) method).
|
|
||||||
///
|
|
||||||
/// Although no date-time conversion methods are provided, conversion from
|
|
||||||
/// timestamp to TAI date-time representations and back can be easily performed
|
|
||||||
/// using `NaiveDateTime` from the [chrono] crate or `OffsetDateTime` from the
|
|
||||||
/// [time] crate, treating the timestamp as a regular (UTC) Unix timestamp.
|
|
||||||
///
|
|
||||||
/// [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
|
|
||||||
/// [chrono]: https://crates.io/crates/chrono
|
|
||||||
/// [time]: https://crates.io/crates/time
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// // Set the timestamp to 2009-02-13 23:31:30.987654321 TAI.
|
|
||||||
/// let mut timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
///
|
|
||||||
/// // Increment the timestamp by 123.456s.
|
|
||||||
/// timestamp += Duration::new(123, 456_000_000);
|
|
||||||
///
|
|
||||||
/// assert_eq!(timestamp, MonotonicTime::new(1_234_568_014, 443_654_321));
|
|
||||||
/// assert_eq!(timestamp.as_secs(), 1_234_568_014);
|
|
||||||
/// assert_eq!(timestamp.subsec_nanos(), 443_654_321);
|
|
||||||
/// ```
|
|
||||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
|
||||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
|
||||||
pub struct MonotonicTime {
|
|
||||||
/// The number of whole seconds in the future (if positive) or in the past
|
|
||||||
/// (if negative) of 1970-01-01 00:00:00 TAI.
|
|
||||||
///
|
|
||||||
/// Note that the automatic derivation of `PartialOrd` relies on
|
|
||||||
/// lexicographical comparison so the `secs` field must appear before
|
|
||||||
/// `nanos` in declaration order to be given higher priority.
|
|
||||||
secs: i64,
|
|
||||||
/// The sub-second number of nanoseconds in the future of the point in time
|
|
||||||
/// defined by `secs`.
|
|
||||||
nanos: u32,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl MonotonicTime {
|
|
||||||
/// The epoch used by `MonotonicTime`, equal to 1970-01-01 00:00:00 TAI.
|
|
||||||
///
|
|
||||||
/// This epoch coincides with the PTP epoch defined in the IEEE-1588
|
|
||||||
/// standard.
|
|
||||||
pub const EPOCH: Self = Self { secs: 0, nanos: 0 };
|
|
||||||
|
|
||||||
/// The minimum possible `MonotonicTime` timestamp.
|
|
||||||
pub const MIN: Self = Self {
|
|
||||||
secs: i64::MIN,
|
|
||||||
nanos: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// The maximum possible `MonotonicTime` timestamp.
|
|
||||||
pub const MAX: Self = Self {
|
|
||||||
secs: i64::MAX,
|
|
||||||
nanos: NANOS_PER_SEC - 1,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Creates a timestamp directly from timestamp parts.
|
|
||||||
///
|
|
||||||
/// The number of seconds is relative to the [`EPOCH`](MonotonicTime::EPOCH)
|
|
||||||
/// (1970-01-01 00:00:00 TAI). It is negative for dates in the past of the
|
|
||||||
/// epoch.
|
|
||||||
///
|
|
||||||
/// The number of nanoseconds is always positive and always points towards
|
|
||||||
/// the future.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This constructor will panic if the number of nanoseconds is greater than
|
|
||||||
/// or equal to 1 second.
|
|
||||||
///
|
|
||||||
/// # Example
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// // A timestamp set to 2009-02-13 23:31:30.987654321 TAI.
|
|
||||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
///
|
|
||||||
/// // A timestamp set 3.5s before the epoch.
|
|
||||||
/// let timestamp = MonotonicTime::new(-4, 500_000_000);
|
|
||||||
/// assert_eq!(timestamp, MonotonicTime::EPOCH - Duration::new(3, 500_000_000));
|
|
||||||
/// ```
|
|
||||||
pub const fn new(secs: i64, subsec_nanos: u32) -> Self {
|
|
||||||
assert!(
|
|
||||||
subsec_nanos < NANOS_PER_SEC,
|
|
||||||
"invalid number of nanoseconds"
|
|
||||||
);
|
|
||||||
|
|
||||||
Self {
|
|
||||||
secs,
|
|
||||||
nanos: subsec_nanos,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates a timestamp from the current system time.
|
|
||||||
///
|
|
||||||
/// The argument is the current difference between TAI and UTC time in
|
|
||||||
/// seconds (a.k.a. leap seconds). For reference, this offset has been +37s
|
|
||||||
/// since 2017-01-01, a value which is to remain valid until at least
|
|
||||||
/// 2024-06-29. See the [official IERS bulletin
|
|
||||||
/// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
|
|
||||||
/// leap second announcements or the [IETF
|
|
||||||
/// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
|
|
||||||
/// current and historical values.
|
|
||||||
///
|
|
||||||
/// # Errors
|
|
||||||
///
|
|
||||||
/// This method will return an error if the reported system time is in the
|
|
||||||
/// past of the Unix epoch or if the offset-adjusted timestamp is outside
|
|
||||||
/// the representable range.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// // Compute the current TAI time assuming that the current difference
|
|
||||||
/// // between TAI and UTC time is 37s.
|
|
||||||
/// let timestamp = MonotonicTime::from_system(37).unwrap();
|
|
||||||
/// ```
|
|
||||||
pub fn from_system(leap_secs: i64) -> Result<Self, SystemTimeError> {
|
|
||||||
let utc_timestamp = SystemTime::now()
|
|
||||||
.duration_since(SystemTime::UNIX_EPOCH)
|
|
||||||
.map_err(|_| SystemTimeError::InvalidSystemTime)?;
|
|
||||||
|
|
||||||
Self::new(leap_secs, 0)
|
|
||||||
.checked_add(utc_timestamp)
|
|
||||||
.ok_or(SystemTimeError::OutOfRange)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of whole seconds relative to
|
|
||||||
/// [`EPOCH`](MonotonicTime::EPOCH) (1970-01-01 00:00:00 TAI).
|
|
||||||
///
|
|
||||||
/// Consistently with the interpretation of seconds and nanoseconds in the
|
|
||||||
/// [`new()`](Self::new) constructor, seconds are always rounded towards
|
|
||||||
/// `-∞`.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
/// assert_eq!(timestamp.as_secs(), 1_234_567_890);
|
|
||||||
///
|
|
||||||
/// let timestamp = MonotonicTime::EPOCH - Duration::new(3, 500_000_000);
|
|
||||||
/// assert_eq!(timestamp.as_secs(), -4);
|
|
||||||
/// ```
|
|
||||||
pub const fn as_secs(&self) -> i64 {
|
|
||||||
self.secs
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of seconds of the corresponding Unix time.
|
|
||||||
///
|
|
||||||
/// The argument is the difference between TAI and UTC time in seconds
|
|
||||||
/// (a.k.a. leap seconds) applicable at the date represented by the
|
|
||||||
/// timestamp. See the [official IERS bulletin
|
|
||||||
/// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
|
|
||||||
/// leap second announcements or the [IETF
|
|
||||||
/// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
|
|
||||||
/// current and historical values.
|
|
||||||
///
|
|
||||||
/// This method merely subtracts the offset from the value returned by
|
|
||||||
/// [`as_secs()`](Self::as_secs) and checks for potential overflow; its main
|
|
||||||
/// purpose is to prevent mistakes regarding the direction in which the
|
|
||||||
/// offset should be applied.
|
|
||||||
///
|
|
||||||
/// Note that the nanosecond part of a Unix timestamp can be simply
|
|
||||||
/// retrieved with [`subsec_nanos()`](Self::subsec_nanos) since UTC and TAI
|
|
||||||
/// differ by a whole number of seconds.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This will panic if the offset-adjusted timestamp cannot be represented
|
|
||||||
/// as an `i64`.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// // Set the date to 2000-01-01 00:00:00 TAI.
|
|
||||||
/// let timestamp = MonotonicTime::new(946_684_800, 0);
|
|
||||||
///
|
|
||||||
/// // Convert to a Unix timestamp, accounting for the +32s difference between
|
|
||||||
/// // TAI and UTC on 2000-01-01.
|
|
||||||
/// let unix_secs = timestamp.as_unix_secs(32);
|
|
||||||
/// ```
|
|
||||||
pub const fn as_unix_secs(&self, leap_secs: i64) -> i64 {
|
|
||||||
if let Some(secs) = self.secs.checked_sub(leap_secs) {
|
|
||||||
secs
|
|
||||||
} else {
|
|
||||||
panic!("timestamp outside representable range");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the sub-second fractional part in nanoseconds.
|
|
||||||
///
|
|
||||||
/// Note that nanoseconds always point towards the future even if the date
|
|
||||||
/// is in the past of the [`EPOCH`](MonotonicTime::EPOCH).
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
/// assert_eq!(timestamp.subsec_nanos(), 987_654_321);
|
|
||||||
/// ```
|
|
||||||
pub const fn subsec_nanos(&self) -> u32 {
|
|
||||||
self.nanos
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Adds a duration to a timestamp, checking for overflow.
|
|
||||||
///
|
|
||||||
/// Returns `None` if overflow occurred.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
/// assert!(timestamp.checked_add(Duration::new(10, 123_456_789)).is_some());
|
|
||||||
/// assert!(timestamp.checked_add(Duration::MAX).is_none());
|
|
||||||
/// ```
|
|
||||||
pub const fn checked_add(self, rhs: Duration) -> Option<Self> {
|
|
||||||
// A durations in seconds greater than `i64::MAX` is actually fine as
|
|
||||||
// long as the number of seconds does not effectively overflow which is
|
|
||||||
// why the below does not use `checked_add`. So technically the below
|
|
||||||
// addition may wrap around on the negative side due to the
|
|
||||||
// unsigned-to-signed cast of the duration, but this does not
|
|
||||||
// necessarily indicate an actual overflow. Actual overflow can be ruled
|
|
||||||
// out by verifying that the new timestamp is in the future of the old
|
|
||||||
// timestamp.
|
|
||||||
let mut secs = self.secs.wrapping_add(rhs.as_secs() as i64);
|
|
||||||
|
|
||||||
// Check for overflow.
|
|
||||||
if secs < self.secs {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut nanos = self.nanos + rhs.subsec_nanos();
|
|
||||||
if nanos >= NANOS_PER_SEC {
|
|
||||||
secs = if let Some(s) = secs.checked_add(1) {
|
|
||||||
s
|
|
||||||
} else {
|
|
||||||
return None;
|
|
||||||
};
|
|
||||||
nanos -= NANOS_PER_SEC;
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(Self { secs, nanos })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Subtracts a duration from a timestamp, checking for overflow.
|
|
||||||
///
|
|
||||||
/// Returns `None` if overflow occurred.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
|
|
||||||
/// assert!(timestamp.checked_sub(Duration::new(10, 123_456_789)).is_some());
|
|
||||||
/// assert!(timestamp.checked_sub(Duration::MAX).is_none());
|
|
||||||
/// ```
|
|
||||||
pub const fn checked_sub(self, rhs: Duration) -> Option<Self> {
|
|
||||||
// A durations in seconds greater than `i64::MAX` is actually fine as
|
|
||||||
// long as the number of seconds does not effectively overflow, which is
|
|
||||||
// why the below does not use `checked_sub`. So technically the below
|
|
||||||
// subtraction may wrap around on the positive side due to the
|
|
||||||
// unsigned-to-signed cast of the duration, but this does not
|
|
||||||
// necessarily indicate an actual overflow. Actual overflow can be ruled
|
|
||||||
// out by verifying that the new timestamp is in the past of the old
|
|
||||||
// timestamp.
|
|
||||||
let mut secs = self.secs.wrapping_sub(rhs.as_secs() as i64);
|
|
||||||
|
|
||||||
// Check for overflow.
|
|
||||||
if secs > self.secs {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
let nanos = if self.nanos < rhs.subsec_nanos() {
|
|
||||||
secs = if let Some(s) = secs.checked_sub(1) {
|
|
||||||
s
|
|
||||||
} else {
|
|
||||||
return None;
|
|
||||||
};
|
|
||||||
|
|
||||||
(self.nanos + NANOS_PER_SEC) - rhs.subsec_nanos()
|
|
||||||
} else {
|
|
||||||
self.nanos - rhs.subsec_nanos()
|
|
||||||
};
|
|
||||||
|
|
||||||
Some(Self { secs, nanos })
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Subtracts a timestamp from another timestamp.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// Panics if the argument lies in the future of `self`.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
|
|
||||||
/// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
|
|
||||||
/// assert_eq!(
|
|
||||||
/// timestamp_later.duration_since(timestamp_earlier),
|
|
||||||
/// Duration::new(20, 135_802_468)
|
|
||||||
/// );
|
|
||||||
/// ```
|
|
||||||
pub fn duration_since(self, earlier: Self) -> Duration {
|
|
||||||
self.checked_duration_since(earlier)
|
|
||||||
.expect("attempt to substract a timestamp from an earlier timestamp")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Computes the duration elapsed between a timestamp and an earlier
|
|
||||||
/// timestamp, checking that the timestamps are appropriately ordered.
|
|
||||||
///
|
|
||||||
/// Returns `None` if the argument lies in the future of `self`.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
/// use asynchronix::time::MonotonicTime;
|
|
||||||
///
|
|
||||||
/// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
|
|
||||||
/// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
|
|
||||||
/// assert!(timestamp_later.checked_duration_since(timestamp_earlier).is_some());
|
|
||||||
/// assert!(timestamp_earlier.checked_duration_since(timestamp_later).is_none());
|
|
||||||
/// ```
|
|
||||||
pub const fn checked_duration_since(self, earlier: Self) -> Option<Duration> {
|
|
||||||
// If the subtraction of the nanosecond fractions would overflow, carry
|
|
||||||
// over one second to the nanoseconds.
|
|
||||||
let (secs, nanos) = if earlier.nanos > self.nanos {
|
|
||||||
if let Some(s) = self.secs.checked_sub(1) {
|
|
||||||
(s, self.nanos + NANOS_PER_SEC)
|
|
||||||
} else {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
(self.secs, self.nanos)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Make sure the computation of the duration will not overflow the
|
|
||||||
// seconds.
|
|
||||||
if secs < earlier.secs {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
// This subtraction may wrap around if the difference between the two
|
|
||||||
// timestamps is more than `i64::MAX`, but even if it does the result
|
|
||||||
// will be correct once cast to an unsigned integer.
|
|
||||||
let delta_secs = secs.wrapping_sub(earlier.secs) as u64;
|
|
||||||
|
|
||||||
// The below subtraction is guaranteed to never overflow.
|
|
||||||
let delta_nanos = nanos - earlier.nanos;
|
|
||||||
|
|
||||||
Some(Duration::new(delta_secs, delta_nanos))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Add<Duration> for MonotonicTime {
|
|
||||||
type Output = Self;
|
|
||||||
|
|
||||||
/// Adds a duration to a timestamp.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This function panics if the resulting timestamp cannot be
|
|
||||||
/// represented. See [`MonotonicTime::checked_add`] for a panic-free
|
|
||||||
/// version.
|
|
||||||
fn add(self, other: Duration) -> Self {
|
|
||||||
self.checked_add(other)
|
|
||||||
.expect("overflow when adding duration to timestamp")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Sub<Duration> for MonotonicTime {
|
|
||||||
type Output = Self;
|
|
||||||
|
|
||||||
/// Subtracts a duration from a timestamp.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This function panics if the resulting timestamp cannot be
|
|
||||||
/// represented. See [`MonotonicTime::checked_sub`] for a panic-free
|
|
||||||
/// version.
|
|
||||||
fn sub(self, other: Duration) -> Self {
|
|
||||||
self.checked_sub(other)
|
|
||||||
.expect("overflow when subtracting duration from timestamp")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AddAssign<Duration> for MonotonicTime {
|
|
||||||
/// Increments the timestamp by a duration.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This function panics if the resulting timestamp cannot be represented.
|
|
||||||
fn add_assign(&mut self, other: Duration) {
|
|
||||||
*self = *self + other;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl SubAssign<Duration> for MonotonicTime {
|
|
||||||
/// Decrements the timestamp by a duration.
|
|
||||||
///
|
|
||||||
/// # Panics
|
|
||||||
///
|
|
||||||
/// This function panics if the resulting timestamp cannot be represented.
|
|
||||||
fn sub_assign(&mut self, other: Duration) {
|
|
||||||
*self = *self - other;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// An error that may be returned when initializing a [`MonotonicTime`] from
|
|
||||||
/// system time.
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
|
||||||
pub enum SystemTimeError {
|
|
||||||
/// The system time is in the past of the Unix epoch.
|
|
||||||
InvalidSystemTime,
|
|
||||||
/// The system time cannot be represented as a `MonotonicTime`.
|
|
||||||
OutOfRange,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl fmt::Display for SystemTimeError {
|
|
||||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
match self {
|
|
||||||
Self::InvalidSystemTime => write!(fmt, "invalid system time"),
|
|
||||||
Self::OutOfRange => write!(fmt, "timestamp outside representable range"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for SystemTimeError {}
|
|
||||||
|
|
||||||
/// A tearable atomic adapter over a `MonotonicTime`.
|
/// A tearable atomic adapter over a `MonotonicTime`.
|
||||||
///
|
///
|
||||||
/// This makes it possible to store the simulation time in a `SyncCell`, an
|
/// This makes it possible to store the simulation time in a `SyncCell`, an
|
||||||
@ -490,8 +17,8 @@ pub(crate) struct TearableAtomicTime {
|
|||||||
impl TearableAtomicTime {
|
impl TearableAtomicTime {
|
||||||
pub(crate) fn new(time: MonotonicTime) -> Self {
|
pub(crate) fn new(time: MonotonicTime) -> Self {
|
||||||
Self {
|
Self {
|
||||||
secs: AtomicI64::new(time.secs),
|
secs: AtomicI64::new(time.as_secs()),
|
||||||
nanos: AtomicU32::new(time.nanos),
|
nanos: AtomicU32::new(time.subsec_nanos()),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -502,170 +29,17 @@ impl TearableAtomic for TearableAtomicTime {
|
|||||||
fn tearable_load(&self) -> MonotonicTime {
|
fn tearable_load(&self) -> MonotonicTime {
|
||||||
// Load each field separately. This can never create invalid values of a
|
// Load each field separately. This can never create invalid values of a
|
||||||
// `MonotonicTime`, even if the load is torn.
|
// `MonotonicTime`, even if the load is torn.
|
||||||
MonotonicTime {
|
MonotonicTime::new(
|
||||||
secs: self.secs.load(Ordering::Relaxed),
|
self.secs.load(Ordering::Relaxed),
|
||||||
nanos: self.nanos.load(Ordering::Relaxed),
|
self.nanos.load(Ordering::Relaxed),
|
||||||
}
|
)
|
||||||
|
.unwrap()
|
||||||
}
|
}
|
||||||
|
|
||||||
fn tearable_store(&self, value: MonotonicTime) {
|
fn tearable_store(&self, value: MonotonicTime) {
|
||||||
// Write each field separately. This can never create invalid values of
|
// Write each field separately. This can never create invalid values of
|
||||||
// a `MonotonicTime`, even if the store is torn.
|
// a `MonotonicTime`, even if the store is torn.
|
||||||
self.secs.store(value.secs, Ordering::Relaxed);
|
self.secs.store(value.as_secs(), Ordering::Relaxed);
|
||||||
self.nanos.store(value.nanos, Ordering::Relaxed);
|
self.nanos.store(value.subsec_nanos(), Ordering::Relaxed);
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(all(test, not(asynchronix_loom)))]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_equality() {
|
|
||||||
let t0 = MonotonicTime::new(123, 123_456_789);
|
|
||||||
let t1 = MonotonicTime::new(123, 123_456_789);
|
|
||||||
let t2 = MonotonicTime::new(123, 123_456_790);
|
|
||||||
let t3 = MonotonicTime::new(124, 123_456_789);
|
|
||||||
|
|
||||||
assert_eq!(t0, t1);
|
|
||||||
assert_ne!(t0, t2);
|
|
||||||
assert_ne!(t0, t3);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_ordering() {
|
|
||||||
let t0 = MonotonicTime::new(0, 1);
|
|
||||||
let t1 = MonotonicTime::new(1, 0);
|
|
||||||
|
|
||||||
assert!(t1 > t0);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(not(miri))]
|
|
||||||
#[test]
|
|
||||||
fn time_from_system_smoke() {
|
|
||||||
const START_OF_2022: i64 = 1640995200;
|
|
||||||
const START_OF_2050: i64 = 2524608000;
|
|
||||||
|
|
||||||
let now_secs = MonotonicTime::from_system(0).unwrap().as_secs();
|
|
||||||
|
|
||||||
assert!(now_secs > START_OF_2022);
|
|
||||||
assert!(now_secs < START_OF_2050);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[should_panic]
|
|
||||||
fn time_invalid() {
|
|
||||||
MonotonicTime::new(123, 1_000_000_000);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_duration_since_smoke() {
|
|
||||||
let t0 = MonotonicTime::new(100, 100_000_000);
|
|
||||||
let t1 = MonotonicTime::new(123, 223_456_789);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
t1.checked_duration_since(t0),
|
|
||||||
Some(Duration::new(23, 123_456_789))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_duration_with_carry() {
|
|
||||||
let t0 = MonotonicTime::new(100, 200_000_000);
|
|
||||||
let t1 = MonotonicTime::new(101, 100_000_000);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
t1.checked_duration_since(t0),
|
|
||||||
Some(Duration::new(0, 900_000_000))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_duration_since_extreme() {
|
|
||||||
const MIN_TIME: MonotonicTime = MonotonicTime::new(i64::MIN, 0);
|
|
||||||
const MAX_TIME: MonotonicTime = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
|
|
||||||
assert_eq!(
|
|
||||||
MAX_TIME.checked_duration_since(MIN_TIME),
|
|
||||||
Some(Duration::new(u64::MAX, NANOS_PER_SEC - 1))
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_duration_since_invalid() {
|
|
||||||
let t0 = MonotonicTime::new(100, 0);
|
|
||||||
let t1 = MonotonicTime::new(99, 0);
|
|
||||||
|
|
||||||
assert_eq!(t1.checked_duration_since(t0), None);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_add_duration_smoke() {
|
|
||||||
let t = MonotonicTime::new(-100, 100_000_000);
|
|
||||||
let dt = Duration::new(400, 300_000_000);
|
|
||||||
|
|
||||||
assert_eq!(t + dt, MonotonicTime::new(300, 400_000_000));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_add_duration_with_carry() {
|
|
||||||
let t = MonotonicTime::new(-100, 900_000_000);
|
|
||||||
let dt1 = Duration::new(400, 100_000_000);
|
|
||||||
let dt2 = Duration::new(400, 300_000_000);
|
|
||||||
|
|
||||||
assert_eq!(t + dt1, MonotonicTime::new(301, 0));
|
|
||||||
assert_eq!(t + dt2, MonotonicTime::new(301, 200_000_000));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_add_duration_extreme() {
|
|
||||||
let t = MonotonicTime::new(i64::MIN, 0);
|
|
||||||
let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
|
|
||||||
assert_eq!(t + dt, MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[should_panic]
|
|
||||||
fn time_add_duration_overflow() {
|
|
||||||
let t = MonotonicTime::new(i64::MIN, 1);
|
|
||||||
let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
|
|
||||||
let _ = t + dt;
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_sub_duration_smoke() {
|
|
||||||
let t = MonotonicTime::new(100, 500_000_000);
|
|
||||||
let dt = Duration::new(400, 300_000_000);
|
|
||||||
|
|
||||||
assert_eq!(t - dt, MonotonicTime::new(-300, 200_000_000));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_sub_duration_with_carry() {
|
|
||||||
let t = MonotonicTime::new(100, 100_000_000);
|
|
||||||
let dt1 = Duration::new(400, 100_000_000);
|
|
||||||
let dt2 = Duration::new(400, 300_000_000);
|
|
||||||
|
|
||||||
assert_eq!(t - dt1, MonotonicTime::new(-300, 0));
|
|
||||||
assert_eq!(t - dt2, MonotonicTime::new(-301, 800_000_000));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn time_sub_duration_extreme() {
|
|
||||||
let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
|
|
||||||
assert_eq!(t - dt, MonotonicTime::new(i64::MIN, 0));
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
#[should_panic]
|
|
||||||
fn time_sub_duration_overflow() {
|
|
||||||
let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 2);
|
|
||||||
let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
|
|
||||||
|
|
||||||
let _ = t - dt;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1,943 +0,0 @@
|
|||||||
//! Scheduling functions and types.
|
|
||||||
|
|
||||||
use std::error::Error;
|
|
||||||
use std::fmt;
|
|
||||||
use std::future::Future;
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
use std::pin::Pin;
|
|
||||||
use std::sync::atomic::{AtomicBool, Ordering};
|
|
||||||
use std::sync::{Arc, Mutex};
|
|
||||||
use std::task::{Context, Poll};
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
use pin_project_lite::pin_project;
|
|
||||||
use recycle_box::{coerce_box, RecycleBox};
|
|
||||||
|
|
||||||
use crate::channel::{ChannelId, Sender};
|
|
||||||
use crate::executor::Executor;
|
|
||||||
use crate::model::{InputFn, Model};
|
|
||||||
use crate::time::{MonotonicTime, TearableAtomicTime};
|
|
||||||
use crate::util::priority_queue::PriorityQueue;
|
|
||||||
use crate::util::sync_cell::SyncCellReader;
|
|
||||||
|
|
||||||
/// Shorthand for the scheduler queue type.
|
|
||||||
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, ChannelId), Box<dyn ScheduledEvent>>;
|
|
||||||
|
|
||||||
/// Trait abstracting over time-absolute and time-relative deadlines.
|
|
||||||
///
|
|
||||||
/// This trait is implemented by [`std::time::Duration`] and
|
|
||||||
/// [`MonotonicTime`].
|
|
||||||
pub trait Deadline {
|
|
||||||
/// Make this deadline into an absolute timestamp, using the provided
|
|
||||||
/// current time as a reference.
|
|
||||||
fn into_time(self, now: MonotonicTime) -> MonotonicTime;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deadline for Duration {
|
|
||||||
#[inline(always)]
|
|
||||||
fn into_time(self, now: MonotonicTime) -> MonotonicTime {
|
|
||||||
now + self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Deadline for MonotonicTime {
|
|
||||||
#[inline(always)]
|
|
||||||
fn into_time(self, _: MonotonicTime) -> MonotonicTime {
|
|
||||||
self
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A local scheduler for models.
///
/// A `Scheduler` is a handle to the global scheduler associated with a model
/// instance. It can be used by the model to retrieve the simulation time or
/// schedule delayed actions on itself.
///
/// ### Caveat: self-scheduling `async` methods
///
/// Due to a current rustc issue, `async` methods that schedule themselves will
/// not compile unless an explicit `Send` bound is added to the returned future.
/// This can be done by replacing the `async` signature with a partially
/// desugared signature such as:
///
/// ```ignore
/// fn self_scheduling_method<'a>(
///     &'a mut self,
///     arg: MyEventType,
///     scheduler: &'a Scheduler<Self>
/// ) -> impl Future<Output=()> + Send + 'a {
///     async move {
///         /* implementation */
///     }
/// }
/// ```
///
/// Self-scheduling methods which are not `async` are not affected by this
/// issue.
///
/// # Examples
///
/// A model that sends a greeting after some delay.
///
/// ```
/// use std::time::Duration;
/// use asynchronix::model::{Model, Output}; use asynchronix::time::Scheduler;
///
/// #[derive(Default)]
/// pub struct DelayedGreeter {
///     msg_out: Output<String>,
/// }
///
/// impl DelayedGreeter {
///     // Triggers a greeting on the output port after some delay [input port].
///     pub async fn greet_with_delay(&mut self, delay: Duration, scheduler: &Scheduler<Self>) {
///         let time = scheduler.time();
///         let greeting = format!("Hello, this message was scheduled at: {:?}.", time);
///
///         if delay.is_zero() {
///             self.msg_out.send(greeting).await;
///         } else {
///             scheduler.schedule_event(delay, Self::send_msg, greeting).unwrap();
///         }
///     }
///
///     // Sends a message to the output [private input port].
///     async fn send_msg(&mut self, msg: String) {
///         self.msg_out.send(msg).await;
///     }
/// }
/// impl Model for DelayedGreeter {}
/// ```

// The self-scheduling caveat seems related to this issue:
// https://github.com/rust-lang/rust/issues/78649
pub struct Scheduler<M: Model> {
    // Sender to the model's input channel; cloned for each scheduled event so
    // the event can be dispatched to the model when it comes due.
    sender: Sender<M>,
    // Global queue of pending events, shared with the simulation and keyed by
    // (time stamp, channel id).
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    // Synchronized read handle to the current simulation time.
    time: SyncCellReader<TearableAtomicTime>,
}
|
|
||||||
|
|
||||||
impl<M: Model> Scheduler<M> {
|
|
||||||
/// Creates a new local scheduler.
|
|
||||||
pub(crate) fn new(
|
|
||||||
sender: Sender<M>,
|
|
||||||
scheduler_queue: Arc<Mutex<SchedulerQueue>>,
|
|
||||||
time: SyncCellReader<TearableAtomicTime>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
sender,
|
|
||||||
scheduler_queue,
|
|
||||||
time,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the current simulation time.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use asynchronix::model::Model;
|
|
||||||
/// use asynchronix::time::{MonotonicTime, Scheduler};
|
|
||||||
///
|
|
||||||
/// fn is_third_millenium<M: Model>(scheduler: &Scheduler<M>) -> bool {
|
|
||||||
/// let time = scheduler.time();
|
|
||||||
///
|
|
||||||
/// time >= MonotonicTime::new(978307200, 0) && time < MonotonicTime::new(32535216000, 0)
|
|
||||||
/// }
|
|
||||||
/// ```
|
|
||||||
pub fn time(&self) -> MonotonicTime {
|
|
||||||
self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time")
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules an event at a future time.
|
|
||||||
///
|
|
||||||
/// An error is returned if the specified deadline is not in the future of
|
|
||||||
/// the current simulation time.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
///
|
|
||||||
/// use asynchronix::model::Model;
|
|
||||||
/// use asynchronix::time::Scheduler;
|
|
||||||
///
|
|
||||||
/// // A timer.
|
|
||||||
/// pub struct Timer {}
|
|
||||||
///
|
|
||||||
/// impl Timer {
|
|
||||||
/// // Sets an alarm [input port].
|
|
||||||
/// pub fn set(&mut self, setting: Duration, scheduler: &Scheduler<Self>) {
|
|
||||||
/// if scheduler.schedule_event(setting, Self::ring, ()).is_err() {
|
|
||||||
/// println!("The alarm clock can only be set for a future time");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Rings [private input port].
|
|
||||||
/// fn ring(&mut self) {
|
|
||||||
/// println!("Brringggg");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl Model for Timer {}
|
|
||||||
/// ```
|
|
||||||
pub fn schedule_event<F, T, S>(
|
|
||||||
&self,
|
|
||||||
deadline: impl Deadline,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
) -> Result<(), SchedulingError>
|
|
||||||
where
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let now = self.time();
|
|
||||||
let time = deadline.into_time(now);
|
|
||||||
if now >= time {
|
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
|
||||||
}
|
|
||||||
let sender = self.sender.clone();
|
|
||||||
schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules a cancellable event at a future time and returns an event key.
|
|
||||||
///
|
|
||||||
/// An error is returned if the specified deadline is not in the future of
|
|
||||||
/// the current simulation time.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use asynchronix::model::Model;
|
|
||||||
/// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
|
|
||||||
///
|
|
||||||
/// // An alarm clock that can be cancelled.
|
|
||||||
/// #[derive(Default)]
|
|
||||||
/// pub struct CancellableAlarmClock {
|
|
||||||
/// event_key: Option<EventKey>,
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl CancellableAlarmClock {
|
|
||||||
/// // Sets an alarm [input port].
|
|
||||||
/// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
|
|
||||||
/// self.cancel();
|
|
||||||
/// match scheduler.schedule_keyed_event(setting, Self::ring, ()) {
|
|
||||||
/// Ok(event_key) => self.event_key = Some(event_key),
|
|
||||||
/// Err(_) => println!("The alarm clock can only be set for a future time"),
|
|
||||||
/// };
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Cancels the current alarm, if any [input port].
|
|
||||||
/// pub fn cancel(&mut self) {
|
|
||||||
/// self.event_key.take().map(|k| k.cancel());
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Rings the alarm [private input port].
|
|
||||||
/// fn ring(&mut self) {
|
|
||||||
/// println!("Brringggg!");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl Model for CancellableAlarmClock {}
|
|
||||||
/// ```
|
|
||||||
pub fn schedule_keyed_event<F, T, S>(
|
|
||||||
&self,
|
|
||||||
deadline: impl Deadline,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
) -> Result<EventKey, SchedulingError>
|
|
||||||
where
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let now = self.time();
|
|
||||||
let time = deadline.into_time(now);
|
|
||||||
if now >= time {
|
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
|
||||||
}
|
|
||||||
let sender = self.sender.clone();
|
|
||||||
let event_key =
|
|
||||||
schedule_keyed_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
|
|
||||||
|
|
||||||
Ok(event_key)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules a periodically recurring event at a future time.
|
|
||||||
///
|
|
||||||
/// An error is returned if the specified deadline is not in the future of
|
|
||||||
/// the current simulation time or if the specified period is null.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
///
|
|
||||||
/// use asynchronix::model::Model;
|
|
||||||
/// use asynchronix::time::{MonotonicTime, Scheduler};
|
|
||||||
///
|
|
||||||
/// // An alarm clock beeping at 1Hz.
|
|
||||||
/// pub struct BeepingAlarmClock {}
|
|
||||||
///
|
|
||||||
/// impl BeepingAlarmClock {
|
|
||||||
/// // Sets an alarm [input port].
|
|
||||||
/// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
|
|
||||||
/// if scheduler.schedule_periodic_event(
|
|
||||||
/// setting,
|
|
||||||
/// Duration::from_secs(1), // 1Hz = 1/1s
|
|
||||||
/// Self::beep,
|
|
||||||
/// ()
|
|
||||||
/// ).is_err() {
|
|
||||||
/// println!("The alarm clock can only be set for a future time");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Emits a single beep [private input port].
|
|
||||||
/// fn beep(&mut self) {
|
|
||||||
/// println!("Beep!");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl Model for BeepingAlarmClock {}
|
|
||||||
/// ```
|
|
||||||
pub fn schedule_periodic_event<F, T, S>(
|
|
||||||
&self,
|
|
||||||
deadline: impl Deadline,
|
|
||||||
period: Duration,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
) -> Result<(), SchedulingError>
|
|
||||||
where
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let now = self.time();
|
|
||||||
let time = deadline.into_time(now);
|
|
||||||
if now >= time {
|
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
|
||||||
}
|
|
||||||
if period.is_zero() {
|
|
||||||
return Err(SchedulingError::NullRepetitionPeriod);
|
|
||||||
}
|
|
||||||
let sender = self.sender.clone();
|
|
||||||
schedule_periodic_event_at_unchecked(
|
|
||||||
time,
|
|
||||||
period,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
&self.scheduler_queue,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules a cancellable, periodically recurring event at a future time
|
|
||||||
/// and returns an event key.
|
|
||||||
///
|
|
||||||
/// An error is returned if the specified deadline is not in the future of
|
|
||||||
/// the current simulation time or if the specified period is null.
|
|
||||||
///
|
|
||||||
/// # Examples
|
|
||||||
///
|
|
||||||
/// ```
|
|
||||||
/// use std::time::Duration;
|
|
||||||
///
|
|
||||||
/// use asynchronix::model::Model;
|
|
||||||
/// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
|
|
||||||
///
|
|
||||||
/// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or
|
|
||||||
/// // stopped after it sets off.
|
|
||||||
/// #[derive(Default)]
|
|
||||||
/// pub struct CancellableBeepingAlarmClock {
|
|
||||||
/// event_key: Option<EventKey>,
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl CancellableBeepingAlarmClock {
|
|
||||||
/// // Sets an alarm [input port].
|
|
||||||
/// pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
|
|
||||||
/// self.cancel();
|
|
||||||
/// match scheduler.schedule_keyed_periodic_event(
|
|
||||||
/// setting,
|
|
||||||
/// Duration::from_secs(1), // 1Hz = 1/1s
|
|
||||||
/// Self::beep,
|
|
||||||
/// ()
|
|
||||||
/// ) {
|
|
||||||
/// Ok(event_key) => self.event_key = Some(event_key),
|
|
||||||
/// Err(_) => println!("The alarm clock can only be set for a future time"),
|
|
||||||
/// };
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Cancels or stops the alarm [input port].
|
|
||||||
/// pub fn cancel(&mut self) {
|
|
||||||
/// self.event_key.take().map(|k| k.cancel());
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// // Emits a single beep [private input port].
|
|
||||||
/// fn beep(&mut self) {
|
|
||||||
/// println!("Beep!");
|
|
||||||
/// }
|
|
||||||
/// }
|
|
||||||
///
|
|
||||||
/// impl Model for CancellableBeepingAlarmClock {}
|
|
||||||
/// ```
|
|
||||||
pub fn schedule_keyed_periodic_event<F, T, S>(
|
|
||||||
&self,
|
|
||||||
deadline: impl Deadline,
|
|
||||||
period: Duration,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
) -> Result<EventKey, SchedulingError>
|
|
||||||
where
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let now = self.time();
|
|
||||||
let time = deadline.into_time(now);
|
|
||||||
if now >= time {
|
|
||||||
return Err(SchedulingError::InvalidScheduledTime);
|
|
||||||
}
|
|
||||||
if period.is_zero() {
|
|
||||||
return Err(SchedulingError::NullRepetitionPeriod);
|
|
||||||
}
|
|
||||||
let sender = self.sender.clone();
|
|
||||||
let event_key = schedule_periodic_keyed_event_at_unchecked(
|
|
||||||
time,
|
|
||||||
period,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
&self.scheduler_queue,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(event_key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M: Model> fmt::Debug for Scheduler<M> {
    /// Formats the scheduler opaquely, without exposing its internal fields.
    fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
        formatter.debug_struct("Scheduler").finish_non_exhaustive()
    }
}
|
|
||||||
|
|
||||||
/// Handle to a scheduled event.
///
/// An `EventKey` can be used to cancel a future event.
#[derive(Clone, Debug)]
#[must_use = "prefer unkeyed scheduling methods if the event is never cancelled"]
pub struct EventKey {
    is_cancelled: Arc<AtomicBool>,
}

impl EventKey {
    /// Creates a key for a pending event.
    ///
    /// The key starts in the not-cancelled state.
    pub(crate) fn new() -> Self {
        Self {
            is_cancelled: Default::default(),
        }
    }

    /// Checks whether the event was cancelled.
    pub(crate) fn is_cancelled(&self) -> bool {
        self.is_cancelled.load(Ordering::Relaxed)
    }

    /// Cancels the associated event.
    ///
    /// Consumes the key: cancellation is irreversible and is observed by all
    /// clones of this key.
    pub fn cancel(self) {
        self.is_cancelled.store(true, Ordering::Relaxed);
    }
}
|
|
||||||
|
|
||||||
/// Error returned when the scheduled time or the repetition period are invalid.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SchedulingError {
    /// The scheduled time does not lie in the future of the current simulation
    /// time.
    InvalidScheduledTime,
    /// The repetition period is zero.
    NullRepetitionPeriod,
}

impl fmt::Display for SchedulingError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let message = match self {
            Self::InvalidScheduledTime => {
                "the scheduled time should be in the future of the current simulation time"
            }
            Self::NullRepetitionPeriod => "the repetition period cannot be zero",
        };

        write!(fmt, "{}", message)
    }
}

impl Error for SchedulingError {}
|
|
||||||
|
|
||||||
/// Schedules an event at a future time.
|
|
||||||
///
|
|
||||||
/// This method does not check whether the specified time lies in the future
|
|
||||||
/// of the current simulation time.
|
|
||||||
pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
|
|
||||||
time: MonotonicTime,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
scheduler_queue: &Mutex<SchedulerQueue>,
|
|
||||||
) where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let channel_id = sender.channel_id();
|
|
||||||
|
|
||||||
let event_dispatcher = Box::new(new_event_dispatcher(func, arg, sender));
|
|
||||||
|
|
||||||
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
|
||||||
scheduler_queue.insert((time, channel_id), event_dispatcher);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules an event at a future time, returning an event key.
|
|
||||||
///
|
|
||||||
/// This method does not check whether the specified time lies in the future
|
|
||||||
/// of the current simulation time.
|
|
||||||
pub(crate) fn schedule_keyed_event_at_unchecked<M, F, T, S>(
|
|
||||||
time: MonotonicTime,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
scheduler_queue: &Mutex<SchedulerQueue>,
|
|
||||||
) -> EventKey
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let event_key = EventKey::new();
|
|
||||||
let channel_id = sender.channel_id();
|
|
||||||
let event_dispatcher = Box::new(KeyedEventDispatcher::new(
|
|
||||||
event_key.clone(),
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
));
|
|
||||||
|
|
||||||
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
|
||||||
scheduler_queue.insert((time, channel_id), event_dispatcher);
|
|
||||||
|
|
||||||
event_key
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules a periodic event at a future time.
|
|
||||||
///
|
|
||||||
/// This method does not check whether the specified time lies in the future
|
|
||||||
/// of the current simulation time.
|
|
||||||
pub(crate) fn schedule_periodic_event_at_unchecked<M, F, T, S>(
|
|
||||||
time: MonotonicTime,
|
|
||||||
period: Duration,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
scheduler_queue: &Mutex<SchedulerQueue>,
|
|
||||||
) where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let channel_id = sender.channel_id();
|
|
||||||
|
|
||||||
let event_dispatcher = Box::new(PeriodicEventDispatcher::new(func, arg, sender, period));
|
|
||||||
|
|
||||||
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
|
||||||
scheduler_queue.insert((time, channel_id), event_dispatcher);
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Schedules an event at a future time, returning an event key.
|
|
||||||
///
|
|
||||||
/// This method does not check whether the specified time lies in the future
|
|
||||||
/// of the current simulation time.
|
|
||||||
pub(crate) fn schedule_periodic_keyed_event_at_unchecked<M, F, T, S>(
|
|
||||||
time: MonotonicTime,
|
|
||||||
period: Duration,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
scheduler_queue: &Mutex<SchedulerQueue>,
|
|
||||||
) -> EventKey
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
let event_key = EventKey::new();
|
|
||||||
let channel_id = sender.channel_id();
|
|
||||||
let event_dispatcher = Box::new(PeriodicKeyedEventDispatcher::new(
|
|
||||||
event_key.clone(),
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
period,
|
|
||||||
));
|
|
||||||
|
|
||||||
let mut scheduler_queue = scheduler_queue.lock().unwrap();
|
|
||||||
scheduler_queue.insert((time, channel_id), event_dispatcher);
|
|
||||||
|
|
||||||
event_key
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Trait for objects that can be converted to a future dispatching a scheduled
/// event.
pub(crate) trait ScheduledEvent: Send {
    /// Reports whether the associated event was cancelled.
    fn is_cancelled(&self) -> bool;

    /// Returns a boxed clone of this event and the repetition period if this is
    /// a periodic event, otherwise returns `None`.
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)>;

    /// Returns a boxed future dispatching the associated event.
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;

    /// Spawns the future that dispatches the associated event onto the provided
    /// executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
|
|
||||||
|
|
||||||
pin_project! {
    /// Object that can be converted to a future dispatching a non-cancellable
    /// event.
    ///
    /// Note that this particular event dispatcher is in fact already a future:
    /// since the future cannot be cancelled and the dispatcher does not need to
    /// be cloned, there is no need to defer the construction of the future.
    /// This makes `into_future` a trivial cast, which saves a boxing operation.
    pub(crate) struct EventDispatcher<F> {
        // Future performing the actual event dispatch; structurally pinned via
        // the `pin_project!` macro so `poll` can project to it.
        #[pin]
        fut: F,
    }
}

/// Constructs a new `EventDispatcher`.
///
/// Due to some limitations of type inference or of my understanding of it, the
/// constructor for this event dispatcher is a freestanding function.
fn new_event_dispatcher<M, F, T, S>(
    func: F,
    arg: T,
    sender: Sender<M>,
) -> EventDispatcher<impl Future<Output = ()>>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    let fut = dispatch_event(func, arg, sender);

    EventDispatcher { fut }
}

impl<F> Future for EventDispatcher<F>
where
    F: Future,
{
    type Output = F::Output;

    // Delegates directly to the inner future through a pin projection.
    #[inline(always)]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().fut.poll(cx)
    }
}

impl<F> ScheduledEvent for EventDispatcher<F>
where
    F: Future<Output = ()> + Send + 'static,
{
    fn is_cancelled(&self) -> bool {
        // An unkeyed event has no cancellation handle, so it is never cancelled.
        false
    }
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
        // A one-shot event is never repeated.
        None
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // No need for boxing, type coercion is enough here.
        Box::into_pin(self)
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // The unboxed dispatcher is itself a future and can be spawned directly.
        executor.spawn_and_forget(*self);
    }
}
|
|
||||||
|
|
||||||
/// Object that can be converted to a future dispatching a non-cancellable periodic
|
|
||||||
/// event.
|
|
||||||
pub(crate) struct PeriodicEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
{
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
period: Duration,
|
|
||||||
_input_kind: PhantomData<S>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> PeriodicEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
{
|
|
||||||
/// Constructs a new `PeriodicEventDispatcher`.
|
|
||||||
fn new(func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
|
|
||||||
Self {
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
period,
|
|
||||||
_input_kind: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> ScheduledEvent for PeriodicEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
fn is_cancelled(&self) -> bool {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
|
|
||||||
let event = Box::new(Self::new(
|
|
||||||
self.func.clone(),
|
|
||||||
self.arg.clone(),
|
|
||||||
self.sender.clone(),
|
|
||||||
self.period,
|
|
||||||
));
|
|
||||||
|
|
||||||
Some((event, self.period))
|
|
||||||
}
|
|
||||||
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
|
||||||
let Self {
|
|
||||||
func, arg, sender, ..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
Box::pin(dispatch_event(func, arg, sender))
|
|
||||||
}
|
|
||||||
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
|
||||||
let Self {
|
|
||||||
func, arg, sender, ..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
let fut = dispatch_event(func, arg, sender);
|
|
||||||
executor.spawn_and_forget(fut);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Object that can be converted to a future dispatching a cancellable event.
|
|
||||||
pub(crate) struct KeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
{
|
|
||||||
event_key: EventKey,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
_input_kind: PhantomData<S>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> KeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
{
|
|
||||||
/// Constructs a new `KeyedEventDispatcher`.
|
|
||||||
fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>) -> Self {
|
|
||||||
Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
_input_kind: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> ScheduledEvent for KeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
fn is_cancelled(&self) -> bool {
|
|
||||||
self.event_key.is_cancelled()
|
|
||||||
}
|
|
||||||
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
|
||||||
let Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
|
|
||||||
}
|
|
||||||
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
|
||||||
let Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
let fut = dispatch_keyed_event(event_key, func, arg, sender);
|
|
||||||
executor.spawn_and_forget(fut);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Object that can be converted to a future dispatching a cancellable event.
|
|
||||||
pub(crate) struct PeriodicKeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
{
|
|
||||||
event_key: EventKey,
|
|
||||||
func: F,
|
|
||||||
arg: T,
|
|
||||||
sender: Sender<M>,
|
|
||||||
period: Duration,
|
|
||||||
_input_kind: PhantomData<S>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> PeriodicKeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S>,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
{
|
|
||||||
/// Constructs a new `KeyedEventDispatcher`.
|
|
||||||
fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
|
|
||||||
Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
period,
|
|
||||||
_input_kind: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<M, F, T, S> ScheduledEvent for PeriodicKeyedEventDispatcher<M, F, T, S>
|
|
||||||
where
|
|
||||||
M: Model,
|
|
||||||
F: for<'a> InputFn<'a, M, T, S> + Clone,
|
|
||||||
T: Send + Clone + 'static,
|
|
||||||
S: Send + 'static,
|
|
||||||
{
|
|
||||||
fn is_cancelled(&self) -> bool {
|
|
||||||
self.event_key.is_cancelled()
|
|
||||||
}
|
|
||||||
fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
|
|
||||||
let event = Box::new(Self::new(
|
|
||||||
self.event_key.clone(),
|
|
||||||
self.func.clone(),
|
|
||||||
self.arg.clone(),
|
|
||||||
self.sender.clone(),
|
|
||||||
self.period,
|
|
||||||
));
|
|
||||||
|
|
||||||
Some((event, self.period))
|
|
||||||
}
|
|
||||||
fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
|
|
||||||
let Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
|
|
||||||
}
|
|
||||||
fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
|
|
||||||
let Self {
|
|
||||||
event_key,
|
|
||||||
func,
|
|
||||||
arg,
|
|
||||||
sender,
|
|
||||||
..
|
|
||||||
} = *self;
|
|
||||||
|
|
||||||
let fut = dispatch_keyed_event(event_key, func, arg, sender);
|
|
||||||
executor.spawn_and_forget(fut);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Asynchronously dispatch a regular, non-cancellable event.
async fn dispatch_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    // The send result is deliberately ignored: if the send fails the event is
    // silently dropped.
    let _ = sender
        .send(
            // This closure is executed with exclusive access to the model; it
            // builds the future performing the actual input call, recycling
            // the provided box to avoid a fresh allocation.
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = func.call(model, arg, scheduler);

                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
|
|
||||||
|
|
||||||
/// Asynchronously dispatch a cancellable event.
async fn dispatch_keyed_event<M, F, T, S>(event_key: EventKey, func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    // The send result is deliberately ignored: if the send fails the event is
    // silently dropped.
    let _ = sender
        .send(
            // This closure is executed with exclusive access to the model; it
            // builds the future performing the actual input call, recycling
            // the provided box to avoid a fresh allocation.
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = async move {
                    // Only perform the call if the event wasn't cancelled.
                    // Note: the key is checked at dispatch time, so a
                    // cancellation occurring after the event was popped from
                    // the queue but before this future runs is still honored.
                    if !event_key.is_cancelled() {
                        func.call(model, arg, scheduler).await;
                    }
                };

                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
|
|
@ -1,7 +1,9 @@
|
|||||||
pub(crate) mod bit;
|
pub(crate) mod bit;
|
||||||
pub(crate) mod futures;
|
pub(crate) mod cached_rw_lock;
|
||||||
|
pub(crate) mod indexed_priority_queue;
|
||||||
pub(crate) mod priority_queue;
|
pub(crate) mod priority_queue;
|
||||||
pub(crate) mod rng;
|
pub(crate) mod rng;
|
||||||
|
pub(crate) mod seq_futures;
|
||||||
pub(crate) mod slot;
|
pub(crate) mod slot;
|
||||||
pub(crate) mod spsc_queue;
|
|
||||||
pub(crate) mod sync_cell;
|
pub(crate) mod sync_cell;
|
||||||
|
pub(crate) mod task_set;
|
||||||
|
@ -1,7 +1,5 @@
|
|||||||
//! Bit manipulation and algorithms.
|
//! Bit manipulation and algorithms.
|
||||||
|
|
||||||
#![allow(unused)]
|
|
||||||
|
|
||||||
/// Find the position of the `Nᵗʰ` set bit starting the search from the least
|
/// Find the position of the `Nᵗʰ` set bit starting the search from the least
|
||||||
/// significant bit.
|
/// significant bit.
|
||||||
///
|
///
|
||||||
|
174
asynchronix/src/util/cached_rw_lock.rs
Normal file
174
asynchronix/src/util/cached_rw_lock.rs
Normal file
@ -0,0 +1,174 @@
|
|||||||
|
use std::ops::{Deref, DerefMut};
|
||||||
|
|
||||||
|
use crate::loom_exports::sync::atomic::{AtomicUsize, Ordering};
|
||||||
|
use crate::loom_exports::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};
|
||||||
|
|
||||||
|
/// A cached read-write lock.
|
||||||
|
///
|
||||||
|
/// This read-write lock maintains a local cache in each clone for read
|
||||||
|
/// access. Regular writes are always synchronized and performed on the shared
|
||||||
|
/// data. Regular reads are synchronized only when the shared data has been
|
||||||
|
/// modified since the local cache was last synchronized. The local cache can
|
||||||
|
/// alternatively be used as a scratchpad without invalidating the shared data,
|
||||||
|
/// in which case all changes to the scratchpad will be lost on the next
|
||||||
|
/// synchronization.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub(crate) struct CachedRwLock<T: Clone> {
|
||||||
|
value: T,
|
||||||
|
epoch: usize,
|
||||||
|
shared: Arc<Shared<T>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone> CachedRwLock<T> {
|
||||||
|
    /// Creates a new cached read-write lock in an unlocked state.
|
||||||
|
pub(crate) fn new(t: T) -> Self {
|
||||||
|
let shared = t.clone();
|
||||||
|
Self {
|
||||||
|
value: t,
|
||||||
|
epoch: 0,
|
||||||
|
shared: Arc::new(Shared {
|
||||||
|
value: Mutex::new(shared),
|
||||||
|
epoch: AtomicUsize::new(0),
|
||||||
|
}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gives access to the local cache without synchronization.
|
||||||
|
pub(crate) fn read_unsync(&self) -> &T {
|
||||||
|
&self.value
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Synchronizes the local cache if it is behind the shared data and gives
|
||||||
|
/// access to it.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub(crate) fn read(&mut self) -> LockResult<&T> {
|
||||||
|
if self.shared.epoch.load(Ordering::Relaxed) != self.epoch {
|
||||||
|
match self.shared.value.lock() {
|
||||||
|
LockResult::Ok(shared) => {
|
||||||
|
self.value = shared.clone();
|
||||||
|
self.epoch = self.shared.epoch.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
LockResult::Err(_) => return LockResult::Err(PoisonError::new(&self.value)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LockResult::Ok(&self.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Gives write access to the local cache without synchronization so it can
|
||||||
|
/// be used as a scratchpad.
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub(crate) fn write_scratchpad_unsync(&mut self) -> &mut T {
|
||||||
|
&mut self.value
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Synchronizes the local cache if it is behind the shared data and gives
|
||||||
|
/// write access to it so it can be used as a scratchpad.
|
||||||
|
pub(crate) fn write_scratchpad(&mut self) -> LockResult<&mut T> {
|
||||||
|
if self.shared.epoch.load(Ordering::Relaxed) != self.epoch {
|
||||||
|
match self.shared.value.lock() {
|
||||||
|
LockResult::Ok(shared) => {
|
||||||
|
self.value = shared.clone();
|
||||||
|
self.epoch = self.shared.epoch.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
LockResult::Err(_) => return LockResult::Err(PoisonError::new(&mut self.value)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LockResult::Ok(&mut self.value)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Acquires a write lock on the shared data.
|
||||||
|
pub(crate) fn write(&mut self) -> LockResult<CachedRwLockWriteGuard<'_, T>> {
|
||||||
|
let guard = self.shared.value.lock();
|
||||||
|
let epoch = self.shared.epoch.load(Ordering::Relaxed) + 1;
|
||||||
|
self.shared.epoch.store(epoch, Ordering::Relaxed);
|
||||||
|
|
||||||
|
match guard {
|
||||||
|
LockResult::Ok(shared) => LockResult::Ok(CachedRwLockWriteGuard { guard: shared }),
|
||||||
|
LockResult::Err(poison) => LockResult::Err(PoisonError::new(CachedRwLockWriteGuard {
|
||||||
|
guard: poison.into_inner(),
|
||||||
|
})),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct Shared<T> {
|
||||||
|
epoch: AtomicUsize,
|
||||||
|
value: Mutex<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Write guard.
|
||||||
|
///
|
||||||
|
/// The lock is released when the guard is dropped.
|
||||||
|
pub(crate) struct CachedRwLockWriteGuard<'a, T: Clone> {
|
||||||
|
guard: MutexGuard<'a, T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone> Deref for CachedRwLockWriteGuard<'_, T> {
|
||||||
|
type Target = T;
|
||||||
|
|
||||||
|
fn deref(&self) -> &T {
|
||||||
|
&self.guard
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone> DerefMut for CachedRwLockWriteGuard<'_, T> {
|
||||||
|
fn deref_mut(&mut self) -> &mut T {
|
||||||
|
&mut self.guard
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::model::Builder;
    use loom::thread;

    // Loom model: two writer threads increment the shared value while a reader
    // repeatedly synchronizes its local cache; checks that synchronized reads
    // are monotonically non-decreasing.
    #[test]
    fn loom_cached_rw_lock_write() {
        // Bound on thread preemptions explored by loom when none is set
        // externally (keeps the state space tractable).
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        // Number of increments performed by each writer.
        const ITERATIONS_NUMBER: usize = 5;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let mut writer0: CachedRwLock<usize> = CachedRwLock::new(0);
            let mut writer1 = writer0.clone();
            let mut reader = writer0.clone();

            let th_w = thread::spawn(move || {
                for _ in 0..ITERATIONS_NUMBER {
                    let mut guard = writer0.write().unwrap();
                    *guard = *guard + 1;
                }
            });

            let th_r = thread::spawn(move || {
                let mut value = 0;
                let mut prev_value;
                for _ in 0..ITERATIONS_NUMBER {
                    prev_value = value;
                    // Synchronizing the cache must never observe the value
                    // going backwards.
                    value = *reader.write_scratchpad().unwrap();
                    assert!(
                        prev_value <= value,
                        "Previous value = {}, value = {}",
                        prev_value,
                        value
                    );
                    // NOTE(review): relies on each write incrementing both the
                    // value and the epoch exactly once, so they stay equal.
                    assert_eq!(value, reader.epoch);
                }
            });

            // The main thread acts as the second writer.
            for _ in 0..ITERATIONS_NUMBER {
                let mut guard = writer1.write().unwrap();
                *guard = *guard + 1;
            }

            th_w.join().unwrap();
            th_r.join().unwrap();
        });
    }
}
|
696
asynchronix/src/util/indexed_priority_queue.rs
Normal file
696
asynchronix/src/util/indexed_priority_queue.rs
Normal file
@ -0,0 +1,696 @@
|
|||||||
|
//! Associative priority queue.
|
||||||
|
|
||||||
|
#![allow(unused)]
|
||||||
|
|
||||||
|
use std::mem;
|
||||||
|
|
||||||
|
/// An associative container optimized for extraction of the value with the
/// lowest key and deletion of arbitrary key-value pairs.
///
/// This implementation has the same theoretical complexity for insert and pull
/// operations as a conventional array-based binary heap but does differ from
/// the latter in some important aspects:
///
/// - elements can be deleted in *O*(log(*N*)) time rather than *O*(*N*) time
///   using a unique index returned at insertion time.
/// - same-key elements are guaranteed to be pulled in FIFO order,
///
/// Under the hood, the priority queue relies on a binary heap cross-indexed
/// with values stored in a slab allocator. Each item of the binary heap
/// contains an index pointing to the associated slab-allocated node, as well as
/// the user-provided key. Each slab node contains the value associated to the
/// key and a back-pointing index to the binary heap. The heap items also
/// contain a unique epoch which allows same-key nodes to be sorted by insertion
/// order. The epoch is used as well to build unique indices that enable
/// efficient deletion of arbitrary key-value pairs.
///
/// The slab-based design is what makes *O*(log(*N*)) deletion possible, but it
/// does come with some trade-offs:
///
/// - its memory footprint is higher because it needs 2 extra pointer-sized
///   indices for each element to cross-index the heap and the slab,
/// - its computational footprint is higher because of the extra cost associated
///   with random slab access; that being said, array-based binary heaps are not
///   extremely cache-friendly to start with so unless the slab becomes very
///   fragmented, this is not expected to introduce more than a reasonable
///   constant-factor penalty compared to a conventional binary heap.
///
/// The computational penalty is partially offset by the fact that the value
/// never needs to be moved from the moment it is inserted until it is pulled.
///
/// Note that the `Copy` bound on the keys could be lifted but this would make
/// the implementation slightly less efficient unless `unsafe` is used.
pub(crate) struct IndexedPriorityQueue<K, V>
where
    K: Copy + Ord,
{
    /// Binary heap of keys, cross-indexed with the slab.
    heap: Vec<Item<K>>,
    /// Slab of values, cross-indexed with the heap; free nodes form a linked
    /// list threaded through the slab.
    slab: Vec<Node<V>>,
    /// Head of the linked list of free slab nodes, if any.
    first_free_node: Option<usize>,
    /// Epoch to be assigned to the next inserted element.
    next_epoch: u64,
}

impl<K: Copy + Ord, V> IndexedPriorityQueue<K, V> {
    /// Creates an empty `PriorityQueue`.
    pub(crate) fn new() -> Self {
        Self {
            heap: Vec::new(),
            slab: Vec::new(),
            first_free_node: None,
            next_epoch: 0,
        }
    }

    /// Creates an empty `PriorityQueue` with at least the specified capacity.
    pub(crate) fn with_capacity(capacity: usize) -> Self {
        Self {
            heap: Vec::with_capacity(capacity),
            slab: Vec::with_capacity(capacity),
            first_free_node: None,
            next_epoch: 0,
        }
    }

    /// Returns the number of key-value pairs in the priority queue.
    pub(crate) fn len(&self) -> usize {
        self.heap.len()
    }

    /// Returns `true` if the priority queue contains no key-value pair.
    pub(crate) fn is_empty(&self) -> bool {
        self.heap.is_empty()
    }

    /// Inserts a new key-value pair and returns a unique insertion key.
    ///
    /// This operation has *O*(log(*N*)) amortized worse-case theoretical
    /// complexity and *O*(1) amortized theoretical complexity for a
    /// sufficiently random heap.
    pub(crate) fn insert(&mut self, key: K, value: V) -> InsertKey {
        // Build a unique key from the user-provided key and a unique epoch.
        let epoch = self.next_epoch;
        assert_ne!(epoch, u64::MAX);
        self.next_epoch += 1;
        let unique_key = UniqueKey { key, epoch };

        // Add a new node to the slab, either by re-using a free node or by
        // appending a new one.
        let slab_idx = match self.first_free_node {
            Some(idx) => {
                self.first_free_node = self.slab[idx].unwrap_next_free_node();

                self.slab[idx] = Node::HeapNode(HeapNode {
                    value,
                    heap_idx: 0, // temporary value overridden in `sift_up`
                });

                idx
            }
            None => {
                let idx = self.slab.len();
                self.slab.push(Node::HeapNode(HeapNode {
                    value,
                    heap_idx: 0, // temporary value overridden in `sift_up`
                }));

                idx
            }
        };

        // Add a new node at the bottom of the heap.
        let heap_idx = self.heap.len();
        self.heap.push(Item {
            key: unique_key, // temporary value overridden in `sift_up`
            slab_idx: 0,     // temporary value overridden in `sift_up`
        });

        // Sift up the new node.
        self.sift_up(
            Item {
                key: unique_key,
                slab_idx,
            },
            heap_idx,
        );

        InsertKey { slab_idx, epoch }
    }

    /// Pulls the value with the lowest key.
    ///
    /// If there are several equal lowest keys, the value which was inserted
    /// first is returned.
    ///
    /// This operation has *O*(log(N)) non-amortized theoretical complexity.
    pub(crate) fn pull(&mut self) -> Option<(K, V)> {
        let item = self.heap.first()?;
        let top_slab_idx = item.slab_idx;
        let key = item.key.key;

        // Free the top node, extracting its value.
        let value = mem::replace(
            &mut self.slab[top_slab_idx],
            Node::FreeNode(FreeNode {
                next: self.first_free_node,
            }),
        )
        .unwrap_value();

        self.first_free_node = Some(top_slab_idx);

        // Sift the last node at the bottom of the heap from the top of the heap.
        let last_item = self.heap.pop().unwrap();
        if last_item.slab_idx != top_slab_idx {
            self.sift_down(last_item, 0);
        }

        Some((key, value))
    }

    /// Peeks a reference to the key-value pair with the lowest key, leaving it
    /// in the queue.
    ///
    /// If there are several equal lowest keys, a reference to the key-value
    /// pair which was inserted first is returned.
    ///
    /// This operation has *O*(1) non-amortized theoretical complexity.
    pub(crate) fn peek(&self) -> Option<(&K, &V)> {
        let item = self.heap.first()?;
        let top_slab_idx = item.slab_idx;
        let key = &item.key.key;
        let value = self.slab[top_slab_idx].unwrap_value_ref();

        Some((key, value))
    }

    /// Peeks a reference to the lowest key, leaving it in the queue.
    ///
    /// If there are several equal lowest keys, a reference to the key which was
    /// inserted first is returned.
    ///
    /// This operation has *O*(1) non-amortized theoretical complexity.
    pub(crate) fn peek_key(&self) -> Option<&K> {
        let item = self.heap.first()?;

        Some(&item.key.key)
    }

    /// Removes the key-value pair associated to the provided insertion key if
    /// it is still in the queue and returns it.
    ///
    /// Using an insertion key returned from another `PriorityQueue` is a logic
    /// error and could result in the deletion of an arbitrary key-value pair.
    ///
    /// This operation has guaranteed *O*(log(*N*)) theoretical complexity.
    pub(crate) fn extract(&mut self, insert_key: InsertKey) -> Option<(K, V)> {
        let slab_idx = insert_key.slab_idx;

        // Check that (i) there is a node at this index, (ii) this node is in
        // the heap and (iii) this node has the correct epoch.
        match self.slab.get(slab_idx) {
            None | Some(Node::FreeNode(_)) => return None,
            Some(Node::HeapNode(node)) => {
                if self.heap[node.heap_idx].key.epoch != insert_key.epoch {
                    return None;
                }
            }
        };

        // Free the node, extracting its content.
        let node = mem::replace(
            &mut self.slab[slab_idx],
            Node::FreeNode(FreeNode {
                next: self.first_free_node,
            }),
        )
        .unwrap_heap_node();

        self.first_free_node = Some(slab_idx);

        // Save the key before the node is removed from the heap.
        let key = self.heap[node.heap_idx].key.key;

        // If the last item of the heap is not the one to be deleted, sift it up
        // or down as appropriate starting from the vacant spot.
        let last_item = self.heap.pop().unwrap();
        if let Some(item) = self.heap.get(node.heap_idx) {
            if last_item.key < item.key {
                self.sift_up(last_item, node.heap_idx);
            } else {
                self.sift_down(last_item, node.heap_idx);
            }
        }

        Some((key, node.value))
    }

    /// Take a heap item and, starting at `heap_idx`, move it up the heap while
    /// a parent has a larger key.
    #[inline]
    fn sift_up(&mut self, item: Item<K>, heap_idx: usize) {
        let mut child_heap_idx = heap_idx;
        let key = &item.key;

        while child_heap_idx != 0 {
            let parent_heap_idx = (child_heap_idx - 1) / 2;

            // Stop when the key is larger or equal to the parent's.
            if key >= &self.heap[parent_heap_idx].key {
                break;
            }

            // Move the parent down one level.
            self.heap[child_heap_idx] = self.heap[parent_heap_idx];
            let parent_slab_idx = self.heap[parent_heap_idx].slab_idx;
            *self.slab[parent_slab_idx].unwrap_heap_index_mut() = child_heap_idx;

            // Make the former parent the new child.
            child_heap_idx = parent_heap_idx;
        }

        // Move the original item to the current child.
        self.heap[child_heap_idx] = item;
        *self.slab[item.slab_idx].unwrap_heap_index_mut() = child_heap_idx;
    }

    /// Take a heap item and, starting at `heap_idx`, move it down the heap
    /// while a child has a smaller key.
    #[inline]
    fn sift_down(&mut self, item: Item<K>, heap_idx: usize) {
        let mut parent_heap_idx = heap_idx;
        let mut child_heap_idx = 2 * parent_heap_idx + 1;
        let key = &item.key;

        while child_heap_idx < self.heap.len() {
            // If the sibling exists and has a smaller key, make it the
            // candidate for swapping.
            if let Some(other_child) = self.heap.get(child_heap_idx + 1) {
                child_heap_idx += (self.heap[child_heap_idx].key > other_child.key) as usize;
            }

            // Stop when the key is smaller or equal to the child with the smallest key.
            if key <= &self.heap[child_heap_idx].key {
                break;
            }

            // Move the child up one level.
            self.heap[parent_heap_idx] = self.heap[child_heap_idx];
            let child_slab_idx = self.heap[child_heap_idx].slab_idx;
            *self.slab[child_slab_idx].unwrap_heap_index_mut() = parent_heap_idx;

            // Make the child the new parent.
            parent_heap_idx = child_heap_idx;
            child_heap_idx = 2 * parent_heap_idx + 1;
        }

        // Move the original item to the current parent.
        self.heap[parent_heap_idx] = item;
        *self.slab[item.slab_idx].unwrap_heap_index_mut() = parent_heap_idx;
    }
}

impl<K: Copy + Ord, V> Default for IndexedPriorityQueue<K, V> {
    fn default() -> Self {
        Self::new()
    }
}

/// Data related to a single key-value pair stored in the heap.
#[derive(Copy, Clone)]
struct Item<K: Copy> {
    // A unique key by which the heap is sorted.
    key: UniqueKey<K>,
    // An index pointing to the corresponding node in the slab.
    slab_idx: usize,
}

/// Data related to a single key-value pair stored in the slab.
enum Node<V> {
    FreeNode(FreeNode),
    HeapNode(HeapNode<V>),
}

impl<V> Node<V> {
    /// Unwraps the `FreeNode::next` field.
    fn unwrap_next_free_node(&self) -> Option<usize> {
        match self {
            Self::FreeNode(n) => n.next,
            _ => panic!("the node was expected to be a free node"),
        }
    }

    /// Unwraps a `HeapNode`.
    fn unwrap_heap_node(self) -> HeapNode<V> {
        match self {
            Self::HeapNode(n) => n,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps the `HeapNode::value` field.
    fn unwrap_value(self) -> V {
        match self {
            Self::HeapNode(n) => n.value,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps a reference to the `HeapNode::value` field.
    fn unwrap_value_ref(&self) -> &V {
        match self {
            Self::HeapNode(n) => &n.value,
            _ => panic!("the node was expected to be a heap node"),
        }
    }

    /// Unwraps a mutable reference to the `HeapNode::heap_idx` field.
    fn unwrap_heap_index_mut(&mut self) -> &mut usize {
        match self {
            Self::HeapNode(n) => &mut n.heap_idx,
            _ => panic!("the node was expected to be a heap node"),
        }
    }
}

/// A node that is no longer in the binary heap.
struct FreeNode {
    // An index pointing to the next free node, if any.
    next: Option<usize>,
}

/// A node currently in the binary heap.
struct HeapNode<V> {
    // The value associated to this node.
    value: V,
    // Index of the node in the heap.
    heap_idx: usize,
}

/// A unique insertion key that can be used for key-value pair deletion.
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq)]
pub(crate) struct InsertKey {
    // An index pointing to a node in the slab.
    slab_idx: usize,
    // The epoch when the node was inserted.
    epoch: u64,
}

impl InsertKey {
    // Creates an `InsertKey` directly from its raw components.
    //
    // This method is safe: the worst that can happen is for the key to be
    // invalid, in which case it will simply be rejected by
    // `IndexedPriorityQueue::extract`.
    pub(crate) fn from_raw_parts(slab_idx: usize, epoch: u64) -> Self {
        Self { slab_idx, epoch }
    }

    // Decomposes an `InsertKey` into its raw components.
    pub(crate) fn into_raw_parts(self) -> (usize, u64) {
        (self.slab_idx, self.epoch)
    }
}

/// A unique key made of the user-provided key complemented by a unique epoch.
///
/// Implementation note: `UniqueKey` automatically derives `PartialOrd`, which
/// implies that lexicographic order between `key` and `epoch` must be preserved
/// to make sure that `key` has a higher sorting priority than `epoch`.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
struct UniqueKey<K: Copy> {
    /// The user-provided key.
    key: K,
    /// A unique epoch that indicates the insertion date.
    epoch: u64,
}
|
||||||
|
|
||||||
|
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::fmt::Debug;

    use super::*;

    // A scripted operation applied to the queue under test.
    enum Op<K, V> {
        Insert(K, V),
        InsertAndMark(K, V),
        Pull(Option<(K, V)>),
        ExtractMarked(Option<(K, V)>),
    }

    // Replays a sequence of operations on a fresh queue, asserting the
    // expected outcome of every `Pull` and `ExtractMarked`.
    fn check<K: Copy + Clone + Ord + Debug, V: Eq + Debug>(
        operations: impl Iterator<Item = Op<K, V>>,
    ) {
        let mut queue = IndexedPriorityQueue::new();
        // Insertion key saved by the most recent `InsertAndMark`.
        let mut marked = None;

        for op in operations {
            match op {
                Op::Insert(key, value) => {
                    queue.insert(key, value);
                }
                Op::InsertAndMark(key, value) => {
                    marked = Some(queue.insert(key, value));
                }
                Op::Pull(kv) => {
                    assert_eq!(queue.pull(), kv);
                }
                Op::ExtractMarked(kv) => {
                    assert_eq!(
                        queue.extract(marked.take().expect("no item was marked for deletion")),
                        kv
                    )
                }
            }
        }
    }

    // Pulls all elements after a batch of distinct-key inserts; checks global
    // ordering by key.
    #[test]
    fn indexed_priority_queue_smoke() {
        let operations = [
            Op::Insert(5, 'a'),
            Op::Insert(2, 'b'),
            Op::Insert(3, 'c'),
            Op::Insert(4, 'd'),
            Op::Insert(9, 'e'),
            Op::Insert(1, 'f'),
            Op::Insert(8, 'g'),
            Op::Insert(0, 'h'),
            Op::Insert(7, 'i'),
            Op::Insert(6, 'j'),
            Op::Pull(Some((0, 'h'))),
            Op::Pull(Some((1, 'f'))),
            Op::Pull(Some((2, 'b'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((5, 'a'))),
            Op::Pull(Some((6, 'j'))),
            Op::Pull(Some((7, 'i'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(Some((9, 'e'))),
        ];

        check(operations.into_iter());
    }

    // Alternates inserts and pulls so the heap repeatedly grows and shrinks.
    #[test]
    fn indexed_priority_queue_interleaved() {
        let operations = [
            Op::Insert(2, 'a'),
            Op::Insert(7, 'b'),
            Op::Insert(5, 'c'),
            Op::Pull(Some((2, 'a'))),
            Op::Insert(4, 'd'),
            Op::Pull(Some((4, 'd'))),
            Op::Insert(8, 'e'),
            Op::Insert(2, 'f'),
            Op::Pull(Some((2, 'f'))),
            Op::Pull(Some((5, 'c'))),
            Op::Pull(Some((7, 'b'))),
            Op::Insert(5, 'g'),
            Op::Insert(3, 'h'),
            Op::Pull(Some((3, 'h'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Same-key elements must be pulled in insertion (FIFO) order.
    #[test]
    fn indexed_priority_queue_equal_keys() {
        let operations = [
            Op::Insert(4, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'd'),
            Op::Insert(8, 'e'),
            Op::Insert(3, 'f'),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((3, 'f'))),
            Op::Pull(Some((4, 'a'))),
            Op::Insert(8, 'g'),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((8, 'e'))),
            Op::Pull(Some((8, 'g'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Extraction with a live insertion key removes exactly the marked pair.
    #[test]
    fn indexed_priority_queue_extract_valid() {
        let operations = [
            Op::Insert(8, 'a'),
            Op::Insert(1, 'b'),
            Op::Insert(3, 'c'),
            Op::InsertAndMark(3, 'd'),
            Op::Insert(2, 'e'),
            Op::Pull(Some((1, 'b'))),
            Op::Insert(4, 'f'),
            Op::ExtractMarked(Some((3, 'd'))),
            Op::Insert(5, 'g'),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((3, 'c'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((5, 'g'))),
            Op::Pull(Some((8, 'a'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // The marked pair is pulled before extraction, so the saved insertion key
    // is stale and `ExtractMarked` must yield `None` without disturbing the
    // remaining pairs.
    #[test]
    fn indexed_priority_queue_extract_invalid() {
        let operations = [
            Op::Insert(0, 'a'),
            Op::Insert(7, 'b'),
            Op::InsertAndMark(2, 'c'),
            Op::Insert(4, 'd'),
            Op::Pull(Some((0, 'a'))),
            Op::Insert(2, 'e'),
            Op::Pull(Some((2, 'c'))),
            Op::Insert(4, 'f'),
            Op::ExtractMarked(None),
            Op::Pull(Some((2, 'e'))),
            Op::Pull(Some((4, 'd'))),
            Op::Pull(Some((4, 'f'))),
            Op::Pull(Some((7, 'b'))),
            Op::Pull(None),
        ];

        check(operations.into_iter());
    }

    // Randomized differential test against a `BTreeMap`-based shadow queue.
    #[test]
    fn indexed_priority_queue_fuzz() {
        use std::cell::Cell;
        use std::collections::BTreeMap;

        use crate::util::rng::Rng;

        // Number of fuzzing operations.
        const ITER: usize = if cfg!(miri) { 1000 } else { 10_000_000 };

        // Inclusive upper bound for randomly generated keys.
        const MAX_KEY: u64 = 99;

        // Probabilistic weight of each of the 4 operations.
        //
        // The weight for pull values should probably stay close to the sum of
        // the two insertion weights to prevent queue size runaway.
        const INSERT_WEIGHT: u64 = 5;
        const INSERT_AND_MARK_WEIGHT: u64 = 1;
        const PULL_WEIGHT: u64 = INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT;
        const DELETE_MARKED_WEIGHT: u64 = 1;

        // Defines 4 basic operations on the priority queue, each of them being
        // performed on both the tested implementation and on a shadow queue
        // implemented with a `BTreeMap`. Any mismatch between the outcomes of
        // pull and delete operations between the two queues triggers a panic.
        //
        // The shadow queue keys on `(key, epoch)`, mirroring the FIFO tie-break
        // of the implementation under test.
        let epoch: Cell<usize> = Cell::new(0);
        let marked: Cell<Option<InsertKey>> = Cell::new(None);
        let shadow_marked: Cell<Option<(u64, usize)>> = Cell::new(None);

        let insert_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                         shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                         key,
                         value| {
            queue.insert(key, value);
            shadow_queue.insert((key, epoch.get()), value);
            epoch.set(epoch.get() + 1);
        };

        let insert_and_mark_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                                  shadow_queue: &mut BTreeMap<(u64, usize), u64>,
                                  key,
                                  value| {
            marked.set(Some(queue.insert(key, value)));
            shadow_queue.insert((key, epoch.get()), value);
            shadow_marked.set(Some((key, epoch.get())));
            epoch.set(epoch.get() + 1);
        };

        let pull_fn = |queue: &mut IndexedPriorityQueue<u64, u64>,
                       shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
            let value = queue.pull();
            // The shadow queue's first entry is its lowest `(key, epoch)`.
            let shadow_value = match shadow_queue.iter().next() {
                Some((&unique_key, &value)) => {
                    shadow_queue.remove(&unique_key);
                    Some((unique_key.0, value))
                }
                None => None,
            };
            assert_eq!(value, shadow_value);
        };

        let delete_marked_fn =
            |queue: &mut IndexedPriorityQueue<u64, u64>,
             shadow_queue: &mut BTreeMap<(u64, usize), u64>| {
                let success = match marked.take() {
                    Some(delete_key) => Some(queue.extract(delete_key).is_some()),
                    None => None,
                };
                let shadow_success = match shadow_marked.take() {
                    Some(delete_key) => Some(shadow_queue.remove(&delete_key).is_some()),
                    None => None,
                };
                assert_eq!(success, shadow_success);
            };

        // Fuzz away.
        let mut queue = IndexedPriorityQueue::new();
        let mut shadow_queue = BTreeMap::new();

        let rng = Rng::new(12345);
        const TOTAL_WEIGHT: u64 =
            INSERT_WEIGHT + INSERT_AND_MARK_WEIGHT + PULL_WEIGHT + DELETE_MARKED_WEIGHT;

        for _ in 0..ITER {
            // Randomly choose one of the 4 possible operations, respecting the
            // probability weights.
            let mut op = rng.gen_bounded(TOTAL_WEIGHT);
            if op < INSERT_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_fn(&mut queue, &mut shadow_queue, key, val);
                continue;
            }
            op -= INSERT_WEIGHT;
            if op < INSERT_AND_MARK_WEIGHT {
                let key = rng.gen_bounded(MAX_KEY + 1);
                let val = rng.gen();
                insert_and_mark_fn(&mut queue, &mut shadow_queue, key, val);
                continue;
            }
            op -= INSERT_AND_MARK_WEIGHT;
            if op < PULL_WEIGHT {
                pull_fn(&mut queue, &mut shadow_queue);
                continue;
            }
            delete_marked_fn(&mut queue, &mut shadow_queue);
        }
    }
}
|
@ -111,7 +111,7 @@ impl<K: Copy + Ord, V> PriorityQueue<K, V> {
|
|||||||
|
|
||||||
#[cfg(all(test, not(asynchronix_loom)))]
|
#[cfg(all(test, not(asynchronix_loom)))]
|
||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::PriorityQueue;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn priority_smoke() {
|
fn priority_smoke() {
|
||||||
|
@ -1,7 +1,5 @@
|
|||||||
//! Pseudo-random number generation.
|
//! Pseudo-random number generation.
|
||||||
|
|
||||||
#![allow(unused)]
|
|
||||||
|
|
||||||
use std::cell::Cell;
|
use std::cell::Cell;
|
||||||
|
|
||||||
/// A pseudo-random generator for 64-bit integers based on Wang Yi's Wyrand.
|
/// A pseudo-random generator for 64-bit integers based on Wang Yi's Wyrand.
|
||||||
|
@ -1,11 +1,7 @@
|
|||||||
//! Futures and future-related functions.
|
//! Sequential composition of futures into a single future.
|
||||||
|
|
||||||
#![allow(unused)]
|
|
||||||
|
|
||||||
use std::future::Future;
|
use std::future::Future;
|
||||||
use std::pin::Pin;
|
use std::pin::Pin;
|
||||||
use std::sync::atomic::AtomicBool;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::task::{Context, Poll};
|
use std::task::{Context, Poll};
|
||||||
|
|
||||||
/// An owned future which sequentially polls a collection of futures.
|
/// An owned future which sequentially polls a collection of futures.
|
||||||
@ -53,39 +49,3 @@ impl<F: Future + Unpin> Future for SeqFuture<F> {
|
|||||||
Poll::Pending
|
Poll::Pending
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
trait RevocableFuture: Future {
|
|
||||||
fn is_revoked() -> bool;
|
|
||||||
}
|
|
||||||
|
|
||||||
struct NeverRevokedFuture<F> {
|
|
||||||
inner: F,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<F: Future> NeverRevokedFuture<F> {
|
|
||||||
fn new(fut: F) -> Self {
|
|
||||||
Self { inner: fut }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<T: Future> Future for NeverRevokedFuture<T> {
|
|
||||||
type Output = T::Output;
|
|
||||||
|
|
||||||
#[inline(always)]
|
|
||||||
fn poll(
|
|
||||||
self: std::pin::Pin<&mut Self>,
|
|
||||||
cx: &mut std::task::Context<'_>,
|
|
||||||
) -> std::task::Poll<Self::Output> {
|
|
||||||
unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Future> RevocableFuture for NeverRevokedFuture<T> {
|
|
||||||
fn is_revoked() -> bool {
|
|
||||||
false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct ConcurrentlyRevocableFuture<F> {
|
|
||||||
inner: F,
|
|
||||||
is_revoked: Arc<AtomicBool>,
|
|
||||||
}
|
|
@ -1,8 +1,6 @@
|
|||||||
//! A primitive similar to a one-shot channel but without any signaling
|
//! A primitive similar to a one-shot channel but without any signaling
|
||||||
//! capability.
|
//! capability.
|
||||||
|
|
||||||
#![allow(unused)]
|
|
||||||
|
|
||||||
use std::error::Error;
|
use std::error::Error;
|
||||||
use std::fmt;
|
use std::fmt;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
@ -327,8 +325,6 @@ pub(crate) fn slot<T>() -> (SlotWriter<T>, SlotReader<T>) {
|
|||||||
mod tests {
|
mod tests {
|
||||||
use super::*;
|
use super::*;
|
||||||
|
|
||||||
use std::io::Read;
|
|
||||||
use std::sync::Arc;
|
|
||||||
use std::thread;
|
use std::thread;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
@ -358,9 +354,9 @@ mod tests {
|
|||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn slot_multi_threaded_write() {
|
fn slot_multi_threaded_write() {
|
||||||
let (mut writer, mut reader) = slot();
|
let (writer, mut reader) = slot();
|
||||||
|
|
||||||
let th = thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
assert!(writer.write(42).is_ok());
|
assert!(writer.write(42).is_ok());
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -370,15 +366,13 @@ mod tests {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
th.join().unwrap();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn slot_multi_threaded_drop_writer() {
|
fn slot_multi_threaded_drop_writer() {
|
||||||
let (mut writer, mut reader) = slot::<i32>();
|
let (writer, mut reader) = slot::<i32>();
|
||||||
|
|
||||||
let th = thread::spawn(move || {
|
thread::spawn(move || {
|
||||||
drop(writer);
|
drop(writer);
|
||||||
});
|
});
|
||||||
|
|
||||||
@ -389,8 +383,6 @@ mod tests {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
th.join().unwrap();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,393 +0,0 @@
|
|||||||
//! Single-producer single-consumer unbounded FIFO queue that stores values in
|
|
||||||
//! fixed-size memory segments.
|
|
||||||
|
|
||||||
#![allow(unused)]
|
|
||||||
|
|
||||||
use std::cell::Cell;
|
|
||||||
use std::error::Error;
|
|
||||||
use std::fmt;
|
|
||||||
use std::marker::PhantomData;
|
|
||||||
use std::mem::MaybeUninit;
|
|
||||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
|
||||||
use std::ptr::{self, NonNull};
|
|
||||||
use std::sync::atomic::Ordering;
|
|
||||||
|
|
||||||
use crossbeam_utils::CachePadded;
|
|
||||||
|
|
||||||
use crate::loom_exports::cell::UnsafeCell;
|
|
||||||
use crate::loom_exports::sync::atomic::{AtomicBool, AtomicPtr};
|
|
||||||
use crate::loom_exports::sync::Arc;
|
|
||||||
|
|
||||||
/// The number of slots in a single segment.
|
|
||||||
const SEGMENT_LEN: usize = 32;
|
|
||||||
|
|
||||||
/// A slot containing a single value.
|
|
||||||
struct Slot<T> {
|
|
||||||
has_value: AtomicBool,
|
|
||||||
value: UnsafeCell<MaybeUninit<T>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Default for Slot<T> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Slot {
|
|
||||||
has_value: AtomicBool::new(false),
|
|
||||||
value: UnsafeCell::new(MaybeUninit::uninit()),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A memory segment containing `SEGMENT_LEN` slots.
|
|
||||||
struct Segment<T> {
|
|
||||||
/// Address of the next segment.
|
|
||||||
///
|
|
||||||
/// A null pointer means that the next segment is not allocated yet.
|
|
||||||
next_segment: AtomicPtr<Segment<T>>,
|
|
||||||
data: [Slot<T>; SEGMENT_LEN],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Segment<T> {
|
|
||||||
/// Allocates a new segment.
|
|
||||||
fn allocate_new() -> NonNull<Self> {
|
|
||||||
let segment = Self {
|
|
||||||
next_segment: AtomicPtr::new(ptr::null_mut()),
|
|
||||||
data: Default::default(),
|
|
||||||
};
|
|
||||||
|
|
||||||
// Safety: the pointer is non-null since it comes from a box.
|
|
||||||
unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(segment))) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The head of the queue from which values are popped.
|
|
||||||
struct Head<T> {
|
|
||||||
/// Pointer to the segment at the head of the queue.
|
|
||||||
segment: NonNull<Segment<T>>,
|
|
||||||
/// Index of the next value to be read.
|
|
||||||
///
|
|
||||||
/// If the index is equal to the segment length, it is necessary to move to
|
|
||||||
/// the next segment before the next value can be read.
|
|
||||||
next_read_idx: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The tail of the queue to which values are pushed.
|
|
||||||
struct Tail<T> {
|
|
||||||
/// Pointer to the segment at the tail of the queue.
|
|
||||||
segment: NonNull<Segment<T>>,
|
|
||||||
/// Index of the next value to be written.
|
|
||||||
///
|
|
||||||
/// If the index is equal to the segment length, a new segment must be
|
|
||||||
/// allocated before a new value can be written.
|
|
||||||
next_write_idx: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// A single-producer, single-consumer unbounded FIFO queue.
|
|
||||||
struct Queue<T> {
|
|
||||||
head: CachePadded<UnsafeCell<Head<T>>>,
|
|
||||||
tail: CachePadded<UnsafeCell<Tail<T>>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Queue<T> {
|
|
||||||
/// Creates a new queue.
|
|
||||||
fn new() -> Self {
|
|
||||||
let segment = Segment::allocate_new();
|
|
||||||
|
|
||||||
let head = Head {
|
|
||||||
segment,
|
|
||||||
next_read_idx: 0,
|
|
||||||
};
|
|
||||||
let tail = Tail {
|
|
||||||
segment,
|
|
||||||
next_write_idx: 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
Self {
|
|
||||||
head: CachePadded::new(UnsafeCell::new(head)),
|
|
||||||
tail: CachePadded::new(UnsafeCell::new(tail)),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Pushes a new value.
|
|
||||||
///
|
|
||||||
/// # Safety
|
|
||||||
///
|
|
||||||
/// The method cannot be called from multiple threads concurrently.
|
|
||||||
unsafe fn push(&self, value: T) {
|
|
||||||
// Safety: this is the only thread accessing the tail.
|
|
||||||
let tail = self.tail.with_mut(|p| &mut *p);
|
|
||||||
|
|
||||||
// If the whole segment has been written, allocate a new segment.
|
|
||||||
if tail.next_write_idx == SEGMENT_LEN {
|
|
||||||
let old_segment = tail.segment;
|
|
||||||
tail.segment = Segment::allocate_new();
|
|
||||||
|
|
||||||
// Safety: the old segment is still allocated since the consumer
|
|
||||||
// cannot deallocate it before `next_segment` is set to a non-null
|
|
||||||
// value.
|
|
||||||
old_segment
|
|
||||||
.as_ref()
|
|
||||||
.next_segment
|
|
||||||
.store(tail.segment.as_ptr(), Ordering::Release);
|
|
||||||
|
|
||||||
tail.next_write_idx = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Safety: the tail segment is allocated since the consumer cannot
|
|
||||||
// deallocate it before `next_segment` is set to a non-null value.
|
|
||||||
let data = &tail.segment.as_ref().data[tail.next_write_idx];
|
|
||||||
|
|
||||||
// Safety: we have exclusive access to the slot value since the consumer
|
|
||||||
// cannot access it before `has_value` is set to true.
|
|
||||||
data.value.with_mut(|p| (*p).write(value));
|
|
||||||
|
|
||||||
// Ordering: this Release store synchronizes with the Acquire load in
|
|
||||||
// `pop` and ensures that the value is visible to the consumer once
|
|
||||||
// `has_value` reads `true`.
|
|
||||||
data.has_value.store(true, Ordering::Release);
|
|
||||||
|
|
||||||
tail.next_write_idx += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Pops a new value.
|
|
||||||
///
|
|
||||||
/// # Safety
|
|
||||||
///
|
|
||||||
/// The method cannot be called from multiple threads concurrently.
|
|
||||||
unsafe fn pop(&self) -> Option<T> {
|
|
||||||
// Safety: this is the only thread accessing the head.
|
|
||||||
let head = self.head.with_mut(|p| &mut *p);
|
|
||||||
|
|
||||||
// If the whole segment has been read, try to move to the next segment.
|
|
||||||
if head.next_read_idx == SEGMENT_LEN {
|
|
||||||
// Read the next segment or return `None` if it is not ready yet.
|
|
||||||
//
|
|
||||||
// Safety: the head segment is still allocated since we are the only
|
|
||||||
// thread that can deallocate it.
|
|
||||||
let next_segment = head.segment.as_ref().next_segment.load(Ordering::Acquire);
|
|
||||||
let next_segment = NonNull::new(next_segment)?;
|
|
||||||
|
|
||||||
// Deallocate the old segment.
|
|
||||||
//
|
|
||||||
// Safety: the pointer was initialized from a box and the segment is
|
|
||||||
// still allocated since we are the only thread that can deallocate
|
|
||||||
// it.
|
|
||||||
let _ = Box::from_raw(head.segment.as_ptr());
|
|
||||||
|
|
||||||
// Update the segment and the next index.
|
|
||||||
head.segment = next_segment;
|
|
||||||
head.next_read_idx = 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
let data = &head.segment.as_ref().data[head.next_read_idx];
|
|
||||||
|
|
||||||
// Ordering: this Acquire load synchronizes with the Release store in
|
|
||||||
// `push` and ensures that the value is visible once `has_value` reads
|
|
||||||
// `true`.
|
|
||||||
if !data.has_value.load(Ordering::Acquire) {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Safety: since `has_value` is `true` then we have exclusive ownership
|
|
||||||
// of the value and we know that it was initialized.
|
|
||||||
let value = data.value.with(|p| (*p).assume_init_read());
|
|
||||||
|
|
||||||
head.next_read_idx += 1;
|
|
||||||
|
|
||||||
Some(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> Drop for Queue<T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
unsafe {
|
|
||||||
// Drop all values.
|
|
||||||
while self.pop().is_some() {}
|
|
||||||
|
|
||||||
// All values have been dropped: the last segment can be freed.
|
|
||||||
|
|
||||||
// Safety: this is the only thread accessing the head since both the
|
|
||||||
// consumer and producer have been dropped.
|
|
||||||
let head = self.head.with_mut(|p| &mut *p);
|
|
||||||
|
|
||||||
// Safety: the pointer was initialized from a box and the segment is
|
|
||||||
// still allocated since we are the only thread that can deallocate
|
|
||||||
// it.
|
|
||||||
let _ = Box::from_raw(head.segment.as_ptr());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe impl<T: Send> Send for Queue<T> {}
|
|
||||||
unsafe impl<T: Send> Sync for Queue<T> {}
|
|
||||||
|
|
||||||
impl<T> UnwindSafe for Queue<T> {}
|
|
||||||
impl<T> RefUnwindSafe for Queue<T> {}
|
|
||||||
|
|
||||||
/// A handle to a single-producer, single-consumer queue that can push values.
|
|
||||||
pub(crate) struct Producer<T> {
|
|
||||||
queue: Arc<Queue<T>>,
|
|
||||||
_non_sync_phantom: PhantomData<Cell<()>>,
|
|
||||||
}
|
|
||||||
impl<T> Producer<T> {
|
|
||||||
/// Pushes a value to the queue.
|
|
||||||
pub(crate) fn push(&self, value: T) -> Result<(), PushError> {
|
|
||||||
if Arc::strong_count(&self.queue) == 1 {
|
|
||||||
return Err(PushError {});
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe { self.queue.push(value) };
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
|
|
||||||
/// Error returned when a push failed due to the consumer being dropped.
|
|
||||||
pub(crate) struct PushError {}
|
|
||||||
|
|
||||||
impl fmt::Display for PushError {
|
|
||||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
||||||
write!(f, "sending message into a closed mailbox")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Error for PushError {}
|
|
||||||
|
|
||||||
/// A handle to a single-producer, single-consumer queue that can pop values.
|
|
||||||
pub(crate) struct Consumer<T> {
|
|
||||||
queue: Arc<Queue<T>>,
|
|
||||||
_non_sync_phantom: PhantomData<Cell<()>>,
|
|
||||||
}
|
|
||||||
impl<T> Consumer<T> {
|
|
||||||
/// Pops a value from the queue.
|
|
||||||
pub(crate) fn pop(&self) -> Option<T> {
|
|
||||||
unsafe { self.queue.pop() }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Creates the producer and consumer handles of a single-producer,
|
|
||||||
/// single-consumer queue.
|
|
||||||
pub(crate) fn spsc_queue<T>() -> (Producer<T>, Consumer<T>) {
|
|
||||||
let queue = Arc::new(Queue::new());
|
|
||||||
|
|
||||||
let producer = Producer {
|
|
||||||
queue: queue.clone(),
|
|
||||||
_non_sync_phantom: PhantomData,
|
|
||||||
};
|
|
||||||
let consumer = Consumer {
|
|
||||||
queue,
|
|
||||||
_non_sync_phantom: PhantomData,
|
|
||||||
};
|
|
||||||
|
|
||||||
(producer, consumer)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Loom tests.
|
|
||||||
#[cfg(all(test, not(asynchronix_loom)))]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
use std::thread;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn spsc_queue_basic() {
|
|
||||||
const VALUE_COUNT: usize = if cfg!(miri) { 1000 } else { 100_000 };
|
|
||||||
|
|
||||||
let (producer, consumer) = spsc_queue();
|
|
||||||
|
|
||||||
let th = thread::spawn(move || {
|
|
||||||
for i in 0..VALUE_COUNT {
|
|
||||||
let value = loop {
|
|
||||||
if let Some(v) = consumer.pop() {
|
|
||||||
break v;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
assert_eq!(value, i);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
for i in 0..VALUE_COUNT {
|
|
||||||
producer.push(i).unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
th.join().unwrap();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Loom tests.
|
|
||||||
#[cfg(all(test, asynchronix_loom))]
|
|
||||||
mod tests {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
use loom::model::Builder;
|
|
||||||
use loom::thread;
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn loom_spsc_queue_basic() {
|
|
||||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
|
||||||
const VALUE_COUNT: usize = 10;
|
|
||||||
|
|
||||||
let mut builder = Builder::new();
|
|
||||||
if builder.preemption_bound.is_none() {
|
|
||||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
|
||||||
}
|
|
||||||
|
|
||||||
builder.check(move || {
|
|
||||||
let (producer, consumer) = spsc_queue();
|
|
||||||
|
|
||||||
let th = thread::spawn(move || {
|
|
||||||
let mut value = 0;
|
|
||||||
for _ in 0..VALUE_COUNT {
|
|
||||||
if let Some(v) = consumer.pop() {
|
|
||||||
assert_eq!(v, value);
|
|
||||||
value += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
for i in 0..VALUE_COUNT {
|
|
||||||
let _ = producer.push(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
th.join().unwrap();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn loom_spsc_queue_new_segment() {
|
|
||||||
const DEFAULT_PREEMPTION_BOUND: usize = 4;
|
|
||||||
const VALUE_COUNT_BEFORE: usize = 5;
|
|
||||||
const VALUE_COUNT_AFTER: usize = 5;
|
|
||||||
|
|
||||||
let mut builder = Builder::new();
|
|
||||||
if builder.preemption_bound.is_none() {
|
|
||||||
builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
|
|
||||||
}
|
|
||||||
|
|
||||||
builder.check(move || {
|
|
||||||
let (producer, consumer) = spsc_queue();
|
|
||||||
|
|
||||||
// Fill up the first segment except for the last `VALUE_COUNT_BEFORE` slots.
|
|
||||||
for i in 0..(SEGMENT_LEN - VALUE_COUNT_BEFORE) {
|
|
||||||
producer.push(i).unwrap();
|
|
||||||
consumer.pop();
|
|
||||||
}
|
|
||||||
|
|
||||||
let th = thread::spawn(move || {
|
|
||||||
let mut value = SEGMENT_LEN - VALUE_COUNT_BEFORE;
|
|
||||||
for _ in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
|
|
||||||
if let Some(v) = consumer.pop() {
|
|
||||||
assert_eq!(v, value);
|
|
||||||
value += 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
});
|
|
||||||
|
|
||||||
for i in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
|
|
||||||
let _ = producer.push(i);
|
|
||||||
}
|
|
||||||
|
|
||||||
th.join().unwrap();
|
|
||||||
});
|
|
||||||
}
|
|
||||||
}
|
|
@ -143,7 +143,6 @@ impl<T: TearableAtomic> SyncCell<T> {
|
|||||||
|
|
||||||
/// A handle to a `SyncCell` that enables synchronized reads from multiple
|
/// A handle to a `SyncCell` that enables synchronized reads from multiple
|
||||||
/// threads.
|
/// threads.
|
||||||
#[derive(Clone)]
|
|
||||||
pub(crate) struct SyncCellReader<T: TearableAtomic> {
|
pub(crate) struct SyncCellReader<T: TearableAtomic> {
|
||||||
inner: Arc<Inner<T>>,
|
inner: Arc<Inner<T>>,
|
||||||
}
|
}
|
||||||
@ -186,6 +185,14 @@ impl<T: TearableAtomic> SyncCellReader<T> {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl<T: TearableAtomic> Clone for SyncCellReader<T> {
|
||||||
|
fn clone(&self) -> Self {
|
||||||
|
Self {
|
||||||
|
inner: self.inner.clone(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// An error returned when attempting to perform a read operation concurrently
|
/// An error returned when attempting to perform a read operation concurrently
|
||||||
/// with a write operation.
|
/// with a write operation.
|
||||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
|
//! Primitive for the efficient management of concurrent tasks.
|
||||||
|
|
||||||
use std::sync::atomic::Ordering;
|
use std::sync::atomic::Ordering;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
@ -21,31 +23,36 @@ const COUNTDOWN_MASK: u64 = !INDEX_MASK;
|
|||||||
/// scheduled tasks.
|
/// scheduled tasks.
|
||||||
const COUNTDOWN_ONE: u64 = 1 << 32;
|
const COUNTDOWN_ONE: u64 = 1 << 32;
|
||||||
|
|
||||||
/// A set of tasks that may be scheduled cheaply and can be requested to wake a
|
/// A primitive that simplifies the management of a set of tasks scheduled
|
||||||
/// parent task only when a given amount of tasks have been scheduled.
|
/// concurrently.
|
||||||
///
|
///
|
||||||
/// This object maintains both a list of all active tasks and a list of the
|
/// A `TaskSet` maintains both a vector-based list of tasks (or more accurately,
|
||||||
/// subset of active tasks currently scheduled. The latter is stored in a
|
/// task waker handles) and a linked list of the subset of tasks that are
|
||||||
/// Treiber stack which links tasks through indices rather than pointers. Using
|
/// currently scheduled. The latter is stored in a vector-based Treiber stack
|
||||||
/// indices has two advantages: (i) it enables a fully safe implementation and
|
/// which links tasks through indices rather than pointers. Using indices has
|
||||||
/// (ii) it makes it possible to use a single CAS to simultaneously move the
|
/// two advantages: (i) it makes a fully safe implementation possible and (ii)
|
||||||
/// head and decrement the outstanding amount of tasks to be scheduled before
|
/// it can take advantage of a single CAS to simultaneously move the head and
|
||||||
/// the parent task is notified.
|
/// decrement the outstanding amount of tasks to be scheduled before the parent
|
||||||
pub(super) struct TaskSet {
|
/// task is notified.
|
||||||
/// Set of all active tasks, scheduled or not.
|
///
|
||||||
|
/// This can be used to implement primitives similar to `FuturesOrdered` or
|
||||||
|
/// `FuturesUnordered` in the `futures` crate.
|
||||||
|
///
|
||||||
|
/// The `notify_count` argument of `TaskSet::take_scheduled()` can be set to
|
||||||
|
/// more than 1 to wake the parent task less frequently. For instance, if
|
||||||
|
/// `notify_count` is set to the number of pending sub-tasks, the parent task
|
||||||
|
/// will only be woken once all subtasks have been woken.
|
||||||
|
|
||||||
|
pub(crate) struct TaskSet {
|
||||||
|
/// Set of all tasks, scheduled or not.
|
||||||
///
|
///
|
||||||
/// In some rare cases, the back of the vector can also contain inactive
|
/// In some cases, the use of `resize()` to shrink the task set may leave
|
||||||
/// (retired) tasks.
|
/// inactive tasks at the back of the vector, in which case the length of
|
||||||
|
/// the vector will exceed `task_count`.
|
||||||
tasks: Vec<Arc<Task>>,
|
tasks: Vec<Arc<Task>>,
|
||||||
/// Head of the Treiber stack for scheduled tasks.
|
/// Shared Treiber stack head and parent task notifier.
|
||||||
///
|
shared: Arc<Shared>,
|
||||||
/// The lower bits specify the index of the last scheduled task, if any,
|
/// Count of all tasks, scheduled or not.
|
||||||
/// whereas the upper bits specify the countdown of tasks still to be
|
|
||||||
/// scheduled before the parent task is notified.
|
|
||||||
head: Arc<AtomicU64>,
|
|
||||||
/// A notifier used to wake the parent task.
|
|
||||||
notifier: WakeSource,
|
|
||||||
/// Count of all active tasks, scheduled or not.
|
|
||||||
task_count: usize,
|
task_count: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -53,35 +60,71 @@ impl TaskSet {
|
|||||||
/// Creates an initially empty set of tasks associated to the parent task
|
/// Creates an initially empty set of tasks associated to the parent task
|
||||||
/// which notifier is provided.
|
/// which notifier is provided.
|
||||||
#[allow(clippy::assertions_on_constants)]
|
#[allow(clippy::assertions_on_constants)]
|
||||||
pub(super) fn new(notifier: WakeSource) -> Self {
|
pub(crate) fn new(notifier: WakeSource) -> Self {
|
||||||
// Only 32-bit targets and above are supported.
|
// Only 32-bit targets and above are supported.
|
||||||
assert!(usize::BITS >= u32::BITS);
|
assert!(usize::BITS >= u32::BITS);
|
||||||
|
|
||||||
Self {
|
Self {
|
||||||
tasks: Vec::new(),
|
tasks: Vec::new(),
|
||||||
head: Arc::new(AtomicU64::new(EMPTY as u64)),
|
shared: Arc::new(Shared {
|
||||||
notifier,
|
head: AtomicU64::new(EMPTY as u64),
|
||||||
|
notifier,
|
||||||
|
}),
|
||||||
task_count: 0,
|
task_count: 0,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Steals scheduled tasks if any and returns an iterator over their
|
/// Creates a set of `len` tasks associated to the parent task which
|
||||||
/// indices, otherwise returns `None` and requests a notification to be sent
|
/// notifier is provided.
|
||||||
/// after `notify_count` tasks have been scheduled.
|
#[allow(clippy::assertions_on_constants)]
|
||||||
|
pub(crate) fn with_len(notifier: WakeSource, len: usize) -> Self {
|
||||||
|
// Only 32-bit targets and above are supported.
|
||||||
|
assert!(usize::BITS >= u32::BITS);
|
||||||
|
|
||||||
|
assert!(len <= EMPTY as usize && len <= SLEEPING as usize);
|
||||||
|
let len = len as u32;
|
||||||
|
|
||||||
|
let shared = Arc::new(Shared {
|
||||||
|
head: AtomicU64::new(EMPTY as u64),
|
||||||
|
notifier,
|
||||||
|
});
|
||||||
|
|
||||||
|
let tasks: Vec<_> = (0..len)
|
||||||
|
.map(|idx| {
|
||||||
|
Arc::new(Task {
|
||||||
|
idx,
|
||||||
|
shared: shared.clone(),
|
||||||
|
next: AtomicU32::new(SLEEPING),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Self {
|
||||||
|
tasks,
|
||||||
|
shared,
|
||||||
|
task_count: len as usize,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Take all scheduled tasks and returns an iterator over their indices, or
|
||||||
|
/// if there are no currently scheduled tasks returns `None` and requests a
|
||||||
|
/// notification to be sent after `notify_count` tasks have been scheduled.
|
||||||
///
|
///
|
||||||
/// In all cases, the list of scheduled tasks is guaranteed to be empty
|
/// In all cases, the list of scheduled tasks will be empty right after this
|
||||||
/// after this call.
|
/// call.
|
||||||
///
|
///
|
||||||
/// If some tasks were stolen, no notification is requested.
|
/// If there were scheduled tasks, no notification is requested because this
|
||||||
|
/// method is expected to be called repeatedly until it returns `None`.
|
||||||
|
/// Failure to do so will result in missed notifications.
|
||||||
///
|
///
|
||||||
/// If no tasks were stolen, the notification is guaranteed to be triggered
|
/// If no tasks were scheduled, the notification is guaranteed to be
|
||||||
/// no later than after `notify_count` tasks have been scheduled, though it
|
/// triggered no later than after `notify_count` tasks have been scheduled,
|
||||||
/// may in some cases be triggered earlier. If the specified `notify_count`
|
/// though it may in some cases be triggered earlier. If the specified
|
||||||
/// is zero then no notification is requested.
|
/// `notify_count` is zero then no notification is requested.
|
||||||
pub(super) fn steal_scheduled(&self, notify_count: usize) -> Option<TaskIterator<'_>> {
|
pub(crate) fn take_scheduled(&self, notify_count: usize) -> Option<TaskIterator<'_>> {
|
||||||
let countdown = u32::try_from(notify_count).unwrap();
|
let countdown = u32::try_from(notify_count).unwrap();
|
||||||
|
|
||||||
let mut head = self.head.load(Ordering::Relaxed);
|
let mut head = self.shared.head.load(Ordering::Relaxed);
|
||||||
loop {
|
loop {
|
||||||
let new_head = if head & INDEX_MASK == EMPTY as u64 {
|
let new_head = if head & INDEX_MASK == EMPTY as u64 {
|
||||||
(countdown as u64 * COUNTDOWN_ONE) | EMPTY as u64
|
(countdown as u64 * COUNTDOWN_ONE) | EMPTY as u64
|
||||||
@ -93,7 +136,7 @@ impl TaskSet {
|
|||||||
// operations in `Task::wake_by_ref` and ensures that all memory
|
// operations in `Task::wake_by_ref` and ensures that all memory
|
||||||
// operations performed during and before the tasks were scheduled
|
// operations performed during and before the tasks were scheduled
|
||||||
// become visible.
|
// become visible.
|
||||||
match self.head.compare_exchange_weak(
|
match self.shared.head.compare_exchange_weak(
|
||||||
head,
|
head,
|
||||||
new_head,
|
new_head,
|
||||||
Ordering::Acquire,
|
Ordering::Acquire,
|
||||||
@ -122,22 +165,22 @@ impl TaskSet {
|
|||||||
/// notification is currently requested.
|
/// notification is currently requested.
|
||||||
///
|
///
|
||||||
/// All discarded tasks are put in the sleeping (unscheduled) state.
|
/// All discarded tasks are put in the sleeping (unscheduled) state.
|
||||||
pub(super) fn discard_scheduled(&self) {
|
pub(crate) fn discard_scheduled(&self) {
|
||||||
if self.head.load(Ordering::Relaxed) != EMPTY as u64 {
|
if self.shared.head.load(Ordering::Relaxed) != EMPTY as u64 {
|
||||||
// Dropping the iterator ensures that all tasks are put in the
|
// Dropping the iterator ensures that all tasks are put in the
|
||||||
// sleeping state.
|
// sleeping state.
|
||||||
let _ = self.steal_scheduled(0);
|
let _ = self.take_scheduled(0);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Modify the number of active tasks.
|
/// Set the number of active tasks.
|
||||||
///
|
///
|
||||||
/// Note that this method may discard all scheduled tasks.
|
/// Note that this method may discard already scheduled tasks.
|
||||||
///
|
///
|
||||||
/// # Panic
|
/// # Panic
|
||||||
///
|
///
|
||||||
/// This method will panic if `len` is greater than `u32::MAX - 1`.
|
/// This method will panic if `len` is greater than `u32::MAX - 1`.
|
||||||
pub(super) fn resize(&mut self, len: usize) {
|
pub(crate) fn resize(&mut self, len: usize) {
|
||||||
assert!(len <= EMPTY as usize && len <= SLEEPING as usize);
|
assert!(len <= EMPTY as usize && len <= SLEEPING as usize);
|
||||||
|
|
||||||
self.task_count = len;
|
self.task_count = len;
|
||||||
@ -149,37 +192,46 @@ impl TaskSet {
|
|||||||
|
|
||||||
self.tasks.push(Arc::new(Task {
|
self.tasks.push(Arc::new(Task {
|
||||||
idx,
|
idx,
|
||||||
notifier: self.notifier.clone(),
|
shared: self.shared.clone(),
|
||||||
next: AtomicU32::new(SLEEPING),
|
next: AtomicU32::new(SLEEPING),
|
||||||
head: self.head.clone(),
|
|
||||||
}));
|
}));
|
||||||
}
|
}
|
||||||
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Try to remove inactive tasks.
|
// Try to shrink the vector of tasks.
|
||||||
//
|
//
|
||||||
// The main issue when shrinking the set of active tasks is that stale
|
// The main issue when shrinking the vector of tasks is that stale
|
||||||
// wakers may still be around and may at any moment be scheduled and
|
// wakers may still be around and may at any moment be scheduled and
|
||||||
// insert their index in the list of scheduled tasks. If it cannot be
|
// insert their task index in the list of scheduled tasks. If it cannot
|
||||||
// guaranteed that this will not happen, then a reference to that task
|
// be guaranteed that this will not happen, then the vector of tasks
|
||||||
// must be kept or the iterator for scheduled tasks will panic when
|
// cannot be shrunk further, otherwise the iterator for scheduled tasks
|
||||||
// indexing a stale task.
|
// will later fail when reaching a task with an invalid index.
|
||||||
//
|
//
|
||||||
// To prevent an inactive task from being spuriously scheduled, it is
|
// We follow a 2-steps strategy:
|
||||||
// enough to pretend that the task is already scheduled by setting its
|
//
|
||||||
// `next` field to anything else than `SLEEPING`. However, this could
|
// 1) remove all tasks currently in the list of scheduled task and set
|
||||||
// race if the task has just set its `next` field but has not yet
|
// them to `SLEEPING` state in case some of them might have an index
|
||||||
// updated the head of the list of scheduled tasks, so this can only be
|
// that will be invalidated when the vector of tasks is shrunk;
|
||||||
// done reliably if the task is currently sleeping.
|
//
|
||||||
|
// 2) attempt to iteratively shrink the vector of tasks by removing
|
||||||
|
// tasks starting from the back of the vector:
|
||||||
|
// - If a task is in the `SLEEPING` state, then its `next` pointer is
|
||||||
|
// changed to an arbitrary value other than`SLEEPING`, but the task
|
||||||
|
// is not inserted in the list of scheduled tasks; this way, the
|
||||||
|
// task will be effectively rendered inactive. The task can now be
|
||||||
|
// removed from the vector.
|
||||||
|
// - If a task is found in a non-`SLEEPING` state (meaning that there
|
||||||
|
// was a race and the task was scheduled after step 1) then abandon
|
||||||
|
// further shrinking and leave this task in the vector; the iterator
|
||||||
|
// for scheduled tasks mitigates such situation by only yielding
|
||||||
|
// task indices that are within the expected range.
|
||||||
|
|
||||||
// All scheduled tasks are first unscheduled in case some of them are
|
// Step 1: unscheduled tasks that may be scheduled.
|
||||||
// now inactive.
|
|
||||||
self.discard_scheduled();
|
self.discard_scheduled();
|
||||||
|
|
||||||
// The position of tasks in the set must stay consistent with their
|
// Step 2: attempt to remove tasks starting at the back of the vector.
|
||||||
// associated index so tasks are popped from the back.
|
|
||||||
while self.tasks.len() > len {
|
while self.tasks.len() > len {
|
||||||
// There is at least one task since `len()` was non-zero.
|
// There is at least one task since `len()` was non-zero.
|
||||||
let task = self.tasks.last().unwrap();
|
let task = self.tasks.last().unwrap();
|
||||||
@ -200,11 +252,11 @@ impl TaskSet {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns `true` if one or more tasks are currently scheduled.
|
/// Returns `true` if one or more sub-tasks are currently scheduled.
|
||||||
pub(super) fn has_scheduled(&self) -> bool {
|
pub(crate) fn has_scheduled(&self) -> bool {
|
||||||
// Ordering: the content of the head is only used as an advisory flag so
|
// Ordering: the content of the head is only used as an advisory flag so
|
||||||
// Relaxed ordering is sufficient.
|
// Relaxed ordering is sufficient.
|
||||||
self.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64
|
self.shared.head.load(Ordering::Relaxed) & INDEX_MASK != EMPTY as u64
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a reference to the waker associated to the active task with the
|
/// Returns a reference to the waker associated to the active task with the
|
||||||
@ -214,29 +266,40 @@ impl TaskSet {
|
|||||||
///
|
///
|
||||||
/// This method will panic if there is no active task with the provided
|
/// This method will panic if there is no active task with the provided
|
||||||
/// index.
|
/// index.
|
||||||
pub(super) fn waker_of(&self, idx: usize) -> WakerRef {
|
pub(crate) fn waker_of(&self, idx: usize) -> WakerRef {
|
||||||
assert!(idx < self.task_count);
|
assert!(idx < self.task_count);
|
||||||
|
|
||||||
waker_ref(&self.tasks[idx])
|
waker_ref(&self.tasks[idx])
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn len(&self) -> usize {
|
||||||
|
self.task_count
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Internals shared between a `TaskSet` and its associated `Task`s.
|
||||||
|
struct Shared {
|
||||||
|
/// Head of the Treiber stack for scheduled tasks.
|
||||||
|
///
|
||||||
|
/// The lower 32 bits specify the index of the last scheduled task (the
|
||||||
|
/// actual head), if any, whereas the upper 32 bits specify the countdown of
|
||||||
|
/// tasks still to be scheduled before the parent task is notified.
|
||||||
|
head: AtomicU64,
|
||||||
|
/// A notifier used to wake the parent task.
|
||||||
|
notifier: WakeSource,
|
||||||
}
|
}
|
||||||
|
|
||||||
/// An asynchronous task associated with the future of a sender.
|
/// An asynchronous task associated with the future of a sender.
|
||||||
pub(super) struct Task {
|
struct Task {
|
||||||
/// Index of this task.
|
/// Index of this task.
|
||||||
idx: u32,
|
idx: u32,
|
||||||
/// A notifier triggered once a certain number of tasks have been scheduled.
|
|
||||||
notifier: WakeSource,
|
|
||||||
/// Index of the next task in the list of scheduled tasks.
|
/// Index of the next task in the list of scheduled tasks.
|
||||||
next: AtomicU32,
|
next: AtomicU32,
|
||||||
/// Head of the list of scheduled tasks.
|
/// Head of the list of scheduled tasks.
|
||||||
head: Arc<AtomicU64>,
|
shared: Arc<Shared>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl ArcWake for Task {
|
impl ArcWake for Task {
|
||||||
fn wake(self: Arc<Self>) {
|
|
||||||
Self::wake_by_ref(&self);
|
|
||||||
}
|
|
||||||
fn wake_by_ref(arc_self: &Arc<Self>) {
|
fn wake_by_ref(arc_self: &Arc<Self>) {
|
||||||
let mut next = arc_self.next.load(Ordering::Relaxed);
|
let mut next = arc_self.next.load(Ordering::Relaxed);
|
||||||
|
|
||||||
@ -251,7 +314,7 @@ impl ArcWake for Task {
|
|||||||
// CAS on the head already ensure that all memory operations
|
// CAS on the head already ensure that all memory operations
|
||||||
// that precede this call to `wake_by_ref` become visible when
|
// that precede this call to `wake_by_ref` become visible when
|
||||||
// the tasks are stolen.
|
// the tasks are stolen.
|
||||||
let head = arc_self.head.load(Ordering::Relaxed);
|
let head = arc_self.shared.head.load(Ordering::Relaxed);
|
||||||
match arc_self.next.compare_exchange_weak(
|
match arc_self.next.compare_exchange_weak(
|
||||||
SLEEPING,
|
SLEEPING,
|
||||||
(head & INDEX_MASK) as u32,
|
(head & INDEX_MASK) as u32,
|
||||||
@ -297,7 +360,7 @@ impl ArcWake for Task {
|
|||||||
// that the value of the `next` field as well as all memory
|
// that the value of the `next` field as well as all memory
|
||||||
// operations that precede this call to `wake_by_ref` become visible
|
// operations that precede this call to `wake_by_ref` become visible
|
||||||
// when the tasks are stolen.
|
// when the tasks are stolen.
|
||||||
match arc_self.head.compare_exchange_weak(
|
match arc_self.shared.head.compare_exchange_weak(
|
||||||
head,
|
head,
|
||||||
new_head,
|
new_head,
|
||||||
Ordering::Release,
|
Ordering::Release,
|
||||||
@ -307,7 +370,7 @@ impl ArcWake for Task {
|
|||||||
// If the countdown has just been cleared, it is necessary
|
// If the countdown has just been cleared, it is necessary
|
||||||
// to send a notification.
|
// to send a notification.
|
||||||
if countdown == COUNTDOWN_ONE {
|
if countdown == COUNTDOWN_ONE {
|
||||||
arc_self.notifier.notify();
|
arc_self.shared.notifier.notify();
|
||||||
}
|
}
|
||||||
|
|
||||||
return;
|
return;
|
||||||
@ -339,7 +402,7 @@ impl ArcWake for Task {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// An iterator over scheduled tasks.
|
/// An iterator over scheduled tasks.
|
||||||
pub(super) struct TaskIterator<'a> {
|
pub(crate) struct TaskIterator<'a> {
|
||||||
task_list: &'a TaskSet,
|
task_list: &'a TaskSet,
|
||||||
next_index: u32,
|
next_index: u32,
|
||||||
}
|
}
|
@ -2,9 +2,10 @@
|
|||||||
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use asynchronix::model::{Model, Output};
|
use asynchronix::model::{Context, Model};
|
||||||
use asynchronix::simulation::{Mailbox, SimInit};
|
use asynchronix::ports::{EventBuffer, Output};
|
||||||
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
|
use asynchronix::simulation::{ActionKey, Mailbox, SimInit};
|
||||||
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
#[test]
|
#[test]
|
||||||
fn model_schedule_event() {
|
fn model_schedule_event() {
|
||||||
@ -13,9 +14,9 @@ fn model_schedule_event() {
|
|||||||
output: Output<()>,
|
output: Output<()>,
|
||||||
}
|
}
|
||||||
impl TestModel {
|
impl TestModel {
|
||||||
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
fn trigger(&mut self, _: (), context: &Context<Self>) {
|
||||||
scheduler
|
context
|
||||||
.schedule_event(scheduler.time() + Duration::from_secs(2), Self::action, ())
|
.schedule_event(context.time() + Duration::from_secs(2), Self::action, ())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
}
|
}
|
||||||
async fn action(&mut self) {
|
async fn action(&mut self) {
|
||||||
@ -27,13 +28,14 @@ fn model_schedule_event() {
|
|||||||
let mut model = TestModel::default();
|
let mut model = TestModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let mut output = model.output.connect_stream().0;
|
let mut output = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&output);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
|
let mut simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
simu.send_event(TestModel::trigger, (), addr);
|
simu.process_event(TestModel::trigger, (), addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||||
assert!(output.next().is_some());
|
assert!(output.next().is_some());
|
||||||
@ -46,15 +48,15 @@ fn model_cancel_future_keyed_event() {
|
|||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct TestModel {
|
struct TestModel {
|
||||||
output: Output<i32>,
|
output: Output<i32>,
|
||||||
key: Option<EventKey>,
|
key: Option<ActionKey>,
|
||||||
}
|
}
|
||||||
impl TestModel {
|
impl TestModel {
|
||||||
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
fn trigger(&mut self, _: (), context: &Context<Self>) {
|
||||||
scheduler
|
context
|
||||||
.schedule_event(scheduler.time() + Duration::from_secs(1), Self::action1, ())
|
.schedule_event(context.time() + Duration::from_secs(1), Self::action1, ())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
self.key = scheduler
|
self.key = context
|
||||||
.schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ())
|
.schedule_keyed_event(context.time() + Duration::from_secs(2), Self::action2, ())
|
||||||
.ok();
|
.ok();
|
||||||
}
|
}
|
||||||
async fn action1(&mut self) {
|
async fn action1(&mut self) {
|
||||||
@ -71,13 +73,14 @@ fn model_cancel_future_keyed_event() {
|
|||||||
let mut model = TestModel::default();
|
let mut model = TestModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let mut output = model.output.connect_stream().0;
|
let mut output = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&output);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
|
let mut simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
simu.send_event(TestModel::trigger, (), addr);
|
simu.process_event(TestModel::trigger, (), addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
assert_eq!(simu.time(), t0 + Duration::from_secs(1));
|
||||||
assert_eq!(output.next(), Some(1));
|
assert_eq!(output.next(), Some(1));
|
||||||
@ -91,15 +94,15 @@ fn model_cancel_same_time_keyed_event() {
|
|||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct TestModel {
|
struct TestModel {
|
||||||
output: Output<i32>,
|
output: Output<i32>,
|
||||||
key: Option<EventKey>,
|
key: Option<ActionKey>,
|
||||||
}
|
}
|
||||||
impl TestModel {
|
impl TestModel {
|
||||||
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
fn trigger(&mut self, _: (), context: &Context<Self>) {
|
||||||
scheduler
|
context
|
||||||
.schedule_event(scheduler.time() + Duration::from_secs(2), Self::action1, ())
|
.schedule_event(context.time() + Duration::from_secs(2), Self::action1, ())
|
||||||
.unwrap();
|
.unwrap();
|
||||||
self.key = scheduler
|
self.key = context
|
||||||
.schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ())
|
.schedule_keyed_event(context.time() + Duration::from_secs(2), Self::action2, ())
|
||||||
.ok();
|
.ok();
|
||||||
}
|
}
|
||||||
async fn action1(&mut self) {
|
async fn action1(&mut self) {
|
||||||
@ -116,13 +119,14 @@ fn model_cancel_same_time_keyed_event() {
|
|||||||
let mut model = TestModel::default();
|
let mut model = TestModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let mut output = model.output.connect_stream().0;
|
let mut output = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&output);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
|
let mut simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
simu.send_event(TestModel::trigger, (), addr);
|
simu.process_event(TestModel::trigger, (), addr);
|
||||||
simu.step();
|
simu.step();
|
||||||
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||||
assert_eq!(output.next(), Some(1));
|
assert_eq!(output.next(), Some(1));
|
||||||
@ -138,10 +142,10 @@ fn model_schedule_periodic_event() {
|
|||||||
output: Output<i32>,
|
output: Output<i32>,
|
||||||
}
|
}
|
||||||
impl TestModel {
|
impl TestModel {
|
||||||
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
fn trigger(&mut self, _: (), context: &Context<Self>) {
|
||||||
scheduler
|
context
|
||||||
.schedule_periodic_event(
|
.schedule_periodic_event(
|
||||||
scheduler.time() + Duration::from_secs(2),
|
context.time() + Duration::from_secs(2),
|
||||||
Duration::from_secs(3),
|
Duration::from_secs(3),
|
||||||
Self::action,
|
Self::action,
|
||||||
42,
|
42,
|
||||||
@ -157,13 +161,14 @@ fn model_schedule_periodic_event() {
|
|||||||
let mut model = TestModel::default();
|
let mut model = TestModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let mut output = model.output.connect_stream().0;
|
let mut output = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&output);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
|
let mut simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
simu.send_event(TestModel::trigger, (), addr);
|
simu.process_event(TestModel::trigger, (), addr);
|
||||||
|
|
||||||
// Move to the next events at t0 + 2s + k*3s.
|
// Move to the next events at t0 + 2s + k*3s.
|
||||||
for k in 0..10 {
|
for k in 0..10 {
|
||||||
@ -182,13 +187,13 @@ fn model_cancel_periodic_event() {
|
|||||||
#[derive(Default)]
|
#[derive(Default)]
|
||||||
struct TestModel {
|
struct TestModel {
|
||||||
output: Output<()>,
|
output: Output<()>,
|
||||||
key: Option<EventKey>,
|
key: Option<ActionKey>,
|
||||||
}
|
}
|
||||||
impl TestModel {
|
impl TestModel {
|
||||||
fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
|
fn trigger(&mut self, _: (), context: &Context<Self>) {
|
||||||
self.key = scheduler
|
self.key = context
|
||||||
.schedule_keyed_periodic_event(
|
.schedule_keyed_periodic_event(
|
||||||
scheduler.time() + Duration::from_secs(2),
|
context.time() + Duration::from_secs(2),
|
||||||
Duration::from_secs(3),
|
Duration::from_secs(3),
|
||||||
Self::action,
|
Self::action,
|
||||||
(),
|
(),
|
||||||
@ -206,13 +211,14 @@ fn model_cancel_periodic_event() {
|
|||||||
let mut model = TestModel::default();
|
let mut model = TestModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let mut output = model.output.connect_stream().0;
|
let mut output = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&output);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
let mut simu = SimInit::new().add_model(model, mbox).init(t0);
|
let mut simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
simu.send_event(TestModel::trigger, (), addr);
|
simu.process_event(TestModel::trigger, (), addr);
|
||||||
|
|
||||||
simu.step();
|
simu.step();
|
||||||
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
|
||||||
|
@ -2,8 +2,11 @@
|
|||||||
|
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use asynchronix::model::{Model, Output};
|
#[cfg(not(miri))]
|
||||||
use asynchronix::simulation::{Address, EventStream, Mailbox, SimInit, Simulation};
|
use asynchronix::model::Context;
|
||||||
|
use asynchronix::model::Model;
|
||||||
|
use asynchronix::ports::{EventBuffer, Output};
|
||||||
|
use asynchronix::simulation::{Address, Mailbox, SimInit, Simulation};
|
||||||
use asynchronix::time::MonotonicTime;
|
use asynchronix::time::MonotonicTime;
|
||||||
|
|
||||||
// Input-to-output pass-through model.
|
// Input-to-output pass-through model.
|
||||||
@ -26,15 +29,16 @@ impl<T: Clone + Send + 'static> Model for PassThroughModel<T> {}
|
|||||||
/// output) running as fast as possible.
|
/// output) running as fast as possible.
|
||||||
fn passthrough_bench<T: Clone + Send + 'static>(
|
fn passthrough_bench<T: Clone + Send + 'static>(
|
||||||
t0: MonotonicTime,
|
t0: MonotonicTime,
|
||||||
) -> (Simulation, Address<PassThroughModel<T>>, EventStream<T>) {
|
) -> (Simulation, Address<PassThroughModel<T>>, EventBuffer<T>) {
|
||||||
// Bench assembly.
|
// Bench assembly.
|
||||||
let mut model = PassThroughModel::new();
|
let mut model = PassThroughModel::new();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let out_stream = model.output.connect_stream().0;
|
let out_stream = EventBuffer::new();
|
||||||
|
model.output.connect_sink(&out_stream);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let simu = SimInit::new().add_model(model, mbox).init(t0);
|
let simu = SimInit::new().add_model(model, mbox, "").init(t0);
|
||||||
|
|
||||||
(simu, addr, out_stream)
|
(simu, addr, out_stream)
|
||||||
}
|
}
|
||||||
@ -217,21 +221,9 @@ impl TimestampModel {
|
|||||||
}
|
}
|
||||||
#[cfg(not(miri))]
|
#[cfg(not(miri))]
|
||||||
impl Model for TimestampModel {
|
impl Model for TimestampModel {
|
||||||
fn init(
|
async fn init(mut self, _: &Context<Self>) -> asynchronix::model::InitializedModel<Self> {
|
||||||
mut self,
|
self.stamp.send((Instant::now(), SystemTime::now())).await;
|
||||||
_scheduler: &asynchronix::time::Scheduler<Self>,
|
self.into()
|
||||||
) -> std::pin::Pin<
|
|
||||||
Box<
|
|
||||||
dyn futures_util::Future<Output = asynchronix::model::InitializedModel<Self>>
|
|
||||||
+ Send
|
|
||||||
+ '_,
|
|
||||||
>,
|
|
||||||
> {
|
|
||||||
Box::pin(async {
|
|
||||||
self.stamp.send((Instant::now(), SystemTime::now())).await;
|
|
||||||
|
|
||||||
self.into()
|
|
||||||
})
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -243,18 +235,20 @@ fn timestamp_bench(
|
|||||||
) -> (
|
) -> (
|
||||||
Simulation,
|
Simulation,
|
||||||
Address<TimestampModel>,
|
Address<TimestampModel>,
|
||||||
EventStream<(Instant, SystemTime)>,
|
EventBuffer<(Instant, SystemTime)>,
|
||||||
) {
|
) {
|
||||||
// Bench assembly.
|
// Bench assembly.
|
||||||
let mut model = TimestampModel::default();
|
let mut model = TimestampModel::default();
|
||||||
let mbox = Mailbox::new();
|
let mbox = Mailbox::new();
|
||||||
|
|
||||||
let stamp_stream = model.stamp.connect_stream().0;
|
let stamp_stream = EventBuffer::new();
|
||||||
|
model.stamp.connect_sink(&stamp_stream);
|
||||||
let addr = mbox.address();
|
let addr = mbox.address();
|
||||||
|
|
||||||
let simu = SimInit::new()
|
let simu = SimInit::new()
|
||||||
.add_model(model, mbox)
|
.add_model(model, mbox, "")
|
||||||
.init_with_clock(t0, clock);
|
.set_clock(clock)
|
||||||
|
.init(t0);
|
||||||
|
|
||||||
(simu, addr, stamp_stream)
|
(simu, addr, stamp_stream)
|
||||||
}
|
}
|
||||||
@ -263,7 +257,7 @@ fn timestamp_bench(
|
|||||||
#[test]
|
#[test]
|
||||||
fn simulation_system_clock_from_instant() {
|
fn simulation_system_clock_from_instant() {
|
||||||
let t0 = MonotonicTime::EPOCH;
|
let t0 = MonotonicTime::EPOCH;
|
||||||
const TOLERANCE: f64 = 0.0005; // [s]
|
const TOLERANCE: f64 = 0.005; // [s]
|
||||||
|
|
||||||
// The reference simulation time is set in the past of t0 so that the
|
// The reference simulation time is set in the past of t0 so that the
|
||||||
// simulation starts in the future when the reference wall clock time is
|
// simulation starts in the future when the reference wall clock time is
|
||||||
|
Loading…
x
Reference in New Issue
Block a user