1
0
forked from ROMEO/nexosim

Compare commits

...

161 Commits
romeo ... irs

Author SHA1 Message Date
d4494cf0d3 ported romeo patch from v0.2.2
original patch is 93e643a5fde01a9ca0538d9813957687c541de65
2025-03-14 16:22:17 +01:00
Serge Barral
30779d975c
Merge pull request #83 from asynchronics/prepare-release-v0.3.1
Prepare release v0.3.1
2025-01-28 12:38:28 +01:00
Jaŭhien Piatlicki
f1bae2bc83 Prepare release v0.3.1 2025-01-28 11:30:43 +01:00
Jauhien Piatlicki
0804ba714e
Merge pull request #82 from asynchronics/feature-blocking-event-queue
Add blocking event queue.
2025-01-28 11:22:13 +01:00
Jaŭhien Piatlicki
7ff6f4c6c3 Changes after review. 2025-01-28 11:08:17 +01:00
Jaŭhien Piatlicki
8a1a6cf354 Add blocking event queue. 2025-01-27 14:37:32 +01:00
Jauhien Piatlicki
86df51bea8
Merge pull request #75 from Ben-PH/clippy_all
(chore) clear up some `clippy --all-targets` warns
2025-01-23 09:40:58 +01:00
Ben-PH
5a1d2e7341 Update CI 2025-01-22 15:11:42 +01:00
Ben-PH
3530cc3405 derive Default for State in observables e.g. 2025-01-22 14:50:43 +01:00
Ben-PH
b77a4eaa5f auto-fix clipp::manual_map|option_map_unit_fn 2025-01-22 14:47:16 +01:00
Ben-PH
b9eea5f325 auto-fix clipp::bool_assert_comparison 2025-01-22 14:45:43 +01:00
Ben-PH
38f3b82143 auto-fix clippy::let_and_return 2025-01-22 14:44:24 +01:00
Ben-PH
1b3966b5ae auto-fix clippy::uneccesary_fold 2025-01-22 14:42:13 +01:00
Ben-PH
c75d92145e auto-fix clippy::redundant_pattern_matching 2025-01-22 14:40:17 +01:00
Serge Barral
ff7d9de5e2 Prepare release v0.3.0 2025-01-21 03:04:49 +01:00
Serge Barral
2a4b389977
Merge pull request #81 from asynchronics/feature/release_cleanups
Improve documentation, fix README example
2025-01-21 02:17:06 +01:00
Serge Barral
fa8b5cf034 Improve documentation, fix README example 2025-01-21 02:11:23 +01:00
Serge Barral
4531c21f02
Merge pull request #80 from asynchronics/feature/must_use_sync_status
Make the clock's SyncStatus #[must_use]
2025-01-19 17:26:14 +01:00
Serge Barral
a71231f398 Make the clock's SyncStatus #[must_use] 2025-01-19 17:23:38 +01:00
Serge Barral
c243410783
Merge pull request #79 from asynchronics/feature/halt_server_support
Add server support for halting the simulation
2025-01-19 16:12:00 +01:00
Serge Barral
3a23d81446 Add server support for halting the simulation 2025-01-19 15:46:21 +01:00
Serge Barral
37bef4f31b
Merge pull request #78 from asynchronics/feature/unix_domain_sockets
Unix socket server support + rename grpc -> server
2025-01-19 14:49:32 +01:00
Serge Barral
e526071a29 Unix socket server support + rename grpc -> server 2025-01-19 14:42:50 +01:00
Jauhien Piatlicki
81c1d61290
Merge pull request #77 from asynchronics/feature-simulation-halt
Make step_unbounded return an error when halted.
2025-01-17 12:59:31 +01:00
Jaŭhien Piatlicki
fea1ccc1c4 Changes after review. 2025-01-17 12:18:18 +01:00
Jaŭhien Piatlicki
2e58288b04 Make step_unbounded return an error when halted. 2025-01-17 12:13:47 +01:00
Jauhien Piatlicki
5117cc7ca0
Merge pull request #76 from asynchronics/feature-simulation-halt
Make ExecutionError non-exhaustive.
2025-01-15 16:22:27 +01:00
Jaŭhien Piatlicki
be6f5b4897 Make ExecutionError non-exhaustive. 2025-01-15 16:13:45 +01:00
Serge Barral
a29c7f9bfb
Merge pull request #74 from asynchronics/feature-simulation-halt
Add possibility to halt simulation
2025-01-15 16:03:36 +01:00
Jaŭhien Piatlicki
a223a14cc6 Change after review. 2025-01-15 15:59:33 +01:00
Jaŭhien Piatlicki
922d93bd01 Change method name 2025-01-15 14:38:31 +01:00
Jaŭhien Piatlicki
beaefe1d9e Changes after review 2025-01-15 13:38:27 +01:00
Jaŭhien Piatlicki
4111d49295 Provide dummy context for testing. 2025-01-15 13:29:36 +01:00
Jaŭhien Piatlicki
27ec1396df Add infinite step and an example. 2025-01-15 13:10:37 +01:00
Jaŭhien Piatlicki
1b08f10e42 Add possibility to halt simulation 2025-01-13 16:00:45 +01:00
Jauhien Piatlicki
2e0653e1e3
Merge pull request #73 from asynchronics/feature/grpc_rework
Feature/grpc rework
2025-01-12 22:22:22 +01:00
Serge Barral
7a95a4d0f4 Various gRPC improvements
This patch in particular allows asynchronous gRPC access to the scheduler
and to the monitoring functions.
2025-01-09 17:51:39 +01:00
Serge Barral
43407741eb Implement Clock for references to and boxed Clock 2025-01-09 17:08:18 +01:00
Serge Barral
8de53aff1f Make source event/query creation methods immutable 2025-01-09 17:08:18 +01:00
Serge Barral
a87bf493b3 Rename gRPC package as simulation.v1 2025-01-09 17:08:18 +01:00
Jauhien Piatlicki
42192ddfe3
Merge pull request #72 from asynchronics/move-uni-example
Move UniRequestor example to util crate
2024-12-12 12:34:15 +01:00
Jaŭhien Piatlicki
26e3e5c76b Move UniRequestor example to util crate 2024-12-12 12:27:59 +01:00
Serge Barral
4623765ca2
Merge pull request #71 from asynchronics/combinator
Add replier adaptor
2024-12-11 15:38:01 +01:00
Jaŭhien Piatlicki
d63bcdf4f0 Fix clippy warnings after version update 2024-12-09 15:01:21 +01:00
Jaŭhien Piatlicki
97c855293d Implement Default for ReplierAdaptor 2024-12-06 15:50:11 +01:00
Jaŭhien Piatlicki
0274e62eb0 Add replier adaptor 2024-12-06 15:29:18 +01:00
Jauhien Piatlicki
31be2b0c75
Merge pull request #70 from asynchronics/feature/report_missing_recipient
Report an error if a message cannot be delivered
2024-11-27 13:05:50 +01:00
Serge Barral
d4192e83ed Report an error if a message cannot be delivered 2024-11-26 16:19:22 +01:00
Serge Barral
d88c527cb7
Merge pull request #69 from asynchronics/feature-unirequestor
Add UniRequestor port
2024-11-26 12:22:26 +01:00
Jaŭhien Piatlicki
bb7923f689 Changes after review 2024-11-25 10:17:48 +01:00
Jaŭhien Piatlicki
c06233ad74 Change after review 2024-11-25 09:54:42 +01:00
Jaŭhien Piatlicki
5b2c872e15 Add UniRequestor port 2024-11-22 20:26:23 +01:00
Jauhien Piatlicki
224aea59e5
Merge pull request #68 from asynchronics/feature/detect_lost_messages
Detect lost messages
2024-11-19 12:59:49 +01:00
Serge Barral
f73a99bc4e
Update CHANGELOG.md 2024-11-18 23:46:05 +01:00
Serge Barral
b1a02bd07f Detect lost messages
Messages sent to a mailbox that wasn't added to the simulation are now
specifically detected. Earlier this would be wrongly reported as a
deadlock.
2024-11-18 18:56:23 +01:00
Serge Barral
0da9e6649d
Update README.md 2024-11-16 21:25:12 +01:00
Serge Barral
3fbceaed5d
Merge pull request #67 from asynchronics/release/0.3.0-beta.0
Prepare release v0.3.0-beta.0
2024-11-16 20:45:30 +01:00
Serge Barral
09b3c44eab Prepare release v0.3.0-beta.0 2024-11-16 20:40:08 +01:00
Serge Barral
7e07f3f949
Merge pull request #66 from asynchronics/feature/grpc-api-simplification
Simplify gRPC bench API
2024-11-16 20:36:27 +01:00
Serge Barral
f89624daf0 Simplify gRPC bench API 2024-11-16 20:33:44 +01:00
Serge Barral
0a10f256bc
Update README.md 2024-11-16 15:41:09 +01:00
Serge Barral
a6c3217fdf
Merge pull request #65 from asynchronics/feature/rename_crate
Rename crate to NeXosim
2024-11-16 12:32:05 +01:00
Serge Barral
5ab898ad79 Rename crate to NeXosim 2024-11-16 12:26:36 +01:00
Serge Barral
a96a4dc0bd
Merge pull request #64 from asynchronics/feature/grpc_api_change
Make the gRPC init more general
2024-11-15 23:26:40 +01:00
Serge Barral
84ad02a248 Make the gRPC init more general
Instead of producing a SimInit object, a bench is now expected to return
a fully constructed simulation with its scheduler.

This means that the client does not necessarily need to provide the
starting time for the simulation. This start time may be hardcoded in
the bench, or may be taken as a parameter for the bench configuration.

This change makes it possible for benches to do more, for instance to
pre-schedule some events, or to do less, for instance by hardcoding the
simulation time rather than accepting an arbitrary simulation time.
2024-11-15 23:23:17 +01:00
Jauhien Piatlicki
c749a49154
Merge pull request #63 from asynchronics/feature/misc_api_changes
Feature/misc api changes
2024-11-15 22:46:05 +01:00
Serge Barral
3c1056d699 Remove the LineId API
This was probably never used.
2024-11-15 17:15:25 +01:00
Serge Barral
0f1d876aed Return both simulation and scheduler at init 2024-11-15 16:52:24 +01:00
Serge Barral
f4686af49a Finalize the Context and BuildContext API
The API style is now more uniform: both are passed by mutable ref, and
only expose accessors. Additionally, the methods that were initially
accessed through the scheduler field are now directly implemented on
`Context`.
2024-11-15 16:12:07 +01:00
Serge Barral
b1896dbde9 Order scheduled messages by their origin
Previously, the scheduler key used the target model as subkey to order
messages that target the same model.

Now this subkey is the origin model rather than the target, or in the
case of the global scheduler, 0. This doesn't change anything in practice
for the local scheduler since the origin and target models were the
same, but for the global scheduler this provides additional guarantees.

For instance, if the global scheduler is used to schedule an event
targeting model A and then an event targeting model B where the latter
triggers a message to A, it is now guaranteed that the first message
will get to A before the second.
2024-11-15 14:39:51 +01:00
Serge Barral
1cefe4b2f6
Merge pull request #61 from asynchronics/feature/unified_step_until
Merge step_by and step_until into a unique method
2024-11-14 17:19:13 +01:00
Serge Barral
b5aea810ae Merge step_by and step_until into a unique method
Now that `step_by` returns an error anyway (it was infallible before),
there is no more incentive to keep it as a separate method.
The `step_until` method now accepts an `impl Deadline`, which covers
both cases (`Duration` and `MonotonicTime`).
2024-11-14 17:01:33 +01:00
Jauhien Piatlicki
95aac7721c
Merge pull request #56 from asynchronics/document-utils
Document observable states
2024-11-14 00:20:13 +01:00
Jaŭhien Piatlicki
f7d3e48a1f Remove docstring 2024-11-14 00:01:55 +01:00
Jaŭhien Piatlicki
1af4de1832 Remove println from examples 2024-11-13 23:59:50 +01:00
Jaŭhien Piatlicki
1a0dff0f6e Document observable states 2024-11-13 23:59:50 +01:00
Jauhien Piatlicki
a533b3e6c1
Merge pull request #60 from asynchronics/feature/catch_panics
Report panics as errors + identify panicking model
2024-11-13 23:56:51 +01:00
Serge Barral
ba1e668447 Report panics as errors + identify panicking model
The build context is now passed as a mutable reference due to the need
to mutate data when adding a model.

Contains small unrelated cleanups and documentation improvements too.
2024-11-13 19:40:58 +01:00
Jauhien Piatlicki
e6f77ea8e5
Merge pull request #58 from asynchronics/feature/clock-synchronization
Check clock sync with configurable tolerance
2024-11-12 13:21:58 +01:00
Serge Barral
49e713262b Check clock sync with configurable tolerance 2024-11-12 11:34:17 +01:00
Serge Barral
b690055848
Merge pull request #59 from asynchronics/fix/loom_ci_path_on_pr
Add path filtering for Loom CI also on PRs
2024-11-12 11:24:33 +01:00
Serge Barral
44e86b81d1 Add path filtering for Loom CI also on PRs 2024-11-12 11:21:58 +01:00
Serge Barral
0c2f92d4cf
Merge pull request #57 from asynchronics/feature/timeout
Add support for simulation timeouts
2024-11-09 12:18:41 +01:00
Serge Barral
e6901386cf Add support for simulation timeouts 2024-11-09 12:17:27 +01:00
Jauhien Piatlicki
c6fd4d90c4
Merge pull request #55 from asynchronics/feature/multiexecutor-integration-tests
Run integration tests on both ST and MT executors
2024-11-08 12:12:36 +01:00
Serge Barral
abbfb64628 Run integration tests on both ST and MT executors 2024-11-08 11:08:47 +01:00
Jauhien Piatlicki
35e7e17814
Merge pull request #54 from asynchronics/feature/protomodel
Introduce ProtoModel trait, remove Model::setup
2024-11-05 23:36:51 +01:00
Serge Barral
039fefad47 Introduce ProtoModel trait, remove Model::setup
The external_input example has been adapted as well and (at least
temporarily) simplified/modified to remove the dependencies on
`atomic_wait` and `mio`.
2024-11-05 16:16:42 +01:00
Serge Barral
06079bd5cd
Merge pull request #53 from asynchronics/feature/util
Add asynchronix-util crate
2024-10-31 17:03:51 +01:00
Jaŭhien Piatlicki
0732a7ef54 Changes after review 2024-10-31 15:59:35 +01:00
Jaŭhien Piatlicki
087f3c84cc Add README to asynchronix-util 2024-10-31 14:57:41 +01:00
Jaŭhien Piatlicki
634614a2a1 Add observable states utility 2024-10-31 14:43:30 +01:00
Jauhien Piatlicki
8f7057689c
Merge pull request #51 from asynchronics/feature/deadlock-detection
Feature/deadlock detection
2024-10-29 11:02:55 +01:00
Serge Barral
e7b64524e0 Report deadlocked models and their mailbox size 2024-10-28 12:25:02 +01:00
Serge Barral
1cfaa00f9e Make execution fallible, impl deadlock detection
TODO: return the list of models involved in a deadlock.

Note that many execution errors are not implemented at all at the
moment and will need separate PRs, namely:
- Terminated
- ModelError
- Panic
2024-10-20 12:35:44 +02:00
Serge Barral
e7889c8e9b
Merge pull request #48 from SMassalski/main
Fix missing derive clone statement for Requestor
2024-10-17 17:29:37 +02:00
SMassalski
ef17e56b10 Fix missing derive clone statement for Requestor 2024-10-17 16:20:44 +02:00
Jauhien Piatlicki
ef43f99a9c
Merge pull request #47 from asynchronics/feature/tracing
Feature/tracing
2024-09-13 16:20:35 +02:00
Serge Barral
1dfb79f596 Add small example of tracing logging 2024-09-13 16:08:49 +02:00
Serge Barral
7487a264ab Add tracing support for simulation timestamps 2024-09-12 15:35:31 +02:00
Serge Barral
e376f17c7c Add model tracing spans + tracing feature flag 2024-09-10 11:12:49 +02:00
Serge Barral
c7d86b9df1
Merge pull request #46 from asynchronics/feature/upgrade-dependencies
Upgrade some dependencies
2024-09-08 23:25:08 +02:00
Serge Barral
b500d071ec Upgrade some dependencies 2024-09-08 23:21:32 +02:00
Serge Barral
7aa6bd10a6
Merge pull request #45 from asynchronics/fix/ci-doc-and-ci-checkout
Update checkout to v4, build docs with nightly
2024-09-08 20:23:52 +02:00
Serge Barral
de97b7cf0e Update checkout to v4, build docs with nightly 2024-09-08 20:15:15 +02:00
Serge Barral
d898fb3f05
Merge pull request #44 from asynchronics/feature/improve-docgen
Feature/improve docgen
2024-09-08 17:40:09 +02:00
Serge Barral
2a8a3738cb Use better supported UTF symbols for arrow heads 2024-09-08 17:38:58 +02:00
Serge Barral
1c0c8ed529 Add automatic feature documentation for docs.rs 2024-09-08 17:31:39 +02:00
Serge Barral
bf7fb9b28b
Merge pull request #43 from asynchronics/feature/refactor-rpc
Simplify gRPC backend arch + remove wasm backend
2024-09-08 17:08:01 +02:00
Serge Barral
6b43fcf704 Simplify gRPC backend arch + remove wasm backend 2024-09-08 17:03:56 +02:00
Serge Barral
3ccf05335b
Merge pull request #41 from asynchronics/feature/loom-as-dev-dependency
Move loom as a dev-dependency
2024-09-07 19:56:21 +02:00
Serge Barral
114c148114 Move loom as a dev-dependency 2024-09-07 19:53:30 +02:00
Jauhien Piatlicki
e75edcbd33
Merge pull request #35 from asynchronics/feature/connect_map
Take message by ref in (filter)map_connect closures
2024-08-16 11:30:10 +02:00
Serge Barral
1b1db5e0b8 Take message ref in (filter)map_connect closures
This avoids preemptive cloning when the closures don't consume the
message, which is common when the filtering closure returns `None`.
2024-08-16 11:15:58 +02:00
Jauhien Piatlicki
1f3e04e796
Merge pull request #33 from asynchronics/feature/connect_map
Add tests for filter_map_connect (source & output)
2024-08-07 21:36:00 +02:00
Serge Barral
525f708d55 Add tests for filter_map_connect (source & output) 2024-08-07 17:36:51 +02:00
Jauhien Piatlicki
252ada4946
Merge pull request #32 from asynchronics/feature/connect_map
Add map/filter_map variants of the `connect` method
2024-08-07 14:56:27 +02:00
Serge Barral
c4d93f5c31 Disable Loom tests for broadcaster due to Loom bug 2024-08-07 10:29:13 +02:00
Serge Barral
b544bcee92 Simplify task_set & satisfy clippy 2024-08-07 10:23:10 +02:00
Serge Barral
b5187ded44 Optimize filtered connections from outputs 2024-08-07 10:11:53 +02:00
Serge Barral
2270a94b8d Simplify output broadcaster implementation 2024-08-05 09:56:13 +02:00
Serge Barral
7f244d2334 Add map/filter_map variants for source connection 2024-08-05 09:11:51 +02:00
Serge Barral
3527d62b41 Remove unnecessary trait bounds + improve doc 2024-08-03 19:29:29 +02:00
Serge Barral
0ec781e18b Add filter_map variants for output port connection 2024-08-03 11:47:57 +02:00
Serge Barral
9a2cfe8e77 Add support for mapped connections from ports 2024-08-02 18:53:07 +02:00
Jauhien Piatlicki
d9099c4bfa
Merge pull request #31 from asynchronics/feature-asynchronuous-scheduling
More idiomatic loop
2024-08-02 16:23:47 +02:00
Jaŭhien Piatlicki
3b4a3e9b0e More idiomatic loop 2024-08-02 16:19:36 +02:00
Jaŭhien Piatlicki
5d61abd729 Fix method visibility 2024-08-02 16:13:21 +02:00
Serge Barral
a163e5a1e1
Merge pull request #30 from asynchronics/feature-asynchronuous-scheduling
Change scheduler interface and add external inputs example.
2024-08-02 14:11:57 +02:00
Jaŭhien Piatlicki
6e3d5bb132 Change scheduler interface and add external inputs example.
Relevant for issue #13.
2024-08-02 14:06:49 +02:00
Serge Barral
a6a2c85129
Merge pull request #29 from asynchronics/feature/grpc-init-parameters
Accept an arbitrary argument for remote init
2024-07-31 16:00:29 +02:00
Serge Barral
1b0395f693 Accept an arbitrary argument for remote init 2024-07-29 18:41:25 +02:00
Serge Barral
7e5f623ac5
Merge pull request #27 from asynchronics/feature-auto-action-key
Add AutoActionKey
2024-06-26 13:45:54 +02:00
Jaŭhien Piatlicki
8b015b2eba Add AutoActionKey 2024-06-26 13:42:24 +02:00
Serge Barral
cb7caa10e9
Merge pull request #26 from asynchronics/feature/cbor-instead-of-msgpack
Replace MessagePack by CBOR
2024-06-19 12:07:26 +02:00
Serge Barral
8ec5cd9e9b Replace MessagePack by CBOR
CBOR looks very similar but seems more future-proof as it was
standardized by the IETF in RFC 8949.
2024-06-19 12:00:59 +02:00
Serge Barral
4039d96127
Merge pull request #25 from asynchronics/feature/split-source-and-sink
Feature/split source and sink
2024-06-12 11:37:11 +02:00
Serge Barral
0abc520e4b Split sinks and source registries
This makes it possible to concurrently control and monitor the
simulation when using gRPC.
Accordingly, the gRPC server now runs on 2 threads so it can serve
control and monitoring requests concurrently.
2024-06-12 11:20:34 +02:00
Serge Barral
f731d40add WIP 2024-06-11 11:27:16 +02:00
Serge Barral
a7e691c002
Merge pull request #24 from asynchronics/dev
Merge private dev branch into main
2024-05-27 23:50:26 +02:00
Serge Barral
4f494312be
Merge pull request #11 from asynchronics/feature/wasm-compatibility
Feature/wasm compatibility
2024-05-27 23:36:17 +02:00
Serge Barral
59d2af51ba Expose the Protobuf simulation service to WASM/JS 2024-05-27 23:33:02 +02:00
Serge Barral
77e6e569ff Add same-thread executor support 2024-05-27 23:12:45 +02:00
Jauhien Piatlicki
88d954dde5
Merge pull request #10 from asynchronics/feature-named-model-instances
Feature: named model instances
2024-05-17 14:50:43 +02:00
Jaŭhien Piatlicki
195bcdceba Add instance name to model contexts 2024-05-10 14:32:07 +02:00
Jauhien Piatlicki
4b5195f981
Merge pull request #5 from asynchronics/feature-submodels
Add test and improve example
2024-05-10 10:01:50 +02:00
Jaŭhien Piatlicki
02eec1b277 Add test and improve example 2024-05-08 11:20:33 +02:00
Serge Barral
287b3b713c
Merge pull request #9 from asynchronics/fix/ci-failures
Fix comments
2024-05-07 12:49:57 +02:00
Serge Barral
2fa159a87f Fix comments 2024-05-07 12:44:42 +02:00
Serge Barral
8467b35f03
Merge pull request #7 from asynchronics/fix/ci-failures
Fix/ci failures
2024-05-07 01:58:05 +02:00
Serge Barral
af3d68e76f Force the waker VTable to be uniquely instantiated
From Rust 1.78, `Waker::will_wake` tests equality by comparing the VTable
pointers rather than the content of the VTable.

Unfortunately, this exposes some instability in the code generation
which sometimes causes several VTables to be instantiated in memory for
the same generic parameters. This can in turn defeat `Waker::will_wake`
if e.g. `Waker::clone` and `Waker::wake_by_*` end up with different
pointers.

The problem is hopefully addressed by preventing inlining of the VTable
generation function. A test has been added to try to detect regression,
though the test may not be 100% reliable.
2024-05-07 01:37:47 +02:00
Serge Barral
e4b108c6b7 Satisfy newest clippy 2024-05-06 16:45:07 +02:00
Serge Barral
a24e5df62e
Merge pull request #4 from asynchronics/feature-submodels
Implement clonable outputs and add submodels example
2024-04-30 10:19:46 +02:00
Jaŭhien Piatlicki
0734dc2fac Implement clonable outputs and add submodels example 2024-04-29 21:00:12 +02:00
Serge Barral
e7c0c5f217
Merge pull request #3 from asynchronics/fix/grpc-and-codegen-format
Fix/grpc and codegen format
2024-04-26 16:14:59 +02:00
Serge Barral
9b4f69c17e gRPC: small fix + minor changes 2024-04-26 16:10:00 +02:00
Serge Barral
4de071eaf3
Merge pull request #2 from asynchronics/feature/improved-ci
Feature/improved ci
2024-04-26 13:55:20 +02:00
Serge Barral
9956c4fa3f CI: add --all-features and force check on dev 2024-04-26 13:43:06 +02:00
Jauhien Piatlicki
97b173a081
Merge pull request #1 from asynchronics/feature-submodels
Add setup step
2024-04-26 11:55:03 +02:00
Jaŭhien Piatlicki
7526ffbcea Add setup step. 2024-04-26 11:48:11 +02:00
Serge Barral
e84e802f09 Initial (g)RPC implementation 2024-04-25 11:12:54 +02:00
123 changed files with 16936 additions and 5814 deletions

View File

@ -17,10 +17,10 @@ jobs:
matrix:
rust:
- stable
- 1.64.0
- 1.77.0
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@master
@ -30,63 +30,128 @@ jobs:
- name: Run cargo check
run: cargo check --all-features
build-wasm:
name: Build wasm32
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
with:
targets: wasm32-unknown-unknown
- name: Run cargo build (wasm)
run: cargo build --target wasm32-unknown-unknown
test:
name: Test suite
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
- name: Run cargo test
run: cargo test
run: cargo test --all-features
loom-dry-run:
name: Loom dry run
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
- name: Dry-run cargo test (Loom)
run: cargo test --no-run --tests
run: cargo test --no-run --lib --all-features
env:
RUSTFLAGS: --cfg asynchronix_loom
RUSTFLAGS: --cfg nexosim_loom
miri:
name: Miri
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@nightly
with:
components: miri
- name: Run cargo miri tests
run: cargo miri test --tests --lib
- name: Run cargo miri tests (single-threaded executor)
run: cargo miri test --tests --lib --all-features
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri tests (multi-threaded executor)
run: cargo miri test --tests --lib --all-features
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri example1
- name: Run cargo miri espresso_machine (single-threaded executor)
run: cargo miri run --example espresso_machine
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri espresso_machine (multi-threaded executor)
run: cargo miri run --example espresso_machine
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri example2
- name: Run cargo miri power_supply (single-threaded executor)
run: cargo miri run --example power_supply
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri power_supply (multi-threaded executor)
run: cargo miri run --example power_supply
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri example3
- name: Run cargo miri stepper_motor (single-threaded executor)
run: cargo miri run --example stepper_motor
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri stepper_motor (multi-threaded executor)
run: cargo miri run --example stepper_motor
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri assembly (single-threaded executor)
run: cargo miri run --example assembly
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri assembly (multi-threaded executor)
run: cargo miri run --example assembly
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri uni_requestor (single-threaded executor)
run: cargo miri run --example uni_requestor
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri uni_requestor (multi-threaded executor)
run: cargo miri run --example uni_requestor
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
- name: Run cargo miri observables (single-threaded executor)
run: cargo miri run --example observables
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=1
- name: Run cargo miri observables (multi-threaded executor)
run: cargo miri run --example observables
env:
MIRIFLAGS: -Zmiri-strict-provenance -Zmiri-disable-isolation -Zmiri-num-cpus=4
@ -95,7 +160,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
@ -104,17 +169,22 @@ jobs:
run: cargo fmt --all -- --check
- name: Run cargo clippy
run: cargo clippy
run: |
cargo clippy --tests --all-features
# See PR#75: https://github.com/asynchronics/nexosim/pull/75
cargo clippy --examples --all-features -- -A clippy::new_without_default -A clippy::manual_async_fn
docs:
name: Docs
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
uses: dtolnay/rust-toolchain@nightly
- name: Run cargo doc
run: cargo doc --no-deps --document-private-items
run: cargo doc --no-deps --all-features --document-private-items
env:
RUSTDOCFLAGS: --cfg docsrs -Dwarnings

View File

@ -1,20 +1,36 @@
name: Loom
on:
workflow_dispatch:
pull_request:
paths:
- 'nexosim/src/channel.rs'
- 'nexosim/src/channel/**'
- 'nexosim/src/executor/task.rs'
- 'nexosim/src/executor/task/**'
- 'nexosim/src/loom_exports.rs'
- 'nexosim/src/ports/output/broadcaster.rs'
- 'nexosim/src/ports/output/broadcaster/**'
- 'nexosim/src/ports/source/broadcaster.rs'
- 'nexosim/src/ports/source/broadcaster/**'
- 'nexosim/src/util/cached_rw_lock.rs'
- 'nexosim/src/util/slot.rs'
- 'nexosim/src/util/sync_cell.rs'
push:
branches: [ main ]
paths:
- 'asynchronix/src/channel.rs'
- 'asynchronix/src/channel/**'
- 'asynchronix/src/executor/task.rs'
- 'asynchronix/src/executor/task/**'
- 'asynchronix/src/loom_exports.rs'
- 'asynchronix/src/model/ports/broadcaster.rs'
- 'asynchronix/src/model/ports/broadcaster/**'
- 'asynchronix/src/util/slot.rs'
- 'asynchronix/src/util/spsc_queue.rs'
- 'asynchronix/src/util/sync_cell.rs'
- 'nexosim/src/channel.rs'
- 'nexosim/src/channel/**'
- 'nexosim/src/executor/task.rs'
- 'nexosim/src/executor/task/**'
- 'nexosim/src/loom_exports.rs'
- 'nexosim/src/ports/output/broadcaster.rs'
- 'nexosim/src/ports/output/broadcaster/**'
- 'nexosim/src/ports/source/broadcaster.rs'
- 'nexosim/src/ports/source/broadcaster/**'
- 'nexosim/src/util/cached_rw_lock.rs'
- 'nexosim/src/util/slot.rs'
- 'nexosim/src/util/sync_cell.rs'
jobs:
loom:
@ -22,12 +38,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout sources
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install toolchain
uses: dtolnay/rust-toolchain@stable
- name: Run cargo test (Loom)
run: cargo test --tests --release
run: cargo test --lib --release
env:
RUSTFLAGS: --cfg asynchronix_loom
RUSTFLAGS: --cfg nexosim_loom

View File

@ -1,10 +1,117 @@
# 0.3.1 (2025-01-28)
- Add a blocking event queue ([#82]).
[#82]: https://github.com/asynchronics/nexosim/pull/82
# 0.3.0 (2025-01-20)
The final 0.3.0 release features a very large number of improvements and API
changes, including all those in the beta release and a couple more.
This release is not compatible with the 0.2.* releases, but porting models and benches should be relatively straightforward.
### Added (mostly API-breaking changes)
- Add a gRPC server for local (Unix Domain Sockets) and remote (http/2)
execution ([#12], [#24], [#25], [#26], [#29], [#43], [#78], [#79])
- Single-threaded executor supporting compilation to WebAssembly ([#24])
- Add support for the `tracing` crate ([#47])
- Make `Output`s and `Requestor`s `Clone`-able ([#30], [#48])
- Make the global `Scheduler` an owned `Clone`-able type that can be sent to
other threads ([#30])
- Add an automatically managed action key for scheduled actions/events ([#27])
- Enable connection of different input/output pairs with `map_connect()` methods
on `Output` and `Requestor` ([#32])
- Streamline the creation of data buses (SPI, CAN, MIL-STD-1553, SpaceWire etc.)
with `filter_map_connect()` methods on `Output` and `Requestor` ([#32])
- Implement deadlock detection ([#51])
- Streamline the builder pattern for models with a `ProtoModel` trait ([#54])
- Implement execution timeout ([#57])
- Return an error when a real-time simulation clock loses synchronization
  ([#58])
- Catch model panics and report them as errors ([#60])
- Provide additional ordering guarantees when using the global scheduler ([#62])
- Remove `LineId` line disconnection API ([#63])
- Implement detection of lost and undelivered messages ([#68], [#70])
- Provide a `UniRequestor` type for unary requestors ([#69])
- Add support for intentionally halting an ongoing simulation and add a
`Simulation::step_unbounded` method ([#74], [#77])
[#68]: https://github.com/asynchronics/nexosim/pull/68
[#69]: https://github.com/asynchronics/nexosim/pull/69
[#70]: https://github.com/asynchronics/nexosim/pull/70
[#74]: https://github.com/asynchronics/nexosim/pull/74
[#77]: https://github.com/asynchronics/nexosim/pull/77
[#78]: https://github.com/asynchronics/nexosim/pull/78
[#79]: https://github.com/asynchronics/nexosim/pull/79
# 0.3.0-beta.0 (2024-11-16)
This beta release features a very large number of improvements and API changes,
including:
- Add a gRPC server for remote execution ([#12], [#24], [#25], [#26], [#29],
[#43])
- Single-threaded executor supporting compilation to WebAssembly ([#24])
- Add support for the `tracing` crate ([#47])
- Make `Output`s and `Requestor`s `Clone`-able ([#30], [#48])
- Make the global `Scheduler` an owned `Clone`-able type ([#30])
- Add an automatically managed action key for scheduled actions/events ([#27])
- Enable connection of different input/output pairs with `map_connect()` methods
on `Output` and `Requestor` ([#32])
- Streamline the creation of data buses (SPI, CAN, MIL-STD-1553, SpaceWire etc.)
with `filter_map_connect()` methods on `Output` and `Requestor` ([#32])
- Implement deadlock detection ([#51])
- Streamline the builder pattern for models with a `ProtoModel` trait ([#54])
- Implement execution timeout ([#57])
- Return an error when a real-time simulation clock loses synchronization
  ([#58])
- Catch model panics and report them as errors ([#60])
- Provide additional ordering guarantees when using the global scheduler ([#62])
- Remove `LineId` line disconnection API ([#63])
[#12]: https://github.com/asynchronics/nexosim/pull/12
[#24]: https://github.com/asynchronics/nexosim/pull/24
[#25]: https://github.com/asynchronics/nexosim/pull/25
[#26]: https://github.com/asynchronics/nexosim/pull/26
[#27]: https://github.com/asynchronics/nexosim/pull/27
[#29]: https://github.com/asynchronics/nexosim/pull/29
[#30]: https://github.com/asynchronics/nexosim/pull/30
[#32]: https://github.com/asynchronics/nexosim/pull/32
[#43]: https://github.com/asynchronics/nexosim/pull/43
[#47]: https://github.com/asynchronics/nexosim/pull/47
[#48]: https://github.com/asynchronics/nexosim/pull/48
[#51]: https://github.com/asynchronics/nexosim/pull/51
[#54]: https://github.com/asynchronics/nexosim/pull/54
[#57]: https://github.com/asynchronics/nexosim/pull/57
[#58]: https://github.com/asynchronics/nexosim/pull/58
[#60]: https://github.com/asynchronics/nexosim/pull/60
[#62]: https://github.com/asynchronics/nexosim/pull/62
[#63]: https://github.com/asynchronics/nexosim/pull/63
# 0.2.4 (2024-11-16)
- Add crate rename notice
# 0.2.3 (2024-08-24)
- Force the waker VTable to be uniquely instantiated to re-enable the
`will_wake` optimisation after its implementation was changed in `std` ([#38])
- Ignore broadcast error when sending to a closed `EventStream` ([#37])
[#37]: https://github.com/asynchronics/nexosim/pull/37
[#38]: https://github.com/asynchronics/nexosim/pull/38
# 0.2.2 (2024-04-04)
- Add `serde` feature and serialization support for `MonotonicTime` ([#19]).
- Update `multishot` dependency due to soundness issue in older version ([#23]).
[#19]: https://github.com/asynchronics/asynchronix/pull/19
[#23]: https://github.com/asynchronics/asynchronix/pull/23
[#19]: https://github.com/asynchronics/nexosim/pull/19
[#23]: https://github.com/asynchronics/nexosim/pull/23
# 0.2.1 (2024-03-06)
@ -13,8 +120,8 @@
- Add support for custom clocks and provide an optional real-time clock
([#9], [#15]).
[#9]: https://github.com/asynchronics/asynchronix/pull/9
[#15]: https://github.com/asynchronics/asynchronix/pull/15
[#9]: https://github.com/asynchronics/nexosim/pull/9
[#15]: https://github.com/asynchronics/nexosim/pull/15
### Misc
@ -33,9 +140,9 @@
`schedule_*event` method that accept either a `Duration` or a `MonotonicTime`
([#7]).
[#5]: https://github.com/asynchronics/asynchronix/pull/5
[#6]: https://github.com/asynchronics/asynchronix/pull/6
[#7]: https://github.com/asynchronics/asynchronix/pull/7
[#5]: https://github.com/asynchronics/nexosim/pull/5
[#6]: https://github.com/asynchronics/nexosim/pull/6
[#7]: https://github.com/asynchronics/nexosim/pull/7
# 0.1.0 (2023-01-16)

View File

@ -1,3 +1,3 @@
[workspace]
members = ["asynchronix"]
members = ["nexosim", "nexosim-util"]
resolver = "2"

View File

@ -1,6 +1,6 @@
The MIT License (MIT)
Copyright (c) 2024 Asynchronics sp. z o.o. and Asynchronix Contributors
Copyright (c) 2025 Asynchronics sp. z o.o. and NeXosim Contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@ -1,17 +1,23 @@
# Asynchronix
# NeXosim
Asynchronix is a developer-friendly, highly optimized discrete-event simulation
framework written in Rust. It is meant to scale from small, simple simulations
to very large simulation benches with complex time-driven state machines.
NeXosim (né Asynchronix) is a developer-friendly, highly optimized
discrete-event simulation framework written in Rust. It is meant to scale from
small, simple simulations to very large simulation benches with complex
time-driven state machines.
[![Cargo](https://img.shields.io/crates/v/asynchronix.svg)](https://crates.io/crates/asynchronix)
[![Documentation](https://docs.rs/asynchronix/badge.svg)](https://docs.rs/asynchronix)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/asynchronics/asynchronix#license)
## 🎉🥳 NeXosim 0.3.1 is out! 🚀🛰️
See the [changelog](CHANGELOG.md) for a summary of new features, or head to the extensive [API documentation][API] for the details.
[![Cargo](https://img.shields.io/crates/v/nexosim.svg)](https://crates.io/crates/nexosim)
[![Documentation](https://docs.rs/nexosim/badge.svg)](https://docs.rs/nexosim)
[![License](https://img.shields.io/badge/license-MIT%2FApache--2.0-blue.svg)](https://github.com/asynchronics/nexosim#license)
## Overview
Asynchronix is a simulator that leverages asynchronous programming to
NeXosim is a simulator that leverages asynchronous programming to
transparently and efficiently auto-parallelize simulations by means of a custom
multi-threaded executor.
@ -22,7 +28,7 @@ communicating with other models through message passing via connections defined
during bench assembly.
Although the main impetus for its development was the need for simulators able
to handle large cyberphysical systems, Asynchronix is a general-purpose
to handle large cyberphysical systems, NeXosim is a general-purpose
discrete-event simulator expected to be suitable for a wide range of simulation
activities. It draws from experience on spacecraft real-time simulators but
differs from existing tools in the space industry in a number of respects,
@ -35,7 +41,7 @@ including:
2) *developer-friendliness*: an ergonomic API and Rust's support for algebraic
types make it ideal for the "cyber" part in cyberphysical, i.e. for modelling
digital devices with even very complex state machines,
3) *open-source*: last but not least, Asynchronix is distributed under the very
3) *open-source*: last but not least, NeXosim is distributed under the very
permissive MIT and Apache 2 licenses, with the explicit intent to foster an
ecosystem where models can be easily exchanged without reliance on
proprietary APIs.
@ -49,18 +55,19 @@ The [API] documentation is relatively exhaustive and includes a practical
overview which should provide all necessary information to get started.
More fleshed out examples can also be found in the dedicated
[directory](asynchronix/examples).
[simulator](nexosim/examples) and [utilities](nexosim-util/examples)
directories.
[API]: https://docs.rs/asynchronix
[API]: https://docs.rs/nexosim
## Usage
Add this to your `Cargo.toml`:
To use the latest version, add to your `Cargo.toml`:
```toml
[dependencies]
asynchronix = "0.2.2"
nexosim = "0.3.1"
```
@ -72,23 +79,24 @@ asynchronix = "0.2.2"
//
// ┌──────────────┐ ┌──────────────┐
// │ │ │ │
// Input ●─────▶│ multiplier 1 ├─────▶│ multiplier 2 ├─────▶ Output
// Input ●─────►│ multiplier 1 ├─────►│ multiplier 2 ├─────► Output
// │ │ │ │
// └──────────────┘ └──────────────┘
use asynchronix::model::{Model, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, Scheduler};
use std::time::Duration;
use nexosim::model::{Context, Model};
use nexosim::ports::{EventSlot, Output};
use nexosim::simulation::{Mailbox, SimInit};
use nexosim::time::MonotonicTime;
// A model that doubles its input and forwards it with a 1s delay.
#[derive(Default)]
pub struct DelayedMultiplier {
pub output: Output<f64>,
}
impl DelayedMultiplier {
pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
scheduler
.schedule_event(Duration::from_secs(1), Self::send, 2.0 * value)
pub fn input(&mut self, value: f64, ctx: &mut Context<Self>) {
ctx.schedule_event(Duration::from_secs(1), Self::send, 2.0 * value)
.unwrap();
}
async fn send(&mut self, value: f64) {
@ -109,33 +117,35 @@ multiplier1
.connect(DelayedMultiplier::input, &multiplier2_mbox);
// Keep handles to the main input and output.
let mut output_slot = multiplier2.output.connect_slot().0;
let mut output_slot = EventSlot::new();
multiplier2.output.connect_sink(&output_slot);
let input_address = multiplier1_mbox.address();
// Instantiate the simulator
let t0 = MonotonicTime::EPOCH; // arbitrary start time
let mut simu = SimInit::new()
.add_model(multiplier1, multiplier1_mbox)
.add_model(multiplier2, multiplier2_mbox)
.init(t0);
.add_model(multiplier1, multiplier1_mbox, "multiplier 1")
.add_model(multiplier2, multiplier2_mbox, "multiplier 2")
.init(t0)?
.0;
// Send a value to the first multiplier.
simu.send_event(DelayedMultiplier::input, 3.5, &input_address);
simu.process_event(DelayedMultiplier::input, 3.5, &input_address)?;
// Advance time to the next event.
simu.step();
simu.step()?;
assert_eq!(simu.time(), t0 + Duration::from_secs(1));
assert_eq!(output_slot.take(), None);
assert_eq!(output_slot.next(), None);
// Advance time to the next event.
simu.step();
simu.step()?;
assert_eq!(simu.time(), t0 + Duration::from_secs(2));
assert_eq!(output_slot.take(), Some(14.0));
assert_eq!(output_slot.next(), Some(14.0));
```
# Implementation notes
Under the hood, Asynchronix is based on an asynchronous implementation of the
Under the hood, NeXosim is based on an asynchronous implementation of the
[actor model][actor_model], where each simulation model is an actor. The
messages actually exchanged between models are `async` closures which capture
the event's or request's value and take the model as `&mut self` argument. The
@ -153,13 +163,13 @@ asynchronous runtimes such as [Tokio][tokio], because the end of a set of
computations is technically a deadlock: the computation completes when all model
have nothing left to do and are blocked on an empty mailbox. Also, instead of
managing a conventional reactor, the runtime manages a priority queue containing
the posted events. For these reasons, Asynchronix relies on a fully custom
the posted events. For these reasons, NeXosim relies on a fully custom
runtime.
Even though the runtime was largely influenced by Tokio, it features additional
optimizations that make it faster than any other multi-threaded Rust executor
on the typically message-passing-heavy workloads seen in discrete-event
simulation (see [benchmark]). Asynchronix also improves over the state of the
simulation (see [benchmark]). NeXosim also improves over the state of the
art with a very fast custom MPSC channel, whose performance has been
demonstrated through [Tachyonix][tachyonix], a general-purpose offshoot of this
channel.

View File

@ -1,59 +0,0 @@
[package]
name = "asynchronix"
# When incrementing version and releasing to crates.io:
# - Update crate version in this Cargo.toml
# - Update crate version in README.md
# - Update CHANGELOG.md
# - Update if necessary copyright notice in LICENSE-MIT
# - Create a "vX.Y.Z" git tag
authors = ["Serge Barral <serge.barral@asynchronics.com>"]
version = "0.2.2"
edition = "2021"
rust-version = "1.64"
license = "MIT OR Apache-2.0"
repository = "https://github.com/asynchronics/asynchronix"
readme = "../README.md"
description = """
A high performance asynchronous compute framework for system simulation.
"""
categories = ["simulation", "aerospace", "science"]
keywords = ["simulation", "discrete-event", "systems", "cyberphysical", "real-time"]
autotests = false
[features]
serde = ["dep:serde"]
# API-unstable public exports meant for external test/benchmarking; development only.
dev-hooks = []
# Logging of performance-related statistics; development only.
dev-logs = []
[dependencies]
async-event = "0.1"
crossbeam-utils = "0.8"
diatomic-waker = "0.1"
futures-task = "0.3"
multishot = "0.3.2"
num_cpus = "1.13"
pin-project-lite = "0.2"
recycle-box = "0.2"
slab = "0.4"
spin_sleep = "1"
st3 = "0.4"
[dependencies.serde]
version = "1"
optional = true
features = ["derive"]
[target.'cfg(asynchronix_loom)'.dependencies]
loom = "0.5"
waker-fn = "1.1"
[dev-dependencies]
futures-util = "0.3"
futures-channel = "0.3"
futures-executor = "0.3"
[[test]]
name = "integration"
path = "tests/tests.rs"

View File

@ -1,7 +0,0 @@
use super::*;
#[cfg(not(asynchronix_loom))]
mod general;
#[cfg(asynchronix_loom)]
mod loom;

View File

@ -1,140 +0,0 @@
use futures_channel::{mpsc, oneshot};
use futures_util::StreamExt;
use super::*;
/// An object that runs an arbitrary closure when dropped.
struct RunOnDrop<F: FnOnce()> {
    /// Closure to run on drop; `None` once it has been consumed.
    drop_fn: Option<F>,
}
impl<F: FnOnce()> RunOnDrop<F> {
    /// Creates a new `RunOnDrop`.
    fn new(drop_fn: F) -> Self {
        Self {
            drop_fn: Some(drop_fn),
        }
    }
}
impl<F: FnOnce()> Drop for RunOnDrop<F> {
    fn drop(&mut self) {
        // `take` moves the `FnOnce` closure out so it can be called by value.
        // `if let` rather than `Option::map` avoids the clippy
        // `option_map_unit_fn` lint: the closure is run for its side effect,
        // not for a value.
        if let Some(drop_fn) = self.drop_fn.take() {
            drop_fn();
        }
    }
}
#[test]
fn executor_deadlock() {
    const NUM_THREADS: usize = 3;

    static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
    static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);

    // The senders are kept alive but never used, so neither receiver can ever
    // resolve and both tasks block forever.
    let (_tx_a, rx_a) = oneshot::channel::<()>();
    let (_tx_b, rx_b) = oneshot::channel::<()>();

    let mut executor = Executor::new(NUM_THREADS);

    for rx in [rx_a, rx_b] {
        executor.spawn_and_forget(async move {
            LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
            let _ = rx.await;
            COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
        });
    }

    executor.run();

    // The executor must return on deadlock: both tasks were launched, neither
    // completed.
    assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
    assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
}
#[test]
fn executor_deadlock_st() {
    // Single-threaded variant of `executor_deadlock`.
    const NUM_THREADS: usize = 1;

    // The senders are kept alive but never used, so neither receiver can
    // resolve.
    let (_sender1, receiver1) = oneshot::channel::<()>();
    let (_sender2, receiver2) = oneshot::channel::<()>();

    let mut executor = Executor::new(NUM_THREADS);

    static LAUNCH_COUNT: AtomicUsize = AtomicUsize::new(0);
    static COMPLETION_COUNT: AtomicUsize = AtomicUsize::new(0);

    executor.spawn_and_forget(async move {
        LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
        let _ = receiver2.await;
        COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
    });
    executor.spawn_and_forget(async move {
        LAUNCH_COUNT.fetch_add(1, Ordering::Relaxed);
        let _ = receiver1.await;
        COMPLETION_COUNT.fetch_add(1, Ordering::Relaxed);
    });

    executor.run();

    // Check that the executor returns on deadlock, i.e. none of the tasks has
    // completed.
    assert_eq!(LAUNCH_COUNT.load(Ordering::Relaxed), 2);
    assert_eq!(COMPLETION_COUNT.load(Ordering::Relaxed), 0);
}
#[test]
fn executor_drop_cycle() {
    const NUM_THREADS: usize = 3;

    // Three channels; each task below holds senders to the other two tasks'
    // channels, forming a wake cycle that is only exercised on drop.
    let (sender1, mut receiver1) = mpsc::channel(2);
    let (sender2, mut receiver2) = mpsc::channel(2);
    let (sender3, mut receiver3) = mpsc::channel(2);

    let mut executor = Executor::new(NUM_THREADS);

    static DROP_COUNT: AtomicUsize = AtomicUsize::new(0);

    // Spawn 3 tasks that wake one another when dropped.
    executor.spawn_and_forget({
        let mut sender2 = sender2.clone();
        let mut sender3 = sender3.clone();

        async move {
            // When this task is dropped, the guard wakes tasks 2 and 3.
            let _guard = RunOnDrop::new(move || {
                let _ = sender2.try_send(());
                let _ = sender3.try_send(());
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            });
            let _ = receiver1.next().await;
        }
    });
    executor.spawn_and_forget({
        let mut sender1 = sender1.clone();
        let mut sender3 = sender3.clone();

        async move {
            let _guard = RunOnDrop::new(move || {
                let _ = sender1.try_send(());
                let _ = sender3.try_send(());
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            });
            let _ = receiver2.next().await;
        }
    });
    executor.spawn_and_forget({
        let mut sender1 = sender1.clone();
        let mut sender2 = sender2.clone();

        async move {
            let _guard = RunOnDrop::new(move || {
                let _ = sender1.try_send(());
                let _ = sender2.try_send(());
                DROP_COUNT.fetch_add(1, Ordering::Relaxed);
            });
            let _ = receiver3.next().await;
        }
    });

    executor.run();

    // Make sure that all tasks are eventually dropped even though each task
    // wakes the others when dropped.
    drop(executor);

    assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
}

View File

@ -1,25 +0,0 @@
use std::cell::Cell;
use std::sync::Arc;
use super::task::Runnable;
use super::ExecutorContext;
use super::LocalQueue;
/// A local worker with access to global executor resources.
pub(crate) struct Worker {
    /// Queue of tasks owned by this worker.
    pub(super) local_queue: LocalQueue,
    /// Single-task slot, initially empty.
    //
    // NOTE(review): presumably a fast path that bypasses the queue for the
    // common schedule-then-poll case — confirm against the scheduling code.
    pub(super) fast_slot: Cell<Option<Runnable>>,
    /// Executor state shared between all workers via `Arc`.
    pub(super) executor_context: Arc<ExecutorContext>,
}

impl Worker {
    /// Creates a new worker with an empty fast slot.
    pub(super) fn new(local_queue: LocalQueue, executor_context: Arc<ExecutorContext>) -> Self {
        Self {
            local_queue,
            fast_slot: Cell::new(None),
            executor_context,
        }
    }
}

View File

@ -1,253 +0,0 @@
//! Model components.
//!
//! # Model trait
//!
//! Every model must implement the [`Model`] trait. This trait defines an
//! asynchronous initialization method, [`Model::init()`], whose main purpose is
//! to enable models to perform specific actions only once all models have been
//! connected and migrated to the simulation, but before the simulation actually
//! starts.
//!
//! #### Examples
//!
//! A model that does not require initialization can simply use the default
//! implementation of the `Model` trait:
//!
//! ```
//! use asynchronix::model::Model;
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl Model for MyModel {}
//! ```
//!
//! Otherwise, a custom `init()` method can be implemented:
//!
//! ```
//! use std::future::Future;
//! use std::pin::Pin;
//!
//! use asynchronix::model::{InitializedModel, Model};
//! use asynchronix::time::Scheduler;
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl Model for MyModel {
//! fn init(
//! mut self,
//! scheduler: &Scheduler<Self>
//! ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
//! Box::pin(async move {
//! println!("...initialization...");
//!
//! self.into()
//! })
//! }
//! }
//! ```
//!
//! # Events and queries
//!
//! Models can exchange data via *events* and *queries*.
//!
//! Events are send-and-forget messages that can be broadcast from an *output
//! port* to an arbitrary number of *input ports* with a matching event type.
//!
//! Queries actually involve two messages: a *request* that can be broadcast
//! from a *requestor port* to an arbitrary number of *replier ports* with a
//! matching request type, and a *reply* sent in response to such request. The
//! response received by a requestor port is an iterator that yields as many
//! items (replies) as there are connected replier ports.
//!
//!
//! ### Output and requestor ports
//!
//! Output and requestor ports can be added to a model using composition, adding
//! [`Output`] and [`Requestor`] objects as members. They are parametrized by
//! the event, request and reply types.
//!
//! Models are expected to expose their output and requestor ports as public
//! members so they can be connected to input and replier ports when assembling
//! the simulation bench.
//!
//! #### Example
//!
//! ```
//! use asynchronix::model::{Model, Output, Requestor};
//!
//! pub struct MyModel {
//! pub my_output: Output<String>,
//! pub my_requestor: Requestor<u32, bool>,
//! }
//! impl MyModel {
//! // ...
//! }
//! impl Model for MyModel {}
//! ```
//!
//!
//! ### Input and replier ports
//!
//! Input ports and replier ports are methods that implement the [`InputFn`] or
//! [`ReplierFn`] traits with appropriate bounds on their argument and return
//! types.
//!
//! In practice, an input port method for an event of type `T` may have any of
//! the following signatures, where the futures returned by the `async` variants
//! must implement `Send`:
//!
//! ```ignore
//! fn(&mut self) // argument elided, implies `T=()`
//! fn(&mut self, T)
//! fn(&mut self, T, &Scheduler<Self>)
//! async fn(&mut self) // argument elided, implies `T=()`
//! async fn(&mut self, T)
//! async fn(&mut self, T, &Scheduler<Self>)
//! where
//! Self: Model,
//! T: Clone + Send + 'static,
//! R: Send + 'static,
//! ```
//!
//! The scheduler argument is useful for methods that need access to the
//! simulation time or that need to schedule an action at a future date.
//!
//! A replier port for a request of type `T` with a reply of type `R` may in
//! turn have any of the following signatures, where the futures must implement
//! `Send`:
//!
//! ```ignore
//! async fn(&mut self) -> R // argument elided, implies `T=()`
//! async fn(&mut self, T) -> R
//! async fn(&mut self, T, &Scheduler<Self>) -> R
//! where
//! Self: Model,
//! T: Clone + Send + 'static,
//! R: Send + 'static,
//! ```
//!
//! Output and replier ports will normally be exposed as public methods so they
//! can be connected to input and requestor ports when assembling the simulation
//! bench. However, input ports may instead be defined as private methods if
//! they are only used by the model itself to schedule future actions (see the
//! [`Scheduler`](crate::time::Scheduler) examples).
//!
//! Changing the signature of an input or replier port is not considered to
//! alter the public interface of a model provided that the event, request and
//! reply types remain the same.
//!
//! #### Example
//!
//! ```
//! use asynchronix::model::Model;
//! use asynchronix::time::Scheduler;
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl MyModel {
//! pub fn my_input(&mut self, input: String, scheduler: &Scheduler<Self>) {
//! // ...
//! }
//! pub async fn my_replier(&mut self, request: u32) -> bool { // scheduler argument elided
//! // ...
//! # unimplemented!()
//! }
//! }
//! impl Model for MyModel {}
//! ```
//!
use std::future::Future;
use std::pin::Pin;
use crate::time::Scheduler;
pub use model_fn::{InputFn, ReplierFn};
pub use ports::{LineError, LineId, Output, Requestor};
pub mod markers;
mod model_fn;
mod ports;
/// Trait to be implemented by all models.
///
/// This trait enables models to perform specific actions in the
/// [`Model::init()`] method only once all models have been connected and
/// migrated to the simulation bench, but before the simulation actually starts.
/// A common use for `init` is to send messages to connected models at the
/// beginning of the simulation.
///
/// The `init` function converts the model to the opaque `InitializedModel` type
/// to prevent an already initialized model from being added to the simulation
/// bench.
pub trait Model: Sized + Send + 'static {
    /// Performs asynchronous model initialization.
    ///
    /// This asynchronous method is executed exactly once for all models of the
    /// simulation when the
    /// [`SimInit::init()`](crate::simulation::SimInit::init) method is called.
    ///
    /// The default implementation simply converts the model to an
    /// `InitializedModel` without any side effect.
    ///
    /// *Note*: it is currently necessary to box the returned future; this
    /// limitation will be lifted once Rust supports `async` methods in traits.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::future::Future;
    /// use std::pin::Pin;
    ///
    /// use asynchronix::model::{InitializedModel, Model};
    /// use asynchronix::time::Scheduler;
    ///
    /// pub struct MyModel {
    ///     // ...
    /// }
    ///
    /// impl Model for MyModel {
    ///     fn init(
    ///         self,
    ///         scheduler: &Scheduler<Self>
    ///     ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>>{
    ///         Box::pin(async move {
    ///             println!("...initialization...");
    ///
    ///             self.into()
    ///         })
    ///     }
    /// }
    /// ```
    // Removing the boxing constraint requires the
    // `return_position_impl_trait_in_trait` and `async_fn_in_trait` features.
    // Tracking issue: <https://github.com/rust-lang/rust/issues/91611>.
    fn init(
        self,
        scheduler: &Scheduler<Self>,
    ) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
        // Default: no initialization work, just wrap the model.
        Box::pin(async move {
            let _ = scheduler; // suppress the unused argument warning

            self.into()
        })
    }
}
/// Opaque type containing an initialized model.
///
/// A model can be converted to an `InitializedModel` using the `Into`/`From`
/// traits. The implementation of the simulation guarantees that the
/// [`Model::init()`] method will never be called on a model after conversion to
/// an `InitializedModel`.
#[derive(Debug)]
pub struct InitializedModel<M: Model>(pub(crate) M); // crate-private field: only the simulation can unwrap the model

impl<M: Model> From<M> for InitializedModel<M> {
    /// Wraps the model, marking it as initialized.
    fn from(model: M) -> Self {
        InitializedModel(model)
    }
}

View File

@ -1,218 +0,0 @@
//! Model ports for event and query broadcasting.
//!
//! Models typically contain [`Output`] and/or [`Requestor`] ports, exposed as
//! public member variables. Output ports broadcast events to all connected
//! input ports, while requestor ports broadcast queries to, and retrieve
//! replies from, all connected replier ports.
//!
//! On the surface, output and requestor ports only differ in that sending a
//! query from a requestor port also returns an iterator over the replies from
//! all connected ports. Sending a query is more costly, however, because of the
//! need to wait until all connected models have processed the query. In
//! contrast, since events are buffered in the mailbox of the target model,
//! sending an event is a fire-and-forget operation. For this reason, output
//! ports should generally be preferred over requestor ports when possible.
use std::fmt;
use std::sync::{Arc, Mutex};
mod broadcaster;
mod sender;
use crate::model::{InputFn, Model, ReplierFn};
use crate::simulation::{Address, EventSlot, EventStream};
use crate::util::spsc_queue;
use broadcaster::Broadcaster;
use self::sender::{EventSender, EventSlotSender, EventStreamSender, QuerySender};
/// Unique identifier for a connection between two ports.
///
/// Identifiers are allocated sequentially by each port and are only meaningful
/// to the `Output` or `Requestor` instance that issued them.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
pub struct LineId(u64);
/// An output port.
///
/// `Output` ports can be connected to input ports, i.e. to asynchronous model
/// methods that return no value. They broadcast events to all connected input
/// ports.
pub struct Output<T: Clone + Send + 'static> {
    /// Fans a sent event out to every connected input port.
    broadcaster: Broadcaster<T, ()>,
    /// Identifier to be assigned to the next connection.
    next_line_id: u64,
}
impl<T: Clone + Send + 'static> Output<T> {
    /// Creates a new, disconnected `Output` port.
    pub fn new() -> Self {
        Self::default()
    }

    /// Allocates the identifier for the next connection.
    ///
    /// Factored out of the `connect*` methods, which previously each repeated
    /// this allocate-and-increment sequence.
    ///
    /// # Panics
    ///
    /// Panics if the line identifier space is exhausted.
    fn alloc_line_id(&mut self) -> LineId {
        assert!(self.next_line_id != u64::MAX);
        let line_id = LineId(self.next_line_id);
        self.next_line_id += 1;

        line_id
    }

    /// Adds a connection to an input port of the model specified by the
    /// address.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of type `T` plus, optionally, a scheduler
    /// reference.
    pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>) -> LineId
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Copy,
        S: Send + 'static,
    {
        let line_id = self.alloc_line_id();
        let sender = Box::new(EventSender::new(input, address.into().0));
        self.broadcaster.add(sender, line_id);

        line_id
    }

    /// Adds a connection to an event stream iterator.
    pub fn connect_stream(&mut self) -> (EventStream<T>, LineId) {
        let line_id = self.alloc_line_id();
        let (producer, consumer) = spsc_queue::spsc_queue();
        let sender = Box::new(EventStreamSender::new(producer));
        let event_stream = EventStream::new(consumer);
        self.broadcaster.add(sender, line_id);

        (event_stream, line_id)
    }

    /// Adds a connection to an event slot.
    pub fn connect_slot(&mut self) -> (EventSlot<T>, LineId) {
        let line_id = self.alloc_line_id();
        // The slot is shared between the sender (writer) and the returned
        // `EventSlot` (reader).
        let slot = Arc::new(Mutex::new(None));
        let sender = Box::new(EventSlotSender::new(slot.clone()));
        let event_slot = EventSlot::new(slot);
        self.broadcaster.add(sender, line_id);

        (event_slot, line_id)
    }

    /// Removes the connection specified by the `LineId` parameter.
    ///
    /// It is a logic error to specify a line identifier from another [`Output`]
    /// or [`Requestor`] instance and may result in the disconnection of an
    /// arbitrary endpoint.
    pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
        if self.broadcaster.remove(line_id) {
            Ok(())
        } else {
            Err(LineError {})
        }
    }

    /// Removes all connections.
    pub fn disconnect_all(&mut self) {
        self.broadcaster.clear();
    }

    /// Broadcasts an event to all connected input ports.
    pub async fn send(&mut self, arg: T) {
        self.broadcaster.broadcast_event(arg).await.unwrap();
    }
}
impl<T: Clone + Send + 'static> Default for Output<T> {
fn default() -> Self {
Self {
broadcaster: Broadcaster::default(),
next_line_id: 0,
}
}
}
impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
    /// Reports only the connection count, since senders are not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let connected = self.broadcaster.len();

        write!(f, "Output ({connected} connected ports)")
    }
}
/// A requestor port.
///
/// `Requestor` ports can be connected to replier ports, i.e. to asynchronous
/// model methods that return a value. They broadcast queries to all connected
/// replier ports.
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
    /// Fans a query out to every connected replier port and collects replies.
    broadcaster: Broadcaster<T, R>,
    /// Identifier to be assigned to the next connection.
    next_line_id: u64,
}
impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
    /// Creates a new, disconnected `Requestor` port.
    pub fn new() -> Self {
        Self::default()
    }

    /// Allocates the identifier for the next connection.
    ///
    /// Mirrors the helper of the same name on `Output`, replacing the
    /// previously duplicated allocate-and-increment sequence.
    ///
    /// # Panics
    ///
    /// Panics if the line identifier space is exhausted.
    fn alloc_line_id(&mut self) -> LineId {
        assert!(self.next_line_id != u64::MAX);
        let line_id = LineId(self.next_line_id);
        self.next_line_id += 1;

        line_id
    }

    /// Adds a connection to a replier port of the model specified by the
    /// address.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of type `R` and taking as argument a value of type `T`
    /// plus, optionally, a scheduler reference.
    pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>) -> LineId
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
        S: Send + 'static,
    {
        let line_id = self.alloc_line_id();
        let sender = Box::new(QuerySender::new(replier, address.into().0));
        self.broadcaster.add(sender, line_id);

        line_id
    }

    /// Removes the connection specified by the `LineId` parameter.
    ///
    /// It is a logic error to specify a line identifier from another [`Output`]
    /// or [`Requestor`] instance and may result in the disconnection of an
    /// arbitrary endpoint.
    pub fn disconnect(&mut self, line_id: LineId) -> Result<(), LineError> {
        if self.broadcaster.remove(line_id) {
            Ok(())
        } else {
            Err(LineError {})
        }
    }

    /// Removes all connections.
    pub fn disconnect_all(&mut self) {
        self.broadcaster.clear();
    }

    /// Broadcasts a query to all connected replier ports and returns an
    /// iterator over their replies.
    pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
        self.broadcaster.broadcast_query(arg).await.unwrap()
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
fn default() -> Self {
Self {
broadcaster: Broadcaster::default(),
next_line_id: 0,
}
}
}
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R> {
    /// Reports only the connection count, since senders are not `Debug`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let connected = self.broadcaster.len();

        write!(f, "Requestor ({connected} connected ports)")
    }
}
/// Error raised when the specified line cannot be found.
///
/// Returned by [`Output::disconnect`] and [`Requestor::disconnect`] when no
/// connection matches the provided [`LineId`].
#[derive(Copy, Clone, Debug)]
pub struct LineError {}

View File

@ -1,743 +0,0 @@
use std::future::Future;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use std::task::{Context, Poll};
use diatomic_waker::WakeSink;
use recycle_box::{coerce_box, RecycleBox};
use super::sender::{SendError, Sender};
use super::LineId;
use task_set::TaskSet;
mod task_set;
/// An object that can efficiently broadcast messages to several addresses.
///
/// This object maintains a list of senders associated to each target address.
/// When a message is broadcasted, the sender futures are awaited in parallel.
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
/// does, but with some key differences:
///
/// - tasks and future storage are reusable to avoid repeated allocation, so
///   allocation occurs only after a new sender is added,
/// - the outputs of all sender futures are returned all at once rather than
///   with an asynchronous iterator (a.k.a. async stream); the implementation
///   exploits this behavior by waking the main broadcast future only when all
///   sender futures have been awoken, which strongly reduces overhead since
///   waking a sender task does not actually schedule it on the executor.
pub(super) struct Broadcaster<T: Clone + 'static, R: 'static> {
    /// The list of senders with their associated line identifier.
    senders: Vec<(LineId, Box<dyn Sender<T, R>>)>,
    /// Fields explicitly borrowed by the `BroadcastFuture`.
    shared: Shared<R>,
}
impl<T: Clone + 'static> Broadcaster<T, ()> {
    /// Broadcasts an event to all addresses.
    ///
    /// The no-sender and single-sender cases are special-cased to bypass the
    /// multi-future machinery of `broadcast`.
    pub(super) async fn broadcast_event(&mut self, arg: T) -> Result<(), BroadcastError> {
        match self.senders.as_mut_slice() {
            // No sender.
            [] => Ok(()),
            // One sender: the argument is moved directly, without cloning.
            [sender] => sender.1.send(arg).await.map_err(|_| BroadcastError {}),
            // Multiple senders.
            _ => self.broadcast(arg).await,
        }
    }
}
impl<T: Clone + 'static, R> Broadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1`.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>, id: LineId) {
        self.senders.push((id, sender));
        // One future environment per sender, so that future storage and
        // outputs stay index-aligned with `senders`.
        self.shared.futures_env.push(FutureEnv {
            storage: None,
            output: None,
        });

        self.shared.task_set.resize(self.senders.len());
    }

    /// Removes the first sender with the specified identifier, if any.
    ///
    /// Returns `true` if there was indeed a sender associated to the specified
    /// identifier.
    pub(super) fn remove(&mut self, id: LineId) -> bool {
        if let Some(pos) = self.senders.iter().position(|s| s.0 == id) {
            // `swap_remove` is O(1); sender ordering is not significant, but
            // both vectors must be mutated identically to stay aligned.
            self.senders.swap_remove(pos);
            self.shared.futures_env.swap_remove(pos);
            self.shared.task_set.resize(self.senders.len());

            return true;
        }

        false
    }

    /// Removes all senders.
    pub(super) fn clear(&mut self) {
        self.senders.clear();
        self.shared.futures_env.clear();
        self.shared.task_set.resize(0);
    }

    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.senders.len()
    }

    /// Broadcasts a query to all addresses and collect all responses.
    pub(super) async fn broadcast_query(
        &mut self,
        arg: T,
    ) -> Result<impl Iterator<Item = R> + '_, BroadcastError> {
        match self.senders.as_mut_slice() {
            // No sender.
            [] => {}
            // One sender: send directly and stash the reply in the first
            // future environment so the shared extraction path below works.
            [sender] => {
                let output = sender.1.send(arg).await.map_err(|_| BroadcastError {})?;
                self.shared.futures_env[0].output = Some(output);
            }
            // Multiple senders.
            _ => self.broadcast(arg).await?,
        };

        // At this point all outputs should be available so `unwrap` can be
        // called on the output of each future.
        let outputs = self
            .shared
            .futures_env
            .iter_mut()
            .map(|t| t.output.take().unwrap());

        Ok(outputs)
    }

    /// Efficiently broadcasts a message or a query to multiple addresses.
    ///
    /// This method does not collect the responses from queries.
    fn broadcast(&mut self, arg: T) -> BroadcastFuture<'_, R> {
        let futures_count = self.senders.len();
        // Reuse the futures vector allocation from the previous broadcast.
        let mut futures = recycle_vec(self.shared.storage.take().unwrap_or_default());

        // Broadcast the message and collect all futures.
        for (i, (sender, futures_env)) in self
            .senders
            .iter_mut()
            .zip(self.shared.futures_env.iter_mut())
            .enumerate()
        {
            // Reuse the per-sender future allocation when available.
            let future_cache = futures_env
                .storage
                .take()
                .unwrap_or_else(|| RecycleBox::new(()));

            // Move the argument rather than clone it for the last future.
            if i + 1 == futures_count {
                let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> =
                    coerce_box!(RecycleBox::recycle(future_cache, sender.1.send(arg)));

                futures.push(RecycleBox::into_pin(future));

                break;
            }

            let future: RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + '_> = coerce_box!(
                RecycleBox::recycle(future_cache, sender.1.send(arg.clone()))
            );

            futures.push(RecycleBox::into_pin(future));
        }

        // Generate the global future.
        BroadcastFuture::new(&mut self.shared, futures)
    }
}
impl<T: Clone + 'static, R> Default for Broadcaster<T, R> {
    /// Creates a `Broadcaster` with no connected senders.
    fn default() -> Self {
        // The wake source must be obtained from the sink before the sink is
        // moved into the shared state.
        let wake_sink = WakeSink::new();
        let task_set = TaskSet::new(wake_sink.source());

        Self {
            senders: Vec::new(),
            shared: Shared {
                wake_sink,
                task_set,
                futures_env: Vec::new(),
                storage: None,
            },
        }
    }
}
/// Data related to a sender future.
struct FutureEnv<R> {
    /// Cached storage for the future.
    ///
    /// `None` while a future is alive; the vacated box is stored back here so
    /// that its allocation can be reused by the next broadcast.
    storage: Option<RecycleBox<()>>,
    /// Output of the associated future, set once it has been polled to
    /// completion.
    output: Option<R>,
}
/// A type-erased `Send` future wrapped in a `RecycleBox`.
///
/// The future resolves to the outcome of a single send operation.
type RecycleBoxFuture<'a, R> = RecycleBox<dyn Future<Output = Result<R, SendError>> + Send + 'a>;
/// Fields of `Broadcaster` that are explicitly borrowed by a `BroadcastFuture`.
struct Shared<R> {
    /// Thread-safe waker handle.
    wake_sink: WakeSink,
    /// Tasks associated to the sender futures.
    task_set: TaskSet,
    /// Data related to the sender futures.
    ///
    /// Kept the same length as the list of senders by `add`/`remove`/`clear`.
    futures_env: Vec<FutureEnv<R>>,
    /// Cached storage for the sender futures.
    ///
    /// When it exists, the cached storage is always an empty vector but it
    /// typically has a non-zero capacity. Its purpose is to reuse the
    /// previously allocated capacity when creating new sender futures.
    storage: Option<Vec<Pin<RecycleBoxFuture<'static, R>>>>,
}
/// A future aggregating the outputs of a collection of sender futures.
///
/// The idea is to join all sender futures as efficiently as possible, meaning:
///
/// - the sender futures are polled simultaneously rather than waiting for their
///   completion in a sequential manner,
/// - this future is never woken if it can be proven that at least one of the
///   individual sender tasks will still be awoken,
/// - the storage allocated for the sender futures is always returned to the
///   `Broadcast` object so it can be reused by the next future,
/// - the happy path (all futures immediately ready) is very fast.
pub(super) struct BroadcastFuture<'a, R> {
    /// Reference to the shared fields of the `Broadcast` object.
    shared: &'a mut Shared<R>,
    /// List of all send futures.
    ///
    /// `ManuallyDrop` lets the `Drop` implementation recycle the boxes instead
    /// of deallocating them.
    futures: ManuallyDrop<Vec<Pin<RecycleBoxFuture<'a, R>>>>,
    /// The total count of futures that have not yet been polled to completion.
    pending_futures_count: usize,
    /// State of completion of the future.
    state: FutureState,
}
impl<'a, R> BroadcastFuture<'a, R> {
    /// Creates a new `BroadcastFuture`.
    ///
    /// There must be exactly one future environment per sender future.
    fn new(shared: &'a mut Shared<R>, futures: Vec<Pin<RecycleBoxFuture<'a, R>>>) -> Self {
        let futures_count = futures.len();

        assert!(shared.futures_env.len() == futures_count);

        // Discard any output left over from a previous broadcast.
        for env in &mut shared.futures_env {
            env.output = None;
        }

        Self {
            shared,
            futures: ManuallyDrop::new(futures),
            pending_futures_count: futures_count,
            state: FutureState::Uninit,
        }
    }
}
// On drop, the futures' allocations (both the individual boxes and the vector
// holding them) are handed back to the shared state for reuse.
impl<'a, R> Drop for BroadcastFuture<'a, R> {
    fn drop(&mut self) {
        // Safety: this is safe since `self.futures` is never accessed after it
        // is moved out.
        let mut futures = unsafe { ManuallyDrop::take(&mut self.futures) };

        // Recycle the future-containing boxes.
        for (future, futures_env) in futures.drain(..).zip(self.shared.futures_env.iter_mut()) {
            futures_env.storage = Some(RecycleBox::vacate_pinned(future));
        }

        // Recycle the vector that contained the futures.
        self.shared.storage = Some(recycle_vec(futures));
    }
}
impl<'a, R> Future for BroadcastFuture<'a, R> {
    type Output = Result<(), BroadcastError>;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = &mut *self;

        // Polling after completion is a contract violation.
        assert_ne!(this.state, FutureState::Completed);

        // Poll all sender futures once if this is the first time the broadcast
        // future is polled.
        if this.state == FutureState::Uninit {
            // Prevent spurious wake-ups.
            this.shared.task_set.discard_scheduled();

            for task_idx in 0..this.futures.len() {
                let future_env = &mut this.shared.futures_env[task_idx];
                let future = &mut this.futures[task_idx];
                // Each sender future is polled with a dedicated per-task
                // waker so that wake-ups can be attributed to a task index.
                let task_waker_ref = this.shared.task_set.waker_of(task_idx);
                let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                match future.as_mut().poll(task_cx_ref) {
                    Poll::Ready(Ok(output)) => {
                        future_env.output = Some(output);
                        this.pending_futures_count -= 1;
                    }
                    Poll::Ready(Err(_)) => {
                        // A single failed send fails the whole broadcast.
                        this.state = FutureState::Completed;
                        return Poll::Ready(Err(BroadcastError {}));
                    }
                    Poll::Pending => {}
                }
            }

            if this.pending_futures_count == 0 {
                // Happy path: everything was immediately ready.
                this.state = FutureState::Completed;
                return Poll::Ready(Ok(()));
            }

            this.state = FutureState::Pending;
        }

        // Repeatedly poll the futures of all scheduled tasks until there are no
        // more scheduled tasks.
        loop {
            // Only register the waker if it is probable that we won't find any
            // scheduled task.
            if !this.shared.task_set.has_scheduled() {
                this.shared.wake_sink.register(cx.waker());
            }

            // Retrieve the indices of the scheduled tasks if any. If there are
            // no scheduled tasks, `Poll::Pending` is returned and this future
            // will be woken again when enough tasks have been scheduled.
            let scheduled_tasks = match this
                .shared
                .task_set
                .steal_scheduled(this.pending_futures_count)
            {
                Some(st) => st,
                None => return Poll::Pending,
            };

            for task_idx in scheduled_tasks {
                let future_env = &mut this.shared.futures_env[task_idx];

                // Do not poll completed futures.
                if future_env.output.is_some() {
                    continue;
                }

                let future = &mut this.futures[task_idx];
                let task_waker_ref = this.shared.task_set.waker_of(task_idx);
                let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                match future.as_mut().poll(task_cx_ref) {
                    Poll::Ready(Ok(output)) => {
                        future_env.output = Some(output);
                        this.pending_futures_count -= 1;
                    }
                    Poll::Ready(Err(_)) => {
                        this.state = FutureState::Completed;
                        return Poll::Ready(Err(BroadcastError {}));
                    }
                    Poll::Pending => {}
                }
            }

            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                return Poll::Ready(Ok(()));
            }
        }
    }
}
/// Error returned when a message could not be delivered.
///
/// Produced when any of the sender futures resolves to a `SendError`; it
/// carries no detail about which sender failed.
#[derive(Debug)]
pub(super) struct BroadcastError {}
/// State of completion of a `BroadcastFuture`.
///
/// Deriving `Eq` alongside `PartialEq` documents that the equality is total
/// (clippy: `derive_partial_eq_without_eq`); `Clone`/`Copy` are free for a
/// field-less enum.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum FutureState {
    /// The sender futures have not been polled yet.
    Uninit,
    /// At least one sender future is still pending.
    Pending,
    /// The future has resolved and must not be polled again.
    Completed,
}
/// Drops all items in a vector and returns an empty vector of another type,
/// preserving the allocation and capacity of the original vector provided that
/// the layouts of `T` and `U` are compatible.
///
/// # Panics
///
/// This will panic in debug mode if the layouts are incompatible.
fn recycle_vec<T, U>(mut v: Vec<T>) -> Vec<U> {
    debug_assert_eq!(
        std::alloc::Layout::new::<T>(),
        std::alloc::Layout::new::<U>()
    );

    let original_capacity = v.capacity();
    v.clear();

    // No unsafe here: the `collect` implementation reuses the emptied
    // vector's allocation when the item layouts match; the closure can never
    // run since the vector holds no items.
    let recycled: Vec<U> = v.into_iter().map(|_| unreachable!()).collect();
    debug_assert_eq!(recycled.capacity(), original_capacity);

    recycled
}
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::thread;

    use futures_executor::block_on;

    use crate::channel::Receiver;
    use crate::time::Scheduler;
    use crate::time::{MonotonicTime, TearableAtomicTime};
    use crate::util::priority_queue::PriorityQueue;
    use crate::util::sync_cell::SyncCell;

    use super::super::*;
    use super::*;

    /// Test model accumulating the values it receives in a shared counter.
    struct Counter {
        inner: Arc<AtomicUsize>,
    }
    impl Counter {
        fn new(counter: Arc<AtomicUsize>) -> Self {
            Self { inner: counter }
        }
        /// Input port: adds `by` to the shared counter.
        async fn inc(&mut self, by: usize) {
            self.inner.fetch_add(by, Ordering::Relaxed);
        }
        /// Replier port: adds `by` to the shared counter and replies with the
        /// previous value.
        async fn fetch_inc(&mut self, by: usize) -> usize {
            // Return the previous value directly (clippy: `let_and_return`).
            self.inner.fetch_add(by, Ordering::Relaxed)
        }
    }
    impl Model for Counter {}

    #[test]
    fn broadcast_event_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = Broadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(EventSender::new(Counter::inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(broadcaster.broadcast_event(1)).unwrap();
        });

        let counter = Arc::new(AtomicUsize::new(0));
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }

    #[test]
    fn broadcast_query_smoke() {
        const N_RECV: usize = 4;

        let mut mailboxes = Vec::new();
        let mut broadcaster = Broadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(QuerySender::new(Counter::fetch_inc, address));

            broadcaster.add(sender, LineId(id as u64));
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            let iter = block_on(broadcaster.broadcast_query(1)).unwrap();
            // Sum the replies with `Iterator::sum` (clippy: `unnecessary_fold`).
            let sum: usize = iter.sum();

            assert_eq!(sum, N_RECV * (N_RECV - 1) / 2); // sum of {0, 1, 2, ..., (N_RECV - 1)}
        });

        let counter = Arc::new(AtomicUsize::new(0));
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut counter = Counter::new(counter.clone());

                    move || {
                        let dummy_address = Receiver::new(1).sender();
                        let dummy_priority_queue = Arc::new(Mutex::new(PriorityQueue::new()));
                        let dummy_time =
                            SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
                        let dummy_scheduler =
                            Scheduler::new(dummy_address, dummy_priority_queue, dummy_time);
                        block_on(mailbox.recv(&mut counter, &dummy_scheduler)).unwrap();
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(counter.load(Ordering::Relaxed), N_RECV);
    }
}
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use futures_channel::mpsc;
    use futures_util::StreamExt;
    use loom::model::Builder;
    use loom::sync::atomic::{AtomicBool, Ordering};
    use loom::thread;
    use waker_fn::waker_fn;

    use super::super::sender::RecycledFuture;
    use super::*;

    // An event that may be woken spuriously.
    struct TestEvent<R> {
        // Final and spurious wake-ups are distinguished by the payload:
        // `Some(value)` completes the event, `None` is a spurious wake-up.
        receiver: mpsc::UnboundedReceiver<Option<R>>,
        fut_storage: Option<RecycleBox<()>>,
    }
    impl<R: Send> Sender<(), R> for TestEvent<R> {
        fn send(&mut self, _arg: ()) -> RecycledFuture<'_, Result<R, SendError>> {
            let fut_storage = &mut self.fut_storage;
            let receiver = &mut self.receiver;

            RecycledFuture::new(fut_storage, async {
                // Skip spurious (`None`) items until a final value arrives.
                let mut stream = Box::pin(receiver.filter_map(|item| async { item }));

                Ok(stream.next().await.unwrap())
            })
        }
    }

    // An object that can wake a `TestEvent`.
    #[derive(Clone)]
    struct TestEventWaker<R> {
        sender: mpsc::UnboundedSender<Option<R>>,
    }
    impl<R> TestEventWaker<R> {
        // Wakes the event without completing it.
        fn wake_spurious(&self) {
            let _ = self.sender.unbounded_send(None);
        }
        // Wakes the event and completes it with `value`.
        fn wake_final(&self, value: R) {
            let _ = self.sender.unbounded_send(Some(value));
        }
    }

    // Creates a connected event/waker pair.
    fn test_event<R>() -> (TestEvent<R>, TestEventWaker<R>) {
        let (sender, receiver) = mpsc::unbounded();

        (
            TestEvent {
                receiver,
                fut_storage: None,
            },
            TestEventWaker { sender },
        )
    }

    #[test]
    fn loom_broadcast_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();
            let (test_event3, waker3) = test_event::<usize>();

            let mut broadcaster = Broadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));
            broadcaster.add(Box::new(test_event3), LineId(3));

            let mut fut = Box::pin(broadcaster.broadcast_query(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th3 = thread::spawn(move || waker3.wake_final(42));

            let mut schedule_count = 0;
            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), Some(42));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
                schedule_count += 1;
                assert!(schedule_count <= 1);
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th3.join().unwrap();

            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), Some(42));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }

    #[test]
    fn loom_broadcast_spurious() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();

            let mut broadcaster = Broadcaster::default();
            broadcaster.add(Box::new(test_event1), LineId(1));
            broadcaster.add(Box::new(test_event2), LineId(2));

            let mut fut = Box::pin(broadcaster.broadcast_query(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            let spurious_waker = waker1.clone();
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());

            let mut schedule_count = 0;
            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
                schedule_count += 1;
                assert!(schedule_count <= 2);
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th_spurious.join().unwrap();

            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }
}

View File

@ -1,245 +0,0 @@
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use recycle_box::{coerce_box, RecycleBox};
use crate::channel;
use crate::model::{InputFn, Model, ReplierFn};
use crate::util::spsc_queue;
/// Abstraction over `EventSender` and `QuerySender`.
pub(super) trait Sender<T, R>: Send {
    /// Asynchronously sends the payload `arg`, resolving to a reply of type
    /// `R` or to a `SendError` on failure.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>>;
}
/// An object that can send a payload to a model.
pub(super) struct EventSender<M: 'static, F, T, S> {
    /// Input function to call on the target model.
    func: F,
    /// Channel endpoint for the target model.
    sender: channel::Sender<M>,
    /// Cached storage for the send future.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M: Send, F, T, S> EventSender<M, F, T, S>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + 'static,
{
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
Self {
func,
sender,
fut_storage: None,
_phantom_closure: PhantomData,
_phantom_closure_marker: PhantomData,
}
}
}
impl<M: Send, F, T, S> Sender<T, ()> for EventSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Copy,
    T: Send + 'static,
    S: Send,
{
    /// Sends `arg` to the model's input function, resolving to `SendError` if
    /// the channel send fails.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
        let func = self.func;

        // The closure executed on the model's side calls the input function
        // and stores its future in the box provided by the channel.
        let fut = self.sender.send(move |model, scheduler, recycle_box| {
            let fut = func.call(model, arg, scheduler);

            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });

        RecycledFuture::new(&mut self.fut_storage, async move {
            fut.await.map_err(|_| SendError {})
        })
    }
}
/// An object that can send a payload to a model and retrieve a response.
pub(super) struct QuerySender<M: 'static, F, T, R, S> {
    /// Replier function to call on the target model.
    func: F,
    /// Channel endpoint for the target model.
    sender: channel::Sender<M>,
    /// Reusable receiver for the model's reply.
    receiver: multishot::Receiver<R>,
    /// Cached storage for the send future.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, F, T, R, S> QuerySender<M, F, T, R, S>
where
M: Model,
F: for<'a> ReplierFn<'a, M, T, R, S>,
T: Send + 'static,
R: Send + 'static,
{
pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
Self {
func,
sender,
receiver: multishot::Receiver::new(),
fut_storage: None,
_phantom_closure: PhantomData,
_phantom_closure_marker: PhantomData,
}
}
}
impl<M, F, T, R, S> Sender<T, R> for QuerySender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Copy,
    T: Send + 'static,
    R: Send + 'static,
    S: Send,
{
    /// Sends `arg` to the model's replier function and resolves to the reply.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<R, SendError>> {
        let func = self.func;

        let sender = &mut self.sender;
        let reply_receiver = &mut self.receiver;
        let fut_storage = &mut self.fut_storage;

        // The previous future generated by this method should have been polled
        // to completion so a new sender should be readily available.
        let reply_sender = reply_receiver.sender().unwrap();

        let send_fut = sender.send(move |model, scheduler, recycle_box| {
            // On the model's side: call the replier function, then forward the
            // reply through the one-shot channel.
            let fut = async move {
                let reply = func.call(model, arg, scheduler).await;
                reply_sender.send(reply);
            };

            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });

        RecycledFuture::new(fut_storage, async move {
            // Send the message.
            send_fut.await.map_err(|_| SendError {})?;

            // Wait until the message is processed and the reply is sent back.
            // If an error is received, it most likely means the mailbox was
            // dropped before the message was processed.
            reply_receiver.recv().await.map_err(|_| SendError {})
        })
    }
}
/// An object that can send a payload to an unbounded queue.
pub(super) struct EventStreamSender<T> {
    /// Producer side of the SPSC queue.
    producer: spsc_queue::Producer<T>,
    /// Cached storage for the send future.
    fut_storage: Option<RecycleBox<()>>,
}
impl<T> EventStreamSender<T> {
    /// Creates a sender pushing payloads onto the given queue producer.
    pub(super) fn new(producer: spsc_queue::Producer<T>) -> Self {
        Self {
            producer,
            fut_storage: None,
        }
    }
}
impl<T> Sender<T, ()> for EventStreamSender<T>
where
    T: Send + 'static,
{
    /// Pushes `arg` onto the queue, resolving to `SendError` if the push
    /// fails.
    fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
        let Self {
            producer,
            fut_storage,
        } = self;

        RecycledFuture::new(fut_storage, async move {
            match producer.push(arg) {
                Ok(()) => Ok(()),
                Err(_) => Err(SendError {}),
            }
        })
    }
}
/// An object that can send a payload to a mutex-protected slot.
pub(super) struct EventSlotSender<T> {
    /// Shared slot holding at most one payload.
    slot: Arc<Mutex<Option<T>>>,
    /// Cached storage for the send future.
    fut_storage: Option<RecycleBox<()>>,
}
impl<T> EventSlotSender<T> {
    /// Creates a sender writing payloads into the given shared slot.
    pub(super) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
        Self {
            slot,
            fut_storage: None,
        }
    }
}
impl<T> Sender<T, ()> for EventSlotSender<T>
where
T: Send + 'static,
{
fn send(&mut self, arg: T) -> RecycledFuture<'_, Result<(), SendError>> {
let slot = &*self.slot;
RecycledFuture::new(&mut self.fut_storage, async move {
let mut slot = slot.lock().unwrap();
*slot = Some(arg);
Ok(())
})
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
/// Error returned when the mailbox was closed or dropped.
///
/// This is a zero-sized marker error; all context is conveyed by the
/// `Display` message.
pub(super) struct SendError {}

impl fmt::Display for SendError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "sending message into a closed mailbox")
    }
}

impl Error for SendError {}
/// A pinned, type-erased future whose box is returned to its lender on drop so
/// the allocation can be reused.
pub(super) struct RecycledFuture<'a, T> {
    /// The pinned, boxed future.
    ///
    /// `ManuallyDrop` lets the `Drop` implementation vacate the box instead of
    /// deallocating it.
    fut: ManuallyDrop<Pin<RecycleBox<dyn Future<Output = T> + Send + 'a>>>,
    /// Slot to which the vacated box is returned when this future is dropped.
    lender_box: &'a mut Option<RecycleBox<()>>,
}
impl<'a, T> RecycledFuture<'a, T> {
    /// Wraps `fut` in a box borrowed from `lender_box`, allocating a fresh box
    /// only if the lender has none cached.
    pub(super) fn new<F: Future<Output = T> + Send + 'a>(
        lender_box: &'a mut Option<RecycleBox<()>>,
        fut: F,
    ) -> Self {
        // Take the cached box if available, otherwise allocate a new one.
        let vacated_box = lender_box.take().unwrap_or_else(|| RecycleBox::new(()));
        // Recycle the box's allocation to store the new future, erasing its
        // concrete type.
        let fut: RecycleBox<dyn Future<Output = T> + Send + 'a> =
            coerce_box!(RecycleBox::recycle(vacated_box, fut));

        Self {
            fut: ManuallyDrop::new(RecycleBox::into_pin(fut)),
            lender_box,
        }
    }
}
// On drop, the box holding the future is vacated and handed back to the
// lender so its allocation can be reused by the next `RecycledFuture`.
impl<'a, T> Drop for RecycledFuture<'a, T> {
    fn drop(&mut self) {
        // Return the box to the lender.
        //
        // Safety: taking the `fut` member is safe since it is never used again.
        *self.lender_box = Some(RecycleBox::vacate_pinned(unsafe {
            ManuallyDrop::take(&mut self.fut)
        }));
    }
}
impl<'a, T> Future for RecycledFuture<'a, T> {
    type Output = T;

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Simply delegate to the inner pinned future.
        self.fut.as_mut().poll(cx)
    }
}

View File

@ -1,635 +0,0 @@
//! Discrete-event simulation management.
//!
//! This module contains most notably the [`Simulation`] environment, the
//! [`SimInit`] simulation builder, the [`Mailbox`] and [`Address`] types as
//! well as miscellaneous other types related to simulation management.
//!
//! # Simulation lifecycle
//!
//! The lifecycle of a simulation bench typically comprises the following
//! stages:
//!
//! 1. instantiation of models and their [`Mailbox`]es,
//! 2. connection of the models' output/requestor ports to input/replier ports
//! using the [`Address`]es of the target models,
//! 3. instantiation of a [`SimInit`] simulation builder and migration of all
//! models and mailboxes to the builder with [`SimInit::add_model()`],
//! 4. initialization of a [`Simulation`] instance with [`SimInit::init()`] or
//! [`SimInit::init_with_clock()`],
//! 5. discrete-time simulation, which typically involves scheduling events and
//! incrementing simulation time while observing the models outputs.
//!
//! Most information necessary to run a simulation is available in the root
//! crate [documentation](crate) and in the [`SimInit`] and [`Simulation`]
//! documentation. The next section complements this information with a set of
//! practical recommendations that can help run and troubleshoot simulations.
//!
//! # Practical considerations
//!
//! ## Mailbox capacity
//!
//! A [`Mailbox`] is a buffer that stores incoming events and queries for a
//! single model instance. Mailboxes have a bounded capacity, which defaults to
//! [`Mailbox::DEFAULT_CAPACITY`].
//!
//! The capacity is a trade-off: too large a capacity may lead to excessive
//! memory usage, whereas too small a capacity can hamper performance and
//! increase the likelihood of deadlocks (see next section). Note that, because
//! a mailbox may receive events or queries of various sizes, it is actually the
//! largest message sent that ultimately determines the amount of allocated
//! memory.
//!
//! The default capacity should prove a reasonable trade-off in most cases, but
//! for situations where it is not appropriate, it is possible to instantiate
//! mailboxes with a custom capacity by using [`Mailbox::with_capacity()`]
//! instead of [`Mailbox::new()`].
//!
//! ## Avoiding deadlocks
//!
//! While the underlying architecture of Asynchronix—the actor model—should
//! prevent most race conditions (including obviously data races which are not
//! possible in safe Rust) it is still possible in theory to generate deadlocks.
//! Though rare in practice, these may occur due to one of the below:
//!
//! 1. *query loopback*: if a model sends a query which is further forwarded by
//! other models until it loops back to the initial model, that model would
//! in effect wait for its own response and block,
//! 2. *mailbox saturation*: if several models concurrently send to one another
//! a very large number of messages in succession, these models may end up
//! saturating all mailboxes, at which point they will wait for the other's
//! mailboxes to free space so they can send the next message, eventually
//! preventing all of them from making further progress.
//!
//! The first scenario is usually very easy to avoid and is typically the result
//! of an improper assembly of models. Because requestor ports are only used
//! sparingly in idiomatic simulations, this situation should be relatively
//! exceptional.
//!
//! The second scenario is rare in well-behaving models and if it occurs, it is
//! most typically at the very beginning of a simulation when all models
//! simultaneously send events during the call to
//! [`Model::init()`](crate::model::Model::init). If such a large amount of
//! concurrent messages is deemed normal behavior, the issue can be readily
//! remedied by increasing the capacity of the saturated mailboxes.
//!
//! At the moment, Asynchronix is unfortunately not able to discriminate between
//! such pathological deadlocks and the "expected" deadlock that occurs when all
//! events in a given time slice have completed and all models are starved on an
//! empty mailbox. Consequently, blocking methods such as [`SimInit::init()`],
//! [`Simulation::step()`], [`Simulation::send_event()`], etc., will return
//! without error after a pathological deadlock, leaving the user responsible
//! for inferring the deadlock from the behavior of the simulation in the next
//! steps. This is obviously not ideal, but is hopefully only a temporary state
//! of things until a more precise deadlock detection algorithm is implemented.
//!
//! ## Modifying connections during simulation
//!
//! Although uncommon, there is sometimes a need for connecting and/or
//! disconnecting models after they have been migrated to the simulation.
//! Likewise, one may want to connect or disconnect an [`EventSlot`] or
//! [`EventStream`] after the simulation has been instantiated.
//!
//! There is actually a very simple solution to this problem: since the
//! [`InputFn`](crate::model::InputFn) trait also matches closures of type
//! `FnOnce(&mut impl Model)`, it is enough to invoke
//! [`Simulation::send_event()`] with a closure that connects or disconnects a
//! port, such as:
//!
//! ```
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::time::{MonotonicTime, Scheduler};
//! # use asynchronix::simulation::{Mailbox, SimInit};
//! # pub struct ModelA {
//! # pub output: Output<i32>,
//! # }
//! # impl Model for ModelA {};
//! # pub struct ModelB {}
//! # impl ModelB {
//! # pub fn input(&mut self, value: i32) {}
//! # }
//! # impl Model for ModelB {};
//! # let modelA_addr = Mailbox::<ModelA>::new().address();
//! # let modelB_addr = Mailbox::<ModelB>::new().address();
//! # let mut simu = SimInit::new().init(MonotonicTime::EPOCH);
//! simu.send_event(
//! |m: &mut ModelA| {
//! m.output.connect(ModelB::input, modelB_addr);
//! },
//! (),
//! &modelA_addr
//! );
//! ```
mod endpoints;
mod mailbox;
mod sim_init;
pub use endpoints::{EventSlot, EventStream};
pub use mailbox::{Address, Mailbox};
pub use sim_init::SimInit;
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::sync::{Arc, Mutex, MutexGuard};
use std::time::Duration;
use recycle_box::{coerce_box, RecycleBox};
use crate::executor::Executor;
use crate::model::{InputFn, Model, ReplierFn};
use crate::time::{
self, Clock, Deadline, EventKey, MonotonicTime, NoClock, ScheduledEvent, SchedulerQueue,
SchedulingError, TearableAtomicTime,
};
use crate::util::futures::SeqFuture;
use crate::util::slot;
use crate::util::sync_cell::SyncCell;
/// Simulation environment.
///
/// A `Simulation` is created by calling
/// [`SimInit::init()`](crate::simulation::SimInit::init) or
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock)
/// method on a simulation initializer. It contains an asynchronous executor
/// that runs all simulation models added beforehand to
/// [`SimInit`](crate::simulation::SimInit).
///
/// A [`Simulation`] object also manages an event scheduling queue and
/// simulation time. The scheduling queue can be accessed from the simulation
/// itself, but also from models via the optional
/// [`&Scheduler`](time::Scheduler) argument of input and replier port methods.
/// Likewise, simulation time can be accessed with the [`Simulation::time()`]
/// method, or from models with the [`Scheduler::time()`](time::Scheduler::time)
/// method.
///
/// Events and queries can be scheduled immediately, *i.e.* for the current
/// simulation time, using [`send_event()`](Simulation::send_event) and
/// [`send_query()`](Simulation::send_query). Calling these methods will block
/// until all computations triggered by such event or query have completed. In
/// the case of queries, the response is returned.
///
/// Events can also be scheduled at a future simulation time using one of the
/// [`schedule_*()`](Simulation::schedule_event) method. These methods queue an
/// event without blocking.
///
/// Finally, the [`Simulation`] instance manages simulation time. A call to
/// [`step()`](Simulation::step) will:
///
/// 1. increment simulation time until that of the next scheduled event in
/// chronological order, then
/// 2. call [`Clock::synchronize()`](time::Clock::synchronize) which, unless the
/// simulation is configured to run as fast as possible, blocks until the
/// desired wall clock time, and finally
/// 3. run all computations scheduled for the new simulation time.
///
/// The [`step_by()`](Simulation::step_by) and
/// [`step_until()`](Simulation::step_until) methods operate similarly but
/// iterate until the target simulation time has been reached.
pub struct Simulation {
    /// Executor running the simulation models.
    executor: Executor,
    /// Queue of events scheduled for future simulation times.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    /// Current simulation time.
    time: SyncCell<TearableAtomicTime>,
    /// Wall-clock synchronization strategy (a `NoClock` placeholder unless a
    /// clock was provided at construction).
    clock: Box<dyn Clock>,
}
impl Simulation {
/// Creates a new `Simulation`.
///
/// The simulation is not tied to a wall clock: a `NoClock` placeholder is
/// installed, so no synchronization is performed.
pub(crate) fn new(
    executor: Executor,
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    time: SyncCell<TearableAtomicTime>,
) -> Self {
    Self {
        executor,
        scheduler_queue,
        time,
        clock: Box::new(NoClock::new()),
    }
}
/// Creates a new `Simulation` with the specified clock.
///
/// The clock is boxed and used to gate event processing during calls to the
/// `step*` methods.
pub(crate) fn with_clock(
    executor: Executor,
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    time: SyncCell<TearableAtomicTime>,
    clock: impl Clock + 'static,
) -> Self {
    Self {
        executor,
        scheduler_queue,
        time,
        clock: Box::new(clock),
    }
}
/// Returns the current simulation time.
pub fn time(&self) -> MonotonicTime {
    // Snapshot of the shared simulation time cell.
    self.time.read()
}
/// Advances simulation time to that of the next scheduled event, processing
/// that event as well as all other events scheduled for the same time.
///
/// Processing is gated by a (possibly blocking) call to
/// [`Clock::synchronize()`](time::Clock::synchronize) on the configured
/// simulation clock. This method blocks until all newly processed events
/// have completed.
pub fn step(&mut self) {
    // Step to the next event with no upper bound on its scheduled time.
    self.step_to_next_bounded(MonotonicTime::MAX);
}
/// Iteratively advances the simulation time by the specified duration, as
/// if by calling [`Simulation::step()`] repeatedly.
///
/// This method blocks until all events scheduled up to the specified target
/// time have completed. The simulation time upon completion is equal to the
/// initial simulation time incremented by the specified duration, whether
/// or not an event was scheduled for that time.
pub fn step_by(&mut self, duration: Duration) {
let target_time = self.time.read() + duration;
self.step_until_unchecked(target_time);
}
/// Iteratively advances the simulation time until the specified deadline,
/// as if by calling [`Simulation::step()`] repeatedly.
///
/// This method blocks until all events scheduled up to the specified target
/// time have completed. The simulation time upon completion is equal to the
/// specified target time, whether or not an event was scheduled for that
/// time.
pub fn step_until(&mut self, target_time: MonotonicTime) -> Result<(), SchedulingError> {
if self.time.read() >= target_time {
return Err(SchedulingError::InvalidScheduledTime);
}
self.step_until_unchecked(target_time);
Ok(())
}
/// Schedules an event at a future time.
///
/// An error is returned if the specified time is not in the future of the
/// current simulation time.
///
/// Events scheduled for the same time and targeting the same model are
/// guaranteed to be processed according to the scheduling order.
///
/// See also: [`time::Scheduler::schedule_event`].
pub fn schedule_event<M, F, T, S>(
&mut self,
deadline: impl Deadline,
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<(), SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
S: Send + 'static,
{
let now = self.time();
let time = deadline.into_time(now);
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
time::schedule_event_at_unchecked(time, func, arg, address.into().0, &self.scheduler_queue);
Ok(())
}
/// Schedules a cancellable event at a future time and returns an event key.
///
/// An error is returned if the specified time is not in the future of the
/// current simulation time.
///
/// Events scheduled for the same time and targeting the same model are
/// guaranteed to be processed according to the scheduling order.
///
/// See also: [`time::Scheduler::schedule_keyed_event`].
pub fn schedule_keyed_event<M, F, T, S>(
&mut self,
deadline: impl Deadline,
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<EventKey, SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
S: Send + 'static,
{
let now = self.time();
let time = deadline.into_time(now);
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
let event_key = time::schedule_keyed_event_at_unchecked(
time,
func,
arg,
address.into().0,
&self.scheduler_queue,
);
Ok(event_key)
}
/// Schedules a periodically recurring event at a future time.
///
/// An error is returned if the specified time is not in the future of the
/// current simulation time or if the specified period is null.
///
/// Events scheduled for the same time and targeting the same model are
/// guaranteed to be processed according to the scheduling order.
///
/// See also: [`time::Scheduler::schedule_periodic_event`].
pub fn schedule_periodic_event<M, F, T, S>(
&mut self,
deadline: impl Deadline,
period: Duration,
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<(), SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
S: Send + 'static,
{
let now = self.time();
let time = deadline.into_time(now);
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
if period.is_zero() {
return Err(SchedulingError::NullRepetitionPeriod);
}
time::schedule_periodic_event_at_unchecked(
time,
period,
func,
arg,
address.into().0,
&self.scheduler_queue,
);
Ok(())
}
/// Schedules a cancellable, periodically recurring event at a future time
/// and returns an event key.
///
/// An error is returned if the specified time is not in the future of the
/// current simulation time or if the specified period is null.
///
/// Events scheduled for the same time and targeting the same model are
/// guaranteed to be processed according to the scheduling order.
///
/// See also: [`time::Scheduler::schedule_keyed_periodic_event`].
pub fn schedule_keyed_periodic_event<M, F, T, S>(
&mut self,
deadline: impl Deadline,
period: Duration,
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<EventKey, SchedulingError>
where
M: Model,
F: for<'a> InputFn<'a, M, T, S> + Clone,
T: Send + Clone + 'static,
S: Send + 'static,
{
let now = self.time();
let time = deadline.into_time(now);
if now >= time {
return Err(SchedulingError::InvalidScheduledTime);
}
if period.is_zero() {
return Err(SchedulingError::NullRepetitionPeriod);
}
let event_key = time::schedule_periodic_keyed_event_at_unchecked(
time,
period,
func,
arg,
address.into().0,
&self.scheduler_queue,
);
Ok(event_key)
}
/// Sends and processes an event, blocking until completion.
///
/// Simulation time remains unchanged.
pub fn send_event<M, F, T, S>(&mut self, func: F, arg: T, address: impl Into<Address<M>>)
where
M: Model,
F: for<'a> InputFn<'a, M, T, S>,
T: Send + Clone + 'static,
{
let sender = address.into().0;
let fut = async move {
// Ignore send errors.
let _ = sender
.send(
move |model: &mut M,
scheduler,
recycle_box: RecycleBox<()>|
-> RecycleBox<dyn Future<Output = ()> + Send + '_> {
let fut = func.call(model, arg, scheduler);
coerce_box!(RecycleBox::recycle(recycle_box, fut))
},
)
.await;
};
self.executor.spawn_and_forget(fut);
self.executor.run();
}
/// Sends and processes a query, blocking until completion.
///
/// Simulation time remains unchanged.
pub fn send_query<M, F, T, R, S>(
&mut self,
func: F,
arg: T,
address: impl Into<Address<M>>,
) -> Result<R, QueryError>
where
M: Model,
F: for<'a> ReplierFn<'a, M, T, R, S>,
T: Send + Clone + 'static,
R: Send + 'static,
{
let (reply_writer, mut reply_reader) = slot::slot();
let sender = address.into().0;
let fut = async move {
// Ignore send errors.
let _ = sender
.send(
move |model: &mut M,
scheduler,
recycle_box: RecycleBox<()>|
-> RecycleBox<dyn Future<Output = ()> + Send + '_> {
let fut = async move {
let reply = func.call(model, arg, scheduler).await;
let _ = reply_writer.write(reply);
};
coerce_box!(RecycleBox::recycle(recycle_box, fut))
},
)
.await;
};
self.executor.spawn_and_forget(fut);
self.executor.run();
reply_reader.try_read().map_err(|_| QueryError {})
}
/// Advances simulation time to that of the next scheduled event if its
/// scheduling time does not exceed the specified bound, processing that
/// event as well as all other events scheduled for the same time.
///
/// If at least one event was found that satisfied the time bound, the
/// corresponding new simulation time is returned.
fn step_to_next_bounded(&mut self, upper_time_bound: MonotonicTime) -> Option<MonotonicTime> {
// Function pulling the next event. If the event is periodic, it is
// immediately re-scheduled.
fn pull_next_event(
scheduler_queue: &mut MutexGuard<SchedulerQueue>,
) -> Box<dyn ScheduledEvent> {
let ((time, channel_id), event) = scheduler_queue.pull().unwrap();
if let Some((event_clone, period)) = event.next() {
scheduler_queue.insert((time + period, channel_id), event_clone);
}
event
}
// Closure returning the next key which time stamp is no older than the
// upper bound, if any. Cancelled events are pulled and discarded.
let peek_next_key = |scheduler_queue: &mut MutexGuard<SchedulerQueue>| {
loop {
match scheduler_queue.peek() {
Some((&k, t)) if k.0 <= upper_time_bound => {
if !t.is_cancelled() {
break Some(k);
}
// Discard cancelled events.
scheduler_queue.pull();
}
_ => break None,
}
}
};
// Move to the next scheduled time.
let mut scheduler_queue = self.scheduler_queue.lock().unwrap();
let mut current_key = peek_next_key(&mut scheduler_queue)?;
self.time.write(current_key.0);
loop {
let event = pull_next_event(&mut scheduler_queue);
let mut next_key = peek_next_key(&mut scheduler_queue);
if next_key != Some(current_key) {
// Since there are no other events targeting the same mailbox
// and the same time, the event is spawned immediately.
event.spawn_and_forget(&self.executor);
} else {
// To ensure that their relative order of execution is
// preserved, all event targeting the same mailbox are executed
// sequentially within a single compound future.
let mut event_sequence = SeqFuture::new();
event_sequence.push(event.into_future());
loop {
let event = pull_next_event(&mut scheduler_queue);
event_sequence.push(event.into_future());
next_key = peek_next_key(&mut scheduler_queue);
if next_key != Some(current_key) {
break;
}
}
// Spawn a compound future that sequentially polls all events
// targeting the same mailbox.
self.executor.spawn_and_forget(event_sequence);
}
current_key = match next_key {
// If the next event is scheduled at the same time, update the
// key and continue.
Some(k) if k.0 == current_key.0 => k,
// Otherwise wait until all events have completed and return.
_ => {
drop(scheduler_queue); // make sure the queue's mutex is released.
let current_time = current_key.0;
// TODO: check synchronization status?
self.clock.synchronize(current_time);
self.executor.run();
return Some(current_time);
}
};
}
}
/// Iteratively advances simulation time and processes all events scheduled
/// up to the specified target time.
///
/// Once the method returns it is guaranteed that (i) all events scheduled
/// up to the specified target time have completed and (ii) the final
/// simulation time matches the target time.
///
/// This method does not check whether the specified time lies in the future
/// of the current simulation time.
fn step_until_unchecked(&mut self, target_time: MonotonicTime) {
loop {
match self.step_to_next_bounded(target_time) {
// The target time was reached exactly.
Some(t) if t == target_time => return,
// No events are scheduled before or at the target time.
None => {
// Update the simulation time.
self.time.write(target_time);
return;
}
// The target time was not reached yet.
_ => {}
}
}
}
}
impl fmt::Debug for Simulation {
    /// Formats the simulation state, exposing only the current simulation
    /// time; the remaining fields are elided with `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut debug = formatter.debug_struct("Simulation");
        debug.field("time", &self.time.read());
        debug.finish_non_exhaustive()
    }
}
/// Error returned when a query did not obtain a response.
///
/// This can happen either because the model targeted by the address was not
/// added to the simulation or due to a simulation deadlock.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub struct QueryError {}

impl fmt::Display for QueryError {
    /// Writes a fixed, human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("the query did not receive a response")
    }
}

impl Error for QueryError {}

View File

@ -1,69 +0,0 @@
use std::fmt;
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
use crate::util::spsc_queue;
/// An iterator that returns all events that were broadcast by an output port.
///
/// Events are returned in first-in-first-out order. Note that even if the
/// iterator returns `None`, it may still produce more items after simulation
/// time is incremented.
pub struct EventStream<T> {
    /// Consumer side of the SPSC queue the output port pushes events into.
    consumer: spsc_queue::Consumer<T>,
}

impl<T> EventStream<T> {
    /// Creates a new `EventStream` draining the provided consumer handle.
    pub(crate) fn new(consumer: spsc_queue::Consumer<T>) -> Self {
        Self { consumer }
    }
}

impl<T> Iterator for EventStream<T> {
    type Item = T;

    /// Pops the oldest pending event, if any; non-blocking.
    fn next(&mut self) -> Option<Self::Item> {
        self.consumer.pop()
    }
}

impl<T> fmt::Debug for EventStream<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The queue contents are not shown; only the type name and `..`.
        f.debug_struct("EventStream").finish_non_exhaustive()
    }
}
/// A single-value slot that holds the last event that was broadcast by an
/// output port.
pub struct EventSlot<T> {
    /// Shared storage written by the output port and drained by `take()`.
    slot: Arc<Mutex<Option<T>>>,
}

impl<T> EventSlot<T> {
    /// Creates a new `EventSlot` backed by the provided shared storage.
    pub(crate) fn new(slot: Arc<Mutex<Option<T>>>) -> Self {
        Self { slot }
    }

    /// Take the last event, if any, leaving the slot empty.
    ///
    /// Note that even after the event is taken, it may become populated anew
    /// after simulation time is incremented.
    pub fn take(&mut self) -> Option<T> {
        // A mutable borrow is not strictly required here, but it mirrors the
        // `EventStream` API and rules out multi-threaded misuse of the slot.
        //
        // If the lock is currently held elsewhere, behave as if no event were
        // available instead of blocking the caller.
        match self.slot.try_lock() {
            Ok(mut event) => event.take(),
            Err(TryLockError::WouldBlock) => None,
            Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}

impl<T> fmt::Debug for EventSlot<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // The slot contents are not shown; only the type name and `..`.
        f.debug_struct("EventSlot").finish_non_exhaustive()
    }
}

View File

@ -1,100 +0,0 @@
use std::fmt;
use std::sync::{Arc, Mutex};
use crate::executor::Executor;
use crate::model::Model;
use crate::time::{Clock, Scheduler};
use crate::time::{MonotonicTime, SchedulerQueue, TearableAtomicTime};
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCell;
use super::{Mailbox, Simulation};
/// Builder for a multi-threaded, discrete-event simulation.
pub struct SimInit {
    /// Executor onto which model event loops are spawned by `add_model()`.
    executor: Executor,
    /// Priority queue of scheduled events, shared with every model scheduler.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    /// Simulation time; set to `MonotonicTime::EPOCH` until `init()` or
    /// `init_with_clock()` writes the actual start time.
    time: SyncCell<TearableAtomicTime>,
}
impl SimInit {
    /// Creates a builder for a multithreaded simulation running on all
    /// available logical threads.
    pub fn new() -> Self {
        Self::with_num_threads(num_cpus::get())
    }

    /// Creates a builder for a multithreaded simulation running on the
    /// specified number of threads.
    pub fn with_num_threads(num_threads: usize) -> Self {
        // The current executor's implementation caps the number of threads to
        // 64 on 64-bit systems and 32 on 32-bit systems.
        let num_threads = num_threads.min(usize::BITS as usize);
        Self {
            executor: Executor::new(num_threads),
            scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
            // The effective start time is written later by `init()` or
            // `init_with_clock()`.
            time: SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)),
        }
    }

    /// Adds a model and its mailbox to the simulation bench.
    ///
    /// The model's event loop is spawned onto the executor; it is only driven
    /// once the executor is run, i.e. from `init()`/`init_with_clock()` and
    /// subsequent simulation steps.
    pub fn add_model<M: Model>(self, model: M, mailbox: Mailbox<M>) -> Self {
        let scheduler_queue = self.scheduler_queue.clone();
        let time = self.time.reader();
        let mut receiver = mailbox.0;
        self.executor.spawn_and_forget(async move {
            let sender = receiver.sender();
            let scheduler = Scheduler::new(sender, scheduler_queue, time);
            // Run the user-defined initialization hook first...
            let mut model = model.init(&scheduler).await.0;
            // ...then serve incoming messages until `recv()` reports an error
            // (presumably when the mailbox is closed — confirm in `Receiver`).
            while receiver.recv(&mut model, &scheduler).await.is_ok() {}
        });
        self
    }

    /// Builds a simulation initialized at the specified simulation time,
    /// executing the [`Model::init()`](crate::model::Model::init) method on all
    /// model initializers.
    ///
    /// This is equivalent to calling [`SimInit::init_with_clock()`] with a
    /// [`NoClock`](crate::time::NoClock) argument and effectively makes the
    /// simulation run as fast as possible.
    pub fn init(mut self, start_time: MonotonicTime) -> Simulation {
        self.time.write(start_time);
        // Drive all pending initializers to completion before handing over
        // the bench to the caller.
        self.executor.run();
        Simulation::new(self.executor, self.scheduler_queue, self.time)
    }

    /// Builds a simulation synchronized with the provided
    /// [`Clock`](crate::time::Clock) and initialized at the specified
    /// simulation time, executing the
    /// [`Model::init()`](crate::model::Model::init) method on all model
    /// initializers.
    pub fn init_with_clock(
        mut self,
        start_time: MonotonicTime,
        mut clock: impl Clock + 'static,
    ) -> Simulation {
        self.time.write(start_time);
        // Align the wall clock with the simulation start time before running
        // the initializers.
        clock.synchronize(start_time);
        self.executor.run();
        Simulation::with_clock(self.executor, self.scheduler_queue, self.time, clock)
    }
}
impl Default for SimInit {
fn default() -> Self {
Self::new()
}
}
impl fmt::Debug for SimInit {
    /// Formats the builder; no field is displayed, only the type name
    /// followed by `..`.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut debug = formatter.debug_struct("SimInit");
        debug.finish_non_exhaustive()
    }
}

View File

@ -1,62 +0,0 @@
//! Simulation time and scheduling.
//!
//! This module provides most notably:
//!
//! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard,
//! * [`Clock`]: a trait for types that can synchronize a simulation,
//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`],
//! * [`Scheduler`]: a model-local handle to the global scheduler that can be
//! used by models to schedule future actions onto themselves.
//!
//! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
//!
//!
//! # Examples
//!
//! An alarm clock model that prints a message when the simulation time reaches
//! the specified timestamp.
//!
//! ```
//! use asynchronix::model::Model;
//! use asynchronix::time::{MonotonicTime, Scheduler};
//!
//! // An alarm clock model.
//! pub struct AlarmClock {
//! msg: String
//! }
//!
//! impl AlarmClock {
//! // Creates a new alarm clock.
//! pub fn new(msg: String) -> Self {
//! Self { msg }
//! }
//!
//! // Sets an alarm [input port].
//! pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
//! if scheduler.schedule_event(setting, Self::ring, ()).is_err() {
//! println!("The alarm clock can only be set for a future time");
//! }
//! }
//!
//! // Rings the alarm [private input port].
//! fn ring(&mut self) {
//! println!("{}", self.msg);
//! }
//! }
//!
//! impl Model for AlarmClock {}
//! ```
mod clock;
mod monotonic_time;
mod scheduler;
pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock};
pub(crate) use monotonic_time::TearableAtomicTime;
pub use monotonic_time::{MonotonicTime, SystemTimeError};
pub(crate) use scheduler::{
schedule_event_at_unchecked, schedule_keyed_event_at_unchecked,
schedule_periodic_event_at_unchecked, schedule_periodic_keyed_event_at_unchecked,
ScheduledEvent, SchedulerQueue,
};
pub use scheduler::{Deadline, EventKey, Scheduler, SchedulingError};

View File

@ -1,671 +0,0 @@
//! Monotonic simulation time.
use std::error::Error;
use std::fmt;
use std::ops::{Add, AddAssign, Sub, SubAssign};
use std::sync::atomic::{AtomicI64, AtomicU32, Ordering};
use std::time::{Duration, SystemTime};
use crate::util::sync_cell::TearableAtomic;
/// Number of nanoseconds in one second; upper bound (exclusive) for the
/// `nanos` field of `MonotonicTime`.
const NANOS_PER_SEC: u32 = 1_000_000_000;

/// A nanosecond-precision monotonic clock timestamp.
///
/// A timestamp specifies a [TAI] point in time. It is represented as a 64-bit
/// signed number of seconds and a positive number of nanoseconds, counted with
/// reference to 1970-01-01 00:00:00 TAI. This timestamp format has a number of
/// desirable properties:
///
/// - it enables cheap inter-operation with the standard [`Duration`] type which
///   uses a very similar internal representation,
/// - it constitutes a strict 96-bit superset of 80-bit PTP IEEE-1588
///   timestamps, with the same epoch,
/// - if required, exact conversion to a Unix timestamp is trivial and only
///   requires subtracting from this timestamp the number of leap seconds
///   between TAI and UTC time (see also the
///   [`as_unix_secs()`](MonotonicTime::as_unix_secs) method).
///
/// Although no date-time conversion methods are provided, conversion from
/// timestamp to TAI date-time representations and back can be easily performed
/// using `NaiveDateTime` from the [chrono] crate or `OffsetDateTime` from the
/// [time] crate, treating the timestamp as a regular (UTC) Unix timestamp.
///
/// [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
/// [chrono]: https://crates.io/crates/chrono
/// [time]: https://crates.io/crates/time
///
/// # Examples
///
/// ```
/// use std::time::Duration;
/// use asynchronix::time::MonotonicTime;
///
/// // Set the timestamp to 2009-02-13 23:31:30.987654321 TAI.
/// let mut timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
///
/// // Increment the timestamp by 123.456s.
/// timestamp += Duration::new(123, 456_000_000);
///
/// assert_eq!(timestamp, MonotonicTime::new(1_234_568_014, 443_654_321));
/// assert_eq!(timestamp.as_secs(), 1_234_568_014);
/// assert_eq!(timestamp.subsec_nanos(), 443_654_321);
/// ```
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct MonotonicTime {
    /// The number of whole seconds in the future (if positive) or in the past
    /// (if negative) of 1970-01-01 00:00:00 TAI.
    ///
    /// Note that the automatic derivation of `PartialOrd` relies on
    /// lexicographical comparison so the `secs` field must appear before
    /// `nanos` in declaration order to be given higher priority.
    secs: i64,
    /// The sub-second number of nanoseconds in the future of the point in time
    /// defined by `secs`. Always in `0..NANOS_PER_SEC`.
    nanos: u32,
}
impl MonotonicTime {
    /// The epoch used by `MonotonicTime`, equal to 1970-01-01 00:00:00 TAI.
    ///
    /// This epoch coincides with the PTP epoch defined in the IEEE-1588
    /// standard.
    pub const EPOCH: Self = Self { secs: 0, nanos: 0 };

    /// The minimum possible `MonotonicTime` timestamp.
    pub const MIN: Self = Self {
        secs: i64::MIN,
        nanos: 0,
    };

    /// The maximum possible `MonotonicTime` timestamp.
    pub const MAX: Self = Self {
        secs: i64::MAX,
        nanos: NANOS_PER_SEC - 1,
    };

    /// Creates a timestamp directly from timestamp parts.
    ///
    /// The number of seconds is relative to the [`EPOCH`](MonotonicTime::EPOCH)
    /// (1970-01-01 00:00:00 TAI). It is negative for dates in the past of the
    /// epoch.
    ///
    /// The number of nanoseconds is always positive and always points towards
    /// the future.
    ///
    /// # Panics
    ///
    /// This constructor will panic if the number of nanoseconds is greater than
    /// or equal to 1 second.
    ///
    /// # Example
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // A timestamp set to 2009-02-13 23:31:30.987654321 TAI.
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    ///
    /// // A timestamp set 3.5s before the epoch.
    /// let timestamp = MonotonicTime::new(-4, 500_000_000);
    /// assert_eq!(timestamp, MonotonicTime::EPOCH - Duration::new(3, 500_000_000));
    /// ```
    pub const fn new(secs: i64, subsec_nanos: u32) -> Self {
        assert!(
            subsec_nanos < NANOS_PER_SEC,
            "invalid number of nanoseconds"
        );
        Self {
            secs,
            nanos: subsec_nanos,
        }
    }

    /// Creates a timestamp from the current system time.
    ///
    /// The argument is the current difference between TAI and UTC time in
    /// seconds (a.k.a. leap seconds). For reference, this offset has been +37s
    /// since 2017-01-01, a value which is to remain valid until at least
    /// 2024-06-29. See the [official IERS bulletin
    /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
    /// leap second announcements or the [IETF
    /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
    /// current and historical values.
    ///
    /// # Errors
    ///
    /// This method will return an error if the reported system time is in the
    /// past of the Unix epoch or if the offset-adjusted timestamp is outside
    /// the representable range.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // Compute the current TAI time assuming that the current difference
    /// // between TAI and UTC time is 37s.
    /// let timestamp = MonotonicTime::from_system(37).unwrap();
    /// ```
    pub fn from_system(leap_secs: i64) -> Result<Self, SystemTimeError> {
        let utc_timestamp = SystemTime::now()
            .duration_since(SystemTime::UNIX_EPOCH)
            .map_err(|_| SystemTimeError::InvalidSystemTime)?;
        // TAI = UTC + leap seconds, so start from the leap-second offset and
        // add the UTC duration since the epoch.
        Self::new(leap_secs, 0)
            .checked_add(utc_timestamp)
            .ok_or(SystemTimeError::OutOfRange)
    }

    /// Returns the number of whole seconds relative to
    /// [`EPOCH`](MonotonicTime::EPOCH) (1970-01-01 00:00:00 TAI).
    ///
    /// Consistently with the interpretation of seconds and nanoseconds in the
    /// [`new()`](Self::new) constructor, seconds are always rounded towards
    /// `-∞`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert_eq!(timestamp.as_secs(), 1_234_567_890);
    ///
    /// let timestamp = MonotonicTime::EPOCH - Duration::new(3, 500_000_000);
    /// assert_eq!(timestamp.as_secs(), -4);
    /// ```
    pub const fn as_secs(&self) -> i64 {
        self.secs
    }

    /// Returns the number of seconds of the corresponding Unix time.
    ///
    /// The argument is the difference between TAI and UTC time in seconds
    /// (a.k.a. leap seconds) applicable at the date represented by the
    /// timestamp. See the [official IERS bulletin
    /// C](https://datacenter.iers.org/data/latestVersion/bulletinC.txt) for
    /// leap second announcements or the [IETF
    /// table](https://www.ietf.org/timezones/data/leap-seconds.list) for
    /// current and historical values.
    ///
    /// This method merely subtracts the offset from the value returned by
    /// [`as_secs()`](Self::as_secs) and checks for potential overflow; its main
    /// purpose is to prevent mistakes regarding the direction in which the
    /// offset should be applied.
    ///
    /// Note that the nanosecond part of a Unix timestamp can be simply
    /// retrieved with [`subsec_nanos()`](Self::subsec_nanos) since UTC and TAI
    /// differ by a whole number of seconds.
    ///
    /// # Panics
    ///
    /// This will panic if the offset-adjusted timestamp cannot be represented
    /// as an `i64`.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// // Set the date to 2000-01-01 00:00:00 TAI.
    /// let timestamp = MonotonicTime::new(946_684_800, 0);
    ///
    /// // Convert to a Unix timestamp, accounting for the +32s difference between
    /// // TAI and UTC on 2000-01-01.
    /// let unix_secs = timestamp.as_unix_secs(32);
    /// ```
    pub const fn as_unix_secs(&self, leap_secs: i64) -> i64 {
        if let Some(secs) = self.secs.checked_sub(leap_secs) {
            secs
        } else {
            panic!("timestamp outside representable range");
        }
    }

    /// Returns the sub-second fractional part in nanoseconds.
    ///
    /// Note that nanoseconds always point towards the future even if the date
    /// is in the past of the [`EPOCH`](MonotonicTime::EPOCH).
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert_eq!(timestamp.subsec_nanos(), 987_654_321);
    /// ```
    pub const fn subsec_nanos(&self) -> u32 {
        self.nanos
    }

    /// Adds a duration to a timestamp, checking for overflow.
    ///
    /// Returns `None` if overflow occurred.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert!(timestamp.checked_add(Duration::new(10, 123_456_789)).is_some());
    /// assert!(timestamp.checked_add(Duration::MAX).is_none());
    /// ```
    pub const fn checked_add(self, rhs: Duration) -> Option<Self> {
        // A duration in seconds greater than `i64::MAX` is actually fine as
        // long as the number of seconds does not effectively overflow, which is
        // why the below does not use `checked_add`. So technically the below
        // addition may wrap around on the negative side due to the
        // unsigned-to-signed cast of the duration, but this does not
        // necessarily indicate an actual overflow. Actual overflow can be ruled
        // out by verifying that the new timestamp is in the future of the old
        // timestamp.
        let mut secs = self.secs.wrapping_add(rhs.as_secs() as i64);

        // Check for overflow.
        if secs < self.secs {
            return None;
        }

        // Both operands are strictly less than `NANOS_PER_SEC`, so the sum
        // cannot overflow a `u32` and the carry is at most one second.
        let mut nanos = self.nanos + rhs.subsec_nanos();
        if nanos >= NANOS_PER_SEC {
            secs = if let Some(s) = secs.checked_add(1) {
                s
            } else {
                return None;
            };
            nanos -= NANOS_PER_SEC;
        }

        Some(Self { secs, nanos })
    }

    /// Subtracts a duration from a timestamp, checking for overflow.
    ///
    /// Returns `None` if overflow occurred.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp = MonotonicTime::new(1_234_567_890, 987_654_321);
    /// assert!(timestamp.checked_sub(Duration::new(10, 123_456_789)).is_some());
    /// assert!(timestamp.checked_sub(Duration::MAX).is_none());
    /// ```
    pub const fn checked_sub(self, rhs: Duration) -> Option<Self> {
        // A duration in seconds greater than `i64::MAX` is actually fine as
        // long as the number of seconds does not effectively overflow, which is
        // why the below does not use `checked_sub`. So technically the below
        // subtraction may wrap around on the positive side due to the
        // unsigned-to-signed cast of the duration, but this does not
        // necessarily indicate an actual overflow. Actual overflow can be ruled
        // out by verifying that the new timestamp is in the past of the old
        // timestamp.
        let mut secs = self.secs.wrapping_sub(rhs.as_secs() as i64);

        // Check for overflow.
        if secs > self.secs {
            return None;
        }

        // Borrow one second from `secs` if the nanosecond subtraction would
        // otherwise underflow.
        let nanos = if self.nanos < rhs.subsec_nanos() {
            secs = if let Some(s) = secs.checked_sub(1) {
                s
            } else {
                return None;
            };

            (self.nanos + NANOS_PER_SEC) - rhs.subsec_nanos()
        } else {
            self.nanos - rhs.subsec_nanos()
        };

        Some(Self { secs, nanos })
    }

    /// Subtracts a timestamp from another timestamp.
    ///
    /// # Panics
    ///
    /// Panics if the argument lies in the future of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
    /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
    /// assert_eq!(
    ///     timestamp_later.duration_since(timestamp_earlier),
    ///     Duration::new(20, 135_802_468)
    /// );
    /// ```
    pub fn duration_since(self, earlier: Self) -> Duration {
        // Panic message typo fixed: "substract" -> "subtract".
        self.checked_duration_since(earlier)
            .expect("attempt to subtract a timestamp from an earlier timestamp")
    }

    /// Computes the duration elapsed between a timestamp and an earlier
    /// timestamp, checking that the timestamps are appropriately ordered.
    ///
    /// Returns `None` if the argument lies in the future of `self`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    /// use asynchronix::time::MonotonicTime;
    ///
    /// let timestamp_earlier = MonotonicTime::new(1_234_567_879, 987_654_321);
    /// let timestamp_later = MonotonicTime::new(1_234_567_900, 123_456_789);
    /// assert!(timestamp_later.checked_duration_since(timestamp_earlier).is_some());
    /// assert!(timestamp_earlier.checked_duration_since(timestamp_later).is_none());
    /// ```
    pub const fn checked_duration_since(self, earlier: Self) -> Option<Duration> {
        // If the subtraction of the nanosecond fractions would overflow, carry
        // over one second to the nanoseconds.
        let (secs, nanos) = if earlier.nanos > self.nanos {
            if let Some(s) = self.secs.checked_sub(1) {
                (s, self.nanos + NANOS_PER_SEC)
            } else {
                return None;
            }
        } else {
            (self.secs, self.nanos)
        };

        // Make sure the computation of the duration will not overflow the
        // seconds.
        if secs < earlier.secs {
            return None;
        }

        // This subtraction may wrap around if the difference between the two
        // timestamps is more than `i64::MAX`, but even if it does the result
        // will be correct once cast to an unsigned integer.
        let delta_secs = secs.wrapping_sub(earlier.secs) as u64;

        // The below subtraction is guaranteed to never overflow.
        let delta_nanos = nanos - earlier.nanos;

        Some(Duration::new(delta_secs, delta_nanos))
    }
}
impl Add<Duration> for MonotonicTime {
    type Output = Self;

    /// Adds a duration to a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_add`] for a panic-free
    /// version.
    fn add(self, rhs: Duration) -> Self {
        match self.checked_add(rhs) {
            Some(timestamp) => timestamp,
            None => panic!("overflow when adding duration to timestamp"),
        }
    }
}

impl Sub<Duration> for MonotonicTime {
    type Output = Self;

    /// Subtracts a duration from a timestamp.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be
    /// represented. See [`MonotonicTime::checked_sub`] for a panic-free
    /// version.
    fn sub(self, rhs: Duration) -> Self {
        match self.checked_sub(rhs) {
            Some(timestamp) => timestamp,
            None => panic!("overflow when subtracting duration from timestamp"),
        }
    }
}

impl AddAssign<Duration> for MonotonicTime {
    /// Increments the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    fn add_assign(&mut self, rhs: Duration) {
        // Delegate to the panicking `Add` implementation.
        *self = *self + rhs;
    }
}

impl SubAssign<Duration> for MonotonicTime {
    /// Decrements the timestamp by a duration.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting timestamp cannot be represented.
    fn sub_assign(&mut self, rhs: Duration) {
        // Delegate to the panicking `Sub` implementation.
        *self = *self - rhs;
    }
}
/// An error that may be returned when initializing a [`MonotonicTime`] from
/// system time.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SystemTimeError {
    /// The system time is in the past of the Unix epoch.
    InvalidSystemTime,
    /// The system time cannot be represented as a `MonotonicTime`.
    OutOfRange,
}
impl fmt::Display for SystemTimeError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its user-facing description.
        let msg = match self {
            Self::InvalidSystemTime => "invalid system time",
            Self::OutOfRange => "timestamp outside representable range",
        };
        fmt.write_str(msg)
    }
}
impl Error for SystemTimeError {}
/// A tearable atomic adapter over a `MonotonicTime`.
///
/// This makes it possible to store the simulation time in a `SyncCell`, an
/// efficient, seqlock-based alternative to `RwLock`.
pub(crate) struct TearableAtomicTime {
secs: AtomicI64,
nanos: AtomicU32,
}
impl TearableAtomicTime {
pub(crate) fn new(time: MonotonicTime) -> Self {
Self {
secs: AtomicI64::new(time.secs),
nanos: AtomicU32::new(time.nanos),
}
}
}
impl TearableAtomic for TearableAtomicTime {
    type Value = MonotonicTime;
    fn tearable_load(&self) -> MonotonicTime {
        // Each field is read independently: a torn load may mix fields from
        // different stores, but the result is always a structurally valid
        // `MonotonicTime`.
        let secs = self.secs.load(Ordering::Relaxed);
        let nanos = self.nanos.load(Ordering::Relaxed);
        MonotonicTime { secs, nanos }
    }
    fn tearable_store(&self, value: MonotonicTime) {
        // Symmetrically, each field is written independently; a torn store
        // can never produce an invalid `MonotonicTime`.
        self.secs.store(value.secs, Ordering::Relaxed);
        self.nanos.store(value.nanos, Ordering::Relaxed);
    }
}
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;
    // Equality must account for both the seconds and the nanoseconds fields.
    #[test]
    fn time_equality() {
        let t0 = MonotonicTime::new(123, 123_456_789);
        let t1 = MonotonicTime::new(123, 123_456_789);
        let t2 = MonotonicTime::new(123, 123_456_790);
        let t3 = MonotonicTime::new(124, 123_456_789);
        assert_eq!(t0, t1);
        assert_ne!(t0, t2);
        assert_ne!(t0, t3);
    }
    // The seconds field takes precedence over the nanoseconds field.
    #[test]
    fn time_ordering() {
        let t0 = MonotonicTime::new(0, 1);
        let t1 = MonotonicTime::new(1, 0);
        assert!(t1 > t0);
    }
    // Coarse sanity check that the system clock maps into a plausible range;
    // disabled under Miri since it performs a real system call.
    #[cfg(not(miri))]
    #[test]
    fn time_from_system_smoke() {
        const START_OF_2022: i64 = 1640995200;
        const START_OF_2050: i64 = 2524608000;
        let now_secs = MonotonicTime::from_system(0).unwrap().as_secs();
        assert!(now_secs > START_OF_2022);
        assert!(now_secs < START_OF_2050);
    }
    // The nanoseconds fraction must be strictly less than one second.
    #[test]
    #[should_panic]
    fn time_invalid() {
        MonotonicTime::new(123, 1_000_000_000);
    }
    #[test]
    fn time_duration_since_smoke() {
        let t0 = MonotonicTime::new(100, 100_000_000);
        let t1 = MonotonicTime::new(123, 223_456_789);
        assert_eq!(
            t1.checked_duration_since(t0),
            Some(Duration::new(23, 123_456_789))
        );
    }
    // Exercises the borrow from the seconds when the nanosecond fraction of
    // `earlier` is the larger one.
    #[test]
    fn time_duration_with_carry() {
        let t0 = MonotonicTime::new(100, 200_000_000);
        let t1 = MonotonicTime::new(101, 100_000_000);
        assert_eq!(
            t1.checked_duration_since(t0),
            Some(Duration::new(0, 900_000_000))
        );
    }
    // The full `MonotonicTime` span must fit in a `Duration`.
    #[test]
    fn time_duration_since_extreme() {
        const MIN_TIME: MonotonicTime = MonotonicTime::new(i64::MIN, 0);
        const MAX_TIME: MonotonicTime = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
        assert_eq!(
            MAX_TIME.checked_duration_since(MIN_TIME),
            Some(Duration::new(u64::MAX, NANOS_PER_SEC - 1))
        );
    }
    // A negative difference cannot be represented by a `Duration`.
    #[test]
    fn time_duration_since_invalid() {
        let t0 = MonotonicTime::new(100, 0);
        let t1 = MonotonicTime::new(99, 0);
        assert_eq!(t1.checked_duration_since(t0), None);
    }
    #[test]
    fn time_add_duration_smoke() {
        let t = MonotonicTime::new(-100, 100_000_000);
        let dt = Duration::new(400, 300_000_000);
        assert_eq!(t + dt, MonotonicTime::new(300, 400_000_000));
    }
    // Exercises the carry into the seconds when the nanosecond sum reaches 1s.
    #[test]
    fn time_add_duration_with_carry() {
        let t = MonotonicTime::new(-100, 900_000_000);
        let dt1 = Duration::new(400, 100_000_000);
        let dt2 = Duration::new(400, 300_000_000);
        assert_eq!(t + dt1, MonotonicTime::new(301, 0));
        assert_eq!(t + dt2, MonotonicTime::new(301, 200_000_000));
    }
    // Adding the maximum duration to the minimum timestamp lands exactly on
    // the maximum timestamp.
    #[test]
    fn time_add_duration_extreme() {
        let t = MonotonicTime::new(i64::MIN, 0);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
        assert_eq!(t + dt, MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1));
    }
    // One nanosecond past the representable maximum must panic.
    #[test]
    #[should_panic]
    fn time_add_duration_overflow() {
        let t = MonotonicTime::new(i64::MIN, 1);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
        let _ = t + dt;
    }
    #[test]
    fn time_sub_duration_smoke() {
        let t = MonotonicTime::new(100, 500_000_000);
        let dt = Duration::new(400, 300_000_000);
        assert_eq!(t - dt, MonotonicTime::new(-300, 200_000_000));
    }
    // Exercises the borrow from the seconds when the subtrahend's nanosecond
    // fraction is the larger one.
    #[test]
    fn time_sub_duration_with_carry() {
        let t = MonotonicTime::new(100, 100_000_000);
        let dt1 = Duration::new(400, 100_000_000);
        let dt2 = Duration::new(400, 300_000_000);
        assert_eq!(t - dt1, MonotonicTime::new(-300, 0));
        assert_eq!(t - dt2, MonotonicTime::new(-301, 800_000_000));
    }
    // Subtracting the maximum duration from the maximum timestamp lands
    // exactly on the minimum timestamp.
    #[test]
    fn time_sub_duration_extreme() {
        let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 1);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
        assert_eq!(t - dt, MonotonicTime::new(i64::MIN, 0));
    }
    // One nanosecond past the representable minimum must panic.
    #[test]
    #[should_panic]
    fn time_sub_duration_overflow() {
        let t = MonotonicTime::new(i64::MAX, NANOS_PER_SEC - 2);
        let dt = Duration::new(u64::MAX, NANOS_PER_SEC - 1);
        let _ = t - dt;
    }
}

View File

@ -1,943 +0,0 @@
//! Scheduling functions and types.
use std::error::Error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use std::time::Duration;
use pin_project_lite::pin_project;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel::{ChannelId, Sender};
use crate::executor::Executor;
use crate::model::{InputFn, Model};
use crate::time::{MonotonicTime, TearableAtomicTime};
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCellReader;
/// Shorthand for the scheduler queue type.
///
/// Entries are keyed by the scheduled time and the target channel id;
/// presumably the queue orders entries lexicographically on this key — see
/// `PriorityQueue` for the exact ordering guarantees.
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, ChannelId), Box<dyn ScheduledEvent>>;
/// Trait abstracting over time-absolute and time-relative deadlines.
///
/// This trait is implemented by [`std::time::Duration`] and
/// [`MonotonicTime`].
pub trait Deadline {
    /// Make this deadline into an absolute timestamp, using the provided
    /// current time as a reference.
    ///
    /// A time-relative deadline is anchored to `now`; an absolute deadline is
    /// returned unchanged.
    fn into_time(self, now: MonotonicTime) -> MonotonicTime;
}
impl Deadline for Duration {
    // A `Duration` deadline is relative: offset the current time by it.
    #[inline(always)]
    fn into_time(self, now: MonotonicTime) -> MonotonicTime {
        now + self
    }
}
impl Deadline for MonotonicTime {
    // A `MonotonicTime` deadline is already absolute: the current time is
    // irrelevant.
    #[inline(always)]
    fn into_time(self, _: MonotonicTime) -> MonotonicTime {
        self
    }
}
/// A local scheduler for models.
///
/// A `Scheduler` is a handle to the global scheduler associated to a model
/// instance. It can be used by the model to retrieve the simulation time or
/// schedule delayed actions on itself.
///
/// ### Caveat: self-scheduling `async` methods
///
/// Due to a current rustc issue, `async` methods that schedule themselves will
/// not compile unless an explicit `Send` bound is added to the returned future.
/// This can be done by replacing the `async` signature with a partially
/// desugared signature such as:
///
/// ```ignore
/// fn self_scheduling_method<'a>(
///     &'a mut self,
///     arg: MyEventType,
///     scheduler: &'a Scheduler<Self>
/// ) -> impl Future<Output=()> + Send + 'a {
///     async move {
///         /* implementation */
///     }
/// }
/// ```
///
/// Self-scheduling methods which are not `async` are not affected by this
/// issue.
///
/// # Examples
///
/// A model that sends a greeting after some delay.
///
/// ```
/// use std::time::Duration;
/// use asynchronix::model::{Model, Output};
/// use asynchronix::time::Scheduler;
///
/// #[derive(Default)]
/// pub struct DelayedGreeter {
///     msg_out: Output<String>,
/// }
///
/// impl DelayedGreeter {
///     // Triggers a greeting on the output port after some delay [input port].
///     pub async fn greet_with_delay(&mut self, delay: Duration, scheduler: &Scheduler<Self>) {
///         let time = scheduler.time();
///         let greeting = format!("Hello, this message was scheduled at: {:?}.", time);
///
///         if delay.is_zero() {
///             self.msg_out.send(greeting).await;
///         } else {
///             scheduler.schedule_event(delay, Self::send_msg, greeting).unwrap();
///         }
///     }
///
///     // Sends a message to the output [private input port].
///     async fn send_msg(&mut self, msg: String) {
///         self.msg_out.send(msg).await;
///     }
/// }
/// impl Model for DelayedGreeter {}
/// ```
// The self-scheduling caveat seems related to this issue:
// https://github.com/rust-lang/rust/issues/78649
pub struct Scheduler<M: Model> {
    /// Sender for the channel of the model this scheduler is bound to.
    sender: Sender<M>,
    /// Shared, mutex-protected queue of scheduled events.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    /// Read-only handle to the simulation time.
    time: SyncCellReader<TearableAtomicTime>,
}
impl<M: Model> Scheduler<M> {
    /// Creates a new local scheduler.
    pub(crate) fn new(
        sender: Sender<M>,
        scheduler_queue: Arc<Mutex<SchedulerQueue>>,
        time: SyncCellReader<TearableAtomicTime>,
    ) -> Self {
        Self {
            sender,
            scheduler_queue,
            time,
        }
    }
    /// Returns the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{MonotonicTime, Scheduler};
    ///
    /// fn is_third_millenium<M: Model>(scheduler: &Scheduler<M>) -> bool {
    ///     let time = scheduler.time();
    ///
    ///     time >= MonotonicTime::new(978307200, 0) && time < MonotonicTime::new(32535216000, 0)
    /// }
    /// ```
    pub fn time(&self) -> MonotonicTime {
        // NOTE(review): presumably `try_read` only fails on a torn seqlock
        // read that the simulation is expected to prevent, hence the hard
        // failure — confirm against `SyncCellReader::try_read`.
        self.time.try_read().expect("internal simulation error: could not perform a synchronized read of the simulation time")
    }
    /// Schedules an event at a future time.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use asynchronix::model::Model;
    /// use asynchronix::time::Scheduler;
    ///
    /// // A timer.
    /// pub struct Timer {}
    ///
    /// impl Timer {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: Duration, scheduler: &Scheduler<Self>) {
    ///         if scheduler.schedule_event(setting, Self::ring, ()).is_err() {
    ///             println!("The alarm clock can only be set for a future time");
    ///         }
    ///     }
    ///
    ///     // Rings [private input port].
    ///     fn ring(&mut self) {
    ///         println!("Brringggg");
    ///     }
    /// }
    ///
    /// impl Model for Timer {}
    /// ```
    pub fn schedule_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
    ) -> Result<(), SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let now = self.time();
        let time = deadline.into_time(now);
        // The deadline must lie strictly in the future of the current time.
        if now >= time {
            return Err(SchedulingError::InvalidScheduledTime);
        }
        let sender = self.sender.clone();
        // Delegate the type-erasure and queue insertion to the free function.
        schedule_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
        Ok(())
    }
    /// Schedules a cancellable event at a future time and returns an event key.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
    ///
    /// // An alarm clock that can be cancelled.
    /// #[derive(Default)]
    /// pub struct CancellableAlarmClock {
    ///     event_key: Option<EventKey>,
    /// }
    ///
    /// impl CancellableAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
    ///         self.cancel();
    ///         match scheduler.schedule_keyed_event(setting, Self::ring, ()) {
    ///             Ok(event_key) => self.event_key = Some(event_key),
    ///             Err(_) => println!("The alarm clock can only be set for a future time"),
    ///         };
    ///     }
    ///
    ///     // Cancels the current alarm, if any [input port].
    ///     pub fn cancel(&mut self) {
    ///         self.event_key.take().map(|k| k.cancel());
    ///     }
    ///
    ///     // Rings the alarm [private input port].
    ///     fn ring(&mut self) {
    ///         println!("Brringggg!");
    ///     }
    /// }
    ///
    /// impl Model for CancellableAlarmClock {}
    /// ```
    pub fn schedule_keyed_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
    ) -> Result<EventKey, SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let now = self.time();
        let time = deadline.into_time(now);
        // The deadline must lie strictly in the future of the current time.
        if now >= time {
            return Err(SchedulingError::InvalidScheduledTime);
        }
        let sender = self.sender.clone();
        let event_key =
            schedule_keyed_event_at_unchecked(time, func, arg, sender, &self.scheduler_queue);
        Ok(event_key)
    }
    /// Schedules a periodically recurring event at a future time.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time or if the specified period is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{MonotonicTime, Scheduler};
    ///
    /// // An alarm clock beeping at 1Hz.
    /// pub struct BeepingAlarmClock {}
    ///
    /// impl BeepingAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
    ///         if scheduler.schedule_periodic_event(
    ///             setting,
    ///             Duration::from_secs(1), // 1Hz = 1/1s
    ///             Self::beep,
    ///             ()
    ///         ).is_err() {
    ///             println!("The alarm clock can only be set for a future time");
    ///         }
    ///     }
    ///
    ///     // Emits a single beep [private input port].
    ///     fn beep(&mut self) {
    ///         println!("Beep!");
    ///     }
    /// }
    ///
    /// impl Model for BeepingAlarmClock {}
    /// ```
    pub fn schedule_periodic_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
    ) -> Result<(), SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let now = self.time();
        let time = deadline.into_time(now);
        // The deadline must lie strictly in the future of the current time.
        if now >= time {
            return Err(SchedulingError::InvalidScheduledTime);
        }
        // Periodic events require a non-zero repetition period.
        if period.is_zero() {
            return Err(SchedulingError::NullRepetitionPeriod);
        }
        let sender = self.sender.clone();
        schedule_periodic_event_at_unchecked(
            time,
            period,
            func,
            arg,
            sender,
            &self.scheduler_queue,
        );
        Ok(())
    }
    /// Schedules a cancellable, periodically recurring event at a future time
    /// and returns an event key.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time or if the specified period is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use asynchronix::model::Model;
    /// use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
    ///
    /// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or
    /// // stopped after it sets off.
    /// #[derive(Default)]
    /// pub struct CancellableBeepingAlarmClock {
    ///     event_key: Option<EventKey>,
    /// }
    ///
    /// impl CancellableBeepingAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, scheduler: &Scheduler<Self>) {
    ///         self.cancel();
    ///         match scheduler.schedule_keyed_periodic_event(
    ///             setting,
    ///             Duration::from_secs(1), // 1Hz = 1/1s
    ///             Self::beep,
    ///             ()
    ///         ) {
    ///             Ok(event_key) => self.event_key = Some(event_key),
    ///             Err(_) => println!("The alarm clock can only be set for a future time"),
    ///         };
    ///     }
    ///
    ///     // Cancels or stops the alarm [input port].
    ///     pub fn cancel(&mut self) {
    ///         self.event_key.take().map(|k| k.cancel());
    ///     }
    ///
    ///     // Emits a single beep [private input port].
    ///     fn beep(&mut self) {
    ///         println!("Beep!");
    ///     }
    /// }
    ///
    /// impl Model for CancellableBeepingAlarmClock {}
    /// ```
    pub fn schedule_keyed_periodic_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
    ) -> Result<EventKey, SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let now = self.time();
        let time = deadline.into_time(now);
        // The deadline must lie strictly in the future of the current time.
        if now >= time {
            return Err(SchedulingError::InvalidScheduledTime);
        }
        // Periodic events require a non-zero repetition period.
        if period.is_zero() {
            return Err(SchedulingError::NullRepetitionPeriod);
        }
        let sender = self.sender.clone();
        let event_key = schedule_periodic_keyed_event_at_unchecked(
            time,
            period,
            func,
            arg,
            sender,
            &self.scheduler_queue,
        );
        Ok(event_key)
    }
}
impl<M: Model> fmt::Debug for Scheduler<M> {
    // Use the `<'_>` elided-lifetime form for consistency with the other
    // `fmt::Formatter<'_>` signatures in this file (and the
    // `rust_2018_idioms` lint). Internal fields are deliberately not exposed.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("Scheduler").finish_non_exhaustive()
    }
}
/// Handle to a scheduled event.
///
/// An `EventKey` can be used to cancel a future event.
#[derive(Clone, Debug)]
#[must_use = "prefer unkeyed scheduling methods if the event is never cancelled"]
pub struct EventKey {
    is_cancelled: Arc<AtomicBool>,
}
impl EventKey {
    /// Creates a key for a pending event.
    pub(crate) fn new() -> Self {
        // The flag is shared between all clones of the key and the dispatcher.
        let flag = Arc::new(AtomicBool::new(false));
        Self { is_cancelled: flag }
    }
    /// Checks whether the event was cancelled.
    pub(crate) fn is_cancelled(&self) -> bool {
        self.is_cancelled.load(Ordering::Relaxed)
    }
    /// Cancels the associated event, consuming the key.
    pub fn cancel(self) {
        self.is_cancelled.store(true, Ordering::Relaxed);
    }
}
/// Error returned when the scheduled time or the repetition period are invalid.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SchedulingError {
    /// The scheduled time does not lie in the future of the current simulation
    /// time.
    InvalidScheduledTime,
    /// The repetition period is zero.
    NullRepetitionPeriod,
}
impl fmt::Display for SchedulingError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Map each variant to its user-facing description.
        let msg = match self {
            Self::InvalidScheduledTime => {
                "the scheduled time should be in the future of the current simulation time"
            }
            Self::NullRepetitionPeriod => "the repetition period cannot be zero",
        };
        fmt.write_str(msg)
    }
}
impl Error for SchedulingError {}
/// Schedules an event at a future time.
///
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_event_at_unchecked<M, F, T, S>(
    time: MonotonicTime,
    func: F,
    arg: T,
    sender: Sender<M>,
    scheduler_queue: &Mutex<SchedulerQueue>,
) where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    let channel_id = sender.channel_id();
    // Type-erase the event before inserting it into the queue.
    let dispatcher = Box::new(new_event_dispatcher(func, arg, sender));
    scheduler_queue
        .lock()
        .unwrap()
        .insert((time, channel_id), dispatcher);
}
/// Schedules an event at a future time, returning an event key that can be
/// used to cancel it.
///
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_keyed_event_at_unchecked<M, F, T, S>(
    time: MonotonicTime,
    func: F,
    arg: T,
    sender: Sender<M>,
    scheduler_queue: &Mutex<SchedulerQueue>,
) -> EventKey
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    let event_key = EventKey::new();
    let channel_id = sender.channel_id();
    // The dispatcher keeps a clone of the key so cancellation can be observed.
    let dispatcher = Box::new(KeyedEventDispatcher::new(event_key.clone(), func, arg, sender));
    scheduler_queue
        .lock()
        .unwrap()
        .insert((time, channel_id), dispatcher);
    event_key
}
/// Schedules a periodic event at a future time.
///
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_periodic_event_at_unchecked<M, F, T, S>(
    time: MonotonicTime,
    period: Duration,
    func: F,
    arg: T,
    sender: Sender<M>,
    scheduler_queue: &Mutex<SchedulerQueue>,
) where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    let channel_id = sender.channel_id();
    // Type-erase the periodic event before inserting it into the queue.
    let dispatcher = Box::new(PeriodicEventDispatcher::new(func, arg, sender, period));
    scheduler_queue
        .lock()
        .unwrap()
        .insert((time, channel_id), dispatcher);
}
/// Schedules a periodic event at a future time, returning an event key that
/// can be used to cancel it.
///
/// This function does not check whether the specified time lies in the future
/// of the current simulation time.
pub(crate) fn schedule_periodic_keyed_event_at_unchecked<M, F, T, S>(
    time: MonotonicTime,
    period: Duration,
    func: F,
    arg: T,
    sender: Sender<M>,
    scheduler_queue: &Mutex<SchedulerQueue>,
) -> EventKey
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    let event_key = EventKey::new();
    let channel_id = sender.channel_id();
    // The dispatcher keeps a clone of the key so cancellation can be observed.
    let dispatcher = Box::new(PeriodicKeyedEventDispatcher::new(
        event_key.clone(),
        func,
        arg,
        sender,
        period,
    ));
    scheduler_queue
        .lock()
        .unwrap()
        .insert((time, channel_id), dispatcher);
    event_key
}
/// Trait for objects that can be converted to a future dispatching a scheduled
/// event.
pub(crate) trait ScheduledEvent: Send {
    /// Reports whether the associated event was cancelled.
    fn is_cancelled(&self) -> bool;
    /// Returns a boxed clone of this event and the repetition period if this is
    /// a periodic event, otherwise returns `None`.
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)>;
    /// Returns a boxed future dispatching the associated event.
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;
    /// Spawns the future that dispatches the associated event onto the provided
    /// executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
pin_project! {
    /// Object that can be converted to a future dispatching a non-cancellable
    /// event.
    ///
    /// Note that this particular event dispatcher is in fact already a future:
    /// since the future cannot be cancelled and the dispatcher does not need to
    /// be cloned, there is no need to defer the construction of the future.
    /// This makes `into_future` a trivial cast, which saves a boxing operation.
    pub(crate) struct EventDispatcher<F> {
        // The event-dispatching future, structurally pinned so that
        // `EventDispatcher` can itself implement `Future`.
        #[pin]
        fut: F,
    }
}
/// Constructs a new `EventDispatcher`.
///
/// NOTE(review): this constructor is a freestanding function rather than an
/// associated method, presumably so that the opaque future type can be named
/// via `impl Trait` in return position — confirm before moving it into an
/// `impl` block.
fn new_event_dispatcher<M, F, T, S>(
    func: F,
    arg: T,
    sender: Sender<M>,
) -> EventDispatcher<impl Future<Output = ()>>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    // Building the future is side-effect free: as an `async fn` future, it
    // does nothing until first polled.
    let fut = dispatch_event(func, arg, sender);
    EventDispatcher { fut }
}
impl<F> Future for EventDispatcher<F>
where
    F: Future,
{
    type Output = F::Output;
    #[inline(always)]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Simply forward to the structurally-pinned inner future.
        self.project().fut.poll(cx)
    }
}
impl<F> ScheduledEvent for EventDispatcher<F>
where
    F: Future<Output = ()> + Send + 'static,
{
    fn is_cancelled(&self) -> bool {
        // Plain events cannot be cancelled.
        false
    }
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
        // Plain events are one-shot: there is no repetition.
        None
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // No need for boxing, type coercion is enough here.
        Box::into_pin(self)
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // The dispatcher is itself a future, so it can be spawned unboxed.
        executor.spawn_and_forget(*self);
    }
}
/// Object that can be converted to a future dispatching a non-cancellable periodic
/// event.
pub(crate) struct PeriodicEventDispatcher<M, F, T, S>
where
    M: Model,
{
    /// The input function to be called on the model.
    func: F,
    /// The argument passed to the input function.
    arg: T,
    /// Sender for the target model's channel.
    sender: Sender<M>,
    /// Repetition period of the event.
    period: Duration,
    /// Marker for the input-function kind parameter.
    _input_kind: PhantomData<S>,
}
impl<M, F, T, S> PeriodicEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    /// Constructs a new `PeriodicEventDispatcher`.
    fn new(func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
        Self {
            func,
            arg,
            sender,
            period,
            _input_kind: PhantomData,
        }
    }
}
impl<M, F, T, S> ScheduledEvent for PeriodicEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    fn is_cancelled(&self) -> bool {
        // Unkeyed periodic events cannot be cancelled.
        false
    }
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
        // Clone the dispatcher so the event can be re-inserted into the
        // scheduler queue one period later.
        let event = Box::new(Self::new(
            self.func.clone(),
            self.arg.clone(),
            self.sender.clone(),
            self.period,
        ));
        Some((event, self.period))
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        let Self {
            func, arg, sender, ..
        } = *self;
        Box::pin(dispatch_event(func, arg, sender))
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // Same as `into_future`, but the future is spawned without boxing.
        let Self {
            func, arg, sender, ..
        } = *self;
        let fut = dispatch_event(func, arg, sender);
        executor.spawn_and_forget(fut);
    }
}
/// Object that can be converted to a future dispatching a cancellable event.
pub(crate) struct KeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    /// Key shared with the scheduler user; checked before dispatch.
    event_key: EventKey,
    /// The input function to be called on the model.
    func: F,
    /// The argument passed to the input function.
    arg: T,
    /// Sender for the target model's channel.
    sender: Sender<M>,
    /// Marker for the input-function kind parameter.
    _input_kind: PhantomData<S>,
}
impl<M, F, T, S> KeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    /// Constructs a new `KeyedEventDispatcher`.
    fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>) -> Self {
        Self {
            event_key,
            func,
            arg,
            sender,
            _input_kind: PhantomData,
        }
    }
}
impl<M, F, T, S> ScheduledEvent for KeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    fn is_cancelled(&self) -> bool {
        // Defer to the shared cancellation flag of the key.
        self.event_key.is_cancelled()
    }
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
        // Keyed one-shot events are not repeated.
        None
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        let Self {
            event_key,
            func,
            arg,
            sender,
            ..
        } = *self;
        Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // Same as `into_future`, but the future is spawned without boxing.
        let Self {
            event_key,
            func,
            arg,
            sender,
            ..
        } = *self;
        let fut = dispatch_keyed_event(event_key, func, arg, sender);
        executor.spawn_and_forget(fut);
    }
}
/// Object that can be converted to a future dispatching a cancellable,
/// periodically recurring event.
pub(crate) struct PeriodicKeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    /// Key shared with the scheduler user; checked before dispatch.
    event_key: EventKey,
    /// The input function to be called on the model.
    func: F,
    /// The argument passed to the input function.
    arg: T,
    /// Sender for the target model's channel.
    sender: Sender<M>,
    /// Repetition period of the event.
    period: Duration,
    /// Marker for the input-function kind parameter.
    _input_kind: PhantomData<S>,
}
impl<M, F, T, S> PeriodicKeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    /// Constructs a new `PeriodicKeyedEventDispatcher`.
    fn new(event_key: EventKey, func: F, arg: T, sender: Sender<M>, period: Duration) -> Self {
        Self {
            event_key,
            func,
            arg,
            sender,
            period,
            _input_kind: PhantomData,
        }
    }
}
impl<M, F, T, S> ScheduledEvent for PeriodicKeyedEventDispatcher<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Send + Clone + 'static,
    S: Send + 'static,
{
    fn is_cancelled(&self) -> bool {
        // Defer to the shared cancellation flag of the key.
        self.event_key.is_cancelled()
    }
    fn next(&self) -> Option<(Box<dyn ScheduledEvent>, Duration)> {
        // The key is cloned rather than recreated so that all occurrences of
        // this periodic event share the same cancellation flag.
        let event = Box::new(Self::new(
            self.event_key.clone(),
            self.func.clone(),
            self.arg.clone(),
            self.sender.clone(),
            self.period,
        ));
        Some((event, self.period))
    }
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        let Self {
            event_key,
            func,
            arg,
            sender,
            ..
        } = *self;
        Box::pin(dispatch_keyed_event(event_key, func, arg, sender))
    }
    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // Same as `into_future`, but the future is spawned without boxing.
        let Self {
            event_key,
            func,
            arg,
            sender,
            ..
        } = *self;
        let fut = dispatch_keyed_event(event_key, func, arg, sender);
        executor.spawn_and_forget(fut);
    }
}
/// Asynchronously dispatch a regular, non-cancellable event.
///
/// The event is delivered by sending a closure to the model's channel; the
/// closure builds the input-function future inside a recycled allocation
/// provided by the receiver. The send result is deliberately discarded —
/// presumably a failed send means the receiving model no longer exists;
/// confirm against `Sender::send`.
async fn dispatch_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    let _ = sender
        .send(
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                // Reuse the receiver-provided allocation for the future.
                let fut = func.call(model, arg, scheduler);
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
/// Asynchronously dispatch a cancellable event.
///
/// The cancellation flag is checked inside the future executed by the model,
/// immediately before calling the input function, so a late cancellation is
/// still honored. As in `dispatch_event`, the send result is deliberately
/// discarded.
async fn dispatch_keyed_event<M, F, T, S>(event_key: EventKey, func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    let _ = sender
        .send(
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = async move {
                    // Only perform the call if the event wasn't cancelled.
                    if !event_key.is_cancelled() {
                        func.call(model, arg, scheduler).await;
                    }
                };
                // Reuse the receiver-provided allocation for the future.
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}

View File

@ -1,7 +0,0 @@
pub(crate) mod bit;
pub(crate) mod futures;
pub(crate) mod priority_queue;
pub(crate) mod rng;
pub(crate) mod slot;
pub(crate) mod spsc_queue;
pub(crate) mod sync_cell;

View File

@ -1,393 +0,0 @@
//! Single-producer single-consumer unbounded FIFO queue that stores values in
//! fixed-size memory segments.
#![allow(unused)]
use std::cell::Cell;
use std::error::Error;
use std::fmt;
use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::panic::{RefUnwindSafe, UnwindSafe};
use std::ptr::{self, NonNull};
use std::sync::atomic::Ordering;
use crossbeam_utils::CachePadded;
use crate::loom_exports::cell::UnsafeCell;
use crate::loom_exports::sync::atomic::{AtomicBool, AtomicPtr};
use crate::loom_exports::sync::Arc;
/// The number of slots in a single segment.
const SEGMENT_LEN: usize = 32;
/// A slot containing a single value.
struct Slot<T> {
    /// Whether `value` has been written by the producer.
    ///
    /// The flag is never cleared: each slot is written and read at most once
    /// over the lifetime of its segment.
    has_value: AtomicBool,
    /// The value, written by the producer before `has_value` is set.
    value: UnsafeCell<MaybeUninit<T>>,
}
impl<T> Default for Slot<T> {
    /// Creates an empty, uninitialized slot.
    fn default() -> Self {
        Slot {
            has_value: AtomicBool::new(false),
            value: UnsafeCell::new(MaybeUninit::uninit()),
        }
    }
}
/// A memory segment containing `SEGMENT_LEN` slots.
struct Segment<T> {
    /// Address of the next segment.
    ///
    /// A null pointer means that the next segment is not allocated yet.
    next_segment: AtomicPtr<Segment<T>>,
    /// Storage for the slots of this segment.
    data: [Slot<T>; SEGMENT_LEN],
}
impl<T> Segment<T> {
    /// Allocates a new segment.
    ///
    /// The segment is leaked out of a `Box`; ownership is reclaimed with
    /// `Box::from_raw` when the consumer (or the queue's `Drop`) deallocates
    /// it.
    fn allocate_new() -> NonNull<Self> {
        let segment = Self {
            next_segment: AtomicPtr::new(ptr::null_mut()),
            data: Default::default(),
        };
        // Safety: the pointer is non-null since it comes from a box.
        unsafe { NonNull::new_unchecked(Box::into_raw(Box::new(segment))) }
    }
}
/// The head of the queue from which values are popped.
struct Head<T> {
    /// Pointer to the segment at the head of the queue.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be read.
    ///
    /// If the index is equal to the segment length, it is necessary to move to
    /// the next segment before the next value can be read.
    next_read_idx: usize,
}
/// The tail of the queue to which values are pushed.
struct Tail<T> {
    /// Pointer to the segment at the tail of the queue.
    segment: NonNull<Segment<T>>,
    /// Index of the next value to be written.
    ///
    /// If the index is equal to the segment length, a new segment must be
    /// allocated before a new value can be written.
    next_write_idx: usize,
}
/// A single-producer, single-consumer unbounded FIFO queue.
///
/// The head is only ever accessed by the consumer and the tail only by the
/// producer; both are cache-padded to avoid false sharing between the two
/// sides.
struct Queue<T> {
    head: CachePadded<UnsafeCell<Head<T>>>,
    tail: CachePadded<UnsafeCell<Tail<T>>>,
}
impl<T> Queue<T> {
    /// Creates a new queue.
    fn new() -> Self {
        // The producer and the consumer initially share the same, empty
        // segment.
        let segment = Segment::allocate_new();
        let head = Head {
            segment,
            next_read_idx: 0,
        };
        let tail = Tail {
            segment,
            next_write_idx: 0,
        };
        Self {
            head: CachePadded::new(UnsafeCell::new(head)),
            tail: CachePadded::new(UnsafeCell::new(tail)),
        }
    }
    /// Pushes a new value.
    ///
    /// # Safety
    ///
    /// The method cannot be called from multiple threads concurrently.
    unsafe fn push(&self, value: T) {
        // Safety: this is the only thread accessing the tail.
        let tail = self.tail.with_mut(|p| &mut *p);
        // If the whole segment has been written, allocate a new segment.
        if tail.next_write_idx == SEGMENT_LEN {
            let old_segment = tail.segment;
            tail.segment = Segment::allocate_new();
            // Safety: the old segment is still allocated since the consumer
            // cannot deallocate it before `next_segment` is set to a non-null
            // value.
            old_segment
                .as_ref()
                .next_segment
                .store(tail.segment.as_ptr(), Ordering::Release);
            tail.next_write_idx = 0;
        }
        // Safety: the tail segment is allocated since the consumer cannot
        // deallocate it before `next_segment` is set to a non-null value.
        let data = &tail.segment.as_ref().data[tail.next_write_idx];
        // Safety: we have exclusive access to the slot value since the consumer
        // cannot access it before `has_value` is set to true.
        data.value.with_mut(|p| (*p).write(value));
        // Ordering: this Release store synchronizes with the Acquire load in
        // `pop` and ensures that the value is visible to the consumer once
        // `has_value` reads `true`.
        data.has_value.store(true, Ordering::Release);
        tail.next_write_idx += 1;
    }
    /// Pops a new value.
    ///
    /// # Safety
    ///
    /// The method cannot be called from multiple threads concurrently.
    unsafe fn pop(&self) -> Option<T> {
        // Safety: this is the only thread accessing the head.
        let head = self.head.with_mut(|p| &mut *p);
        // If the whole segment has been read, try to move to the next segment.
        if head.next_read_idx == SEGMENT_LEN {
            // Read the next segment or return `None` if it is not ready yet.
            //
            // Safety: the head segment is still allocated since we are the only
            // thread that can deallocate it.
            let next_segment = head.segment.as_ref().next_segment.load(Ordering::Acquire);
            let next_segment = NonNull::new(next_segment)?;
            // Deallocate the old segment.
            //
            // Safety: the pointer was initialized from a box and the segment is
            // still allocated since we are the only thread that can deallocate
            // it.
            let _ = Box::from_raw(head.segment.as_ptr());
            // Update the segment and the next index.
            head.segment = next_segment;
            head.next_read_idx = 0;
        }
        let data = &head.segment.as_ref().data[head.next_read_idx];
        // Ordering: this Acquire load synchronizes with the Release store in
        // `push` and ensures that the value is visible once `has_value` reads
        // `true`.
        if !data.has_value.load(Ordering::Acquire) {
            return None;
        }
        // Safety: since `has_value` is `true` then we have exclusive ownership
        // of the value and we know that it was initialized.
        let value = data.value.with(|p| (*p).assume_init_read());
        head.next_read_idx += 1;
        Some(value)
    }
}
impl<T> Drop for Queue<T> {
    fn drop(&mut self) {
        unsafe {
            // Drop all values. Popping also deallocates every fully-read
            // segment, so only the head segment remains afterwards.
            while self.pop().is_some() {}

            // All values have been dropped: the last segment can be freed.

            // Safety: this is the only thread accessing the head since both the
            // consumer and producer have been dropped.
            let head = self.head.with_mut(|p| &mut *p);

            // Safety: the pointer was initialized from a box and the segment is
            // still allocated since we are the only thread that can deallocate
            // it.
            let _ = Box::from_raw(head.segment.as_ptr());
        }
    }
}
// SAFETY: values of type `T` are moved from the producer thread to the
// consumer thread, so `T: Send` is required.
unsafe impl<T: Send> Send for Queue<T> {}
// SAFETY: `push` and `pop` are `unsafe` and their contracts require that each
// be called from a single thread at a time, so sharing `&Queue` across threads
// is sound provided `T: Send`.
unsafe impl<T: Send> Sync for Queue<T> {}

impl<T> UnwindSafe for Queue<T> {}
impl<T> RefUnwindSafe for Queue<T> {}
/// A handle to a single-producer, single-consumer queue that can push values.
pub(crate) struct Producer<T> {
    queue: Arc<Queue<T>>,
    // `Cell` is `!Sync`, so this marker makes the handle `!Sync` and prevents
    // concurrent pushes from multiple threads through shared references.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
impl<T> Producer<T> {
    /// Pushes a value to the queue.
    ///
    /// Fails if the consumer handle was dropped: a strong count of 1 means
    /// this producer holds the only remaining reference to the queue.
    pub(crate) fn push(&self, value: T) -> Result<(), PushError> {
        if Arc::strong_count(&self.queue) == 1 {
            return Err(PushError {});
        }

        // Safety: `Producer` is `!Sync` (see `_non_sync_phantom`), so this is
        // the only thread currently pushing.
        unsafe { self.queue.push(value) };

        Ok(())
    }
}
/// Error returned when a push failed due to the consumer being dropped.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub(crate) struct PushError {}
impl fmt::Display for PushError {
    /// Writes the user-facing description of a failed push.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str("sending message into a closed mailbox")
    }
}
impl Error for PushError {}
/// A handle to a single-producer, single-consumer queue that can pop values.
pub(crate) struct Consumer<T> {
    queue: Arc<Queue<T>>,
    // `Cell` is `!Sync`, so this marker makes the handle `!Sync` and prevents
    // concurrent pops from multiple threads through shared references.
    _non_sync_phantom: PhantomData<Cell<()>>,
}
impl<T> Consumer<T> {
    /// Pops a value from the queue.
    ///
    /// Returns `None` when no value is currently visible to the consumer.
    pub(crate) fn pop(&self) -> Option<T> {
        // Safety: `Consumer` is `!Sync` (see `_non_sync_phantom`), so this is
        // the only thread currently popping.
        unsafe { self.queue.pop() }
    }
}
/// Creates the producer and consumer handles of a single-producer,
/// single-consumer queue.
pub(crate) fn spsc_queue<T>() -> (Producer<T>, Consumer<T>) {
    // Both handles share ownership of the same queue.
    let inner = Arc::new(Queue::new());

    (
        Producer {
            queue: Arc::clone(&inner),
            _non_sync_phantom: PhantomData,
        },
        Consumer {
            queue: inner,
            _non_sync_phantom: PhantomData,
        },
    )
}
/// Regular (non-Loom) tests.
#[cfg(all(test, not(asynchronix_loom)))]
mod tests {
    use super::*;

    use std::thread;

    #[test]
    fn spsc_queue_basic() {
        // Use a smaller workload under Miri to keep the run time reasonable.
        const VALUE_COUNT: usize = if cfg!(miri) { 1000 } else { 100_000 };

        let (producer, consumer) = spsc_queue();

        // Consumer thread: busy-waits for each value and checks FIFO order.
        let th = thread::spawn(move || {
            for i in 0..VALUE_COUNT {
                let value = loop {
                    if let Some(v) = consumer.pop() {
                        break v;
                    }
                };

                assert_eq!(value, i);
            }
        });

        for i in 0..VALUE_COUNT {
            producer.push(i).unwrap();
        }

        th.join().unwrap();
    }
}
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
mod tests {
    use super::*;

    use loom::model::Builder;
    use loom::thread;

    #[test]
    fn loom_spsc_queue_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT: usize = 10;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = spsc_queue();

            let th = thread::spawn(move || {
                let mut value = 0;
                for _ in 0..VALUE_COUNT {
                    // The consumer does not retry on `None`, so a given
                    // execution may observe only a prefix of the values; the
                    // ones observed must still be in FIFO order.
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });

            for i in 0..VALUE_COUNT {
                let _ = producer.push(i);
            }

            th.join().unwrap();
        });
    }

    #[test]
    fn loom_spsc_queue_new_segment() {
        const DEFAULT_PREEMPTION_BOUND: usize = 4;
        const VALUE_COUNT_BEFORE: usize = 5;
        const VALUE_COUNT_AFTER: usize = 5;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (producer, consumer) = spsc_queue();

            // Fill up the first segment except for the last `VALUE_COUNT_BEFORE`
            // slots, so that the segment boundary is crossed while both threads
            // are running.
            for i in 0..(SEGMENT_LEN - VALUE_COUNT_BEFORE) {
                producer.push(i).unwrap();
                consumer.pop();
            }

            let th = thread::spawn(move || {
                let mut value = SEGMENT_LEN - VALUE_COUNT_BEFORE;
                for _ in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                    if let Some(v) = consumer.pop() {
                        assert_eq!(v, value);
                        value += 1;
                    }
                }
            });

            for i in (SEGMENT_LEN - VALUE_COUNT_BEFORE)..(SEGMENT_LEN + VALUE_COUNT_AFTER) {
                let _ = producer.push(i);
            }

            th.join().unwrap();
        });
    }
}

View File

@ -1,225 +0,0 @@
//! Event scheduling within `Model` input methods.
use std::time::Duration;
use asynchronix::model::{Model, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
#[test]
fn model_schedule_event() {
    #[derive(Default)]
    struct TestModel {
        output: Output<()>,
    }
    impl TestModel {
        // Schedules `action` 2 s into the future upon receiving `trigger`.
        fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
            scheduler
                .schedule_event(scheduler.time() + Duration::from_secs(2), Self::action, ())
                .unwrap();
        }
        async fn action(&mut self) {
            self.output.send(()).await;
        }
    }
    impl Model for TestModel {}

    let mut model = TestModel::default();
    let mbox = Mailbox::new();

    let mut output = model.output.connect_stream().0;
    let addr = mbox.address();

    let t0 = MonotonicTime::EPOCH;
    let mut simu = SimInit::new().add_model(model, mbox).init(t0);

    simu.send_event(TestModel::trigger, (), addr);

    // The scheduled event should fire exactly 2 s after `t0`.
    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(2));
    assert!(output.next().is_some());
    simu.step();
    assert!(output.next().is_none());
}
#[test]
fn model_cancel_future_keyed_event() {
    #[derive(Default)]
    struct TestModel {
        output: Output<i32>,
        key: Option<EventKey>,
    }
    impl TestModel {
        // Schedules `action1` at t+1s and a keyed `action2` at t+2s; `action1`
        // cancels `action2` before it can fire.
        fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
            scheduler
                .schedule_event(scheduler.time() + Duration::from_secs(1), Self::action1, ())
                .unwrap();
            self.key = scheduler
                .schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ())
                .ok();
        }
        async fn action1(&mut self) {
            self.output.send(1).await;
            // Cancel the call to `action2`.
            self.key.take().unwrap().cancel();
        }
        async fn action2(&mut self) {
            self.output.send(2).await;
        }
    }
    impl Model for TestModel {}

    let mut model = TestModel::default();
    let mbox = Mailbox::new();

    let mut output = model.output.connect_stream().0;
    let addr = mbox.address();

    let t0 = MonotonicTime::EPOCH;
    let mut simu = SimInit::new().add_model(model, mbox).init(t0);

    simu.send_event(TestModel::trigger, (), addr);

    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(1));
    assert_eq!(output.next(), Some(1));
    // `action2` was cancelled: time does not advance and nothing is emitted.
    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(1));
    assert!(output.next().is_none());
}
#[test]
fn model_cancel_same_time_keyed_event() {
    #[derive(Default)]
    struct TestModel {
        output: Output<i32>,
        key: Option<EventKey>,
    }
    impl TestModel {
        // Both actions are scheduled for the same time; `action1` cancels
        // `action2` when it runs, so cancellation must work even at the very
        // time stamp the event is due.
        fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
            scheduler
                .schedule_event(scheduler.time() + Duration::from_secs(2), Self::action1, ())
                .unwrap();
            self.key = scheduler
                .schedule_keyed_event(scheduler.time() + Duration::from_secs(2), Self::action2, ())
                .ok();
        }
        async fn action1(&mut self) {
            self.output.send(1).await;
            // Cancel the call to `action2`.
            self.key.take().unwrap().cancel();
        }
        async fn action2(&mut self) {
            self.output.send(2).await;
        }
    }
    impl Model for TestModel {}

    let mut model = TestModel::default();
    let mbox = Mailbox::new();

    let mut output = model.output.connect_stream().0;
    let addr = mbox.address();

    let t0 = MonotonicTime::EPOCH;
    let mut simu = SimInit::new().add_model(model, mbox).init(t0);

    simu.send_event(TestModel::trigger, (), addr);

    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(2));
    assert_eq!(output.next(), Some(1));
    assert!(output.next().is_none());
    simu.step();
    assert!(output.next().is_none());
}
#[test]
fn model_schedule_periodic_event() {
    #[derive(Default)]
    struct TestModel {
        output: Output<i32>,
    }
    impl TestModel {
        // First occurrence at t+2s, then repeating every 3s with payload 42.
        fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
            scheduler
                .schedule_periodic_event(
                    scheduler.time() + Duration::from_secs(2),
                    Duration::from_secs(3),
                    Self::action,
                    42,
                )
                .unwrap();
        }
        async fn action(&mut self, payload: i32) {
            self.output.send(payload).await;
        }
    }
    impl Model for TestModel {}

    let mut model = TestModel::default();
    let mbox = Mailbox::new();

    let mut output = model.output.connect_stream().0;
    let addr = mbox.address();

    let t0 = MonotonicTime::EPOCH;
    let mut simu = SimInit::new().add_model(model, mbox).init(t0);

    simu.send_event(TestModel::trigger, (), addr);

    // Move to the next events at t0 + 2s + k*3s.
    for k in 0..10 {
        simu.step();
        assert_eq!(
            simu.time(),
            t0 + Duration::from_secs(2) + k * Duration::from_secs(3)
        );
        assert_eq!(output.next(), Some(42));
        assert!(output.next().is_none());
    }
}
#[test]
fn model_cancel_periodic_event() {
    #[derive(Default)]
    struct TestModel {
        output: Output<()>,
        key: Option<EventKey>,
    }
    impl TestModel {
        // Schedules a keyed periodic event; its first occurrence cancels all
        // subsequent ones.
        fn trigger(&mut self, _: (), scheduler: &Scheduler<Self>) {
            self.key = scheduler
                .schedule_keyed_periodic_event(
                    scheduler.time() + Duration::from_secs(2),
                    Duration::from_secs(3),
                    Self::action,
                    (),
                )
                .ok();
        }
        async fn action(&mut self) {
            self.output.send(()).await;
            // Cancel the next events.
            self.key.take().unwrap().cancel();
        }
    }
    impl Model for TestModel {}

    let mut model = TestModel::default();
    let mbox = Mailbox::new();

    let mut output = model.output.connect_stream().0;
    let addr = mbox.address();

    let t0 = MonotonicTime::EPOCH;
    let mut simu = SimInit::new().add_model(model, mbox).init(t0);

    simu.send_event(TestModel::trigger, (), addr);

    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(2));
    assert!(output.next().is_some());
    assert!(output.next().is_none());

    // The periodic event was cancelled on its first occurrence: time does not
    // advance further.
    simu.step();
    assert_eq!(simu.time(), t0 + Duration::from_secs(2));
    assert!(output.next().is_none());
}

View File

@ -1,4 +0,0 @@
#[cfg(not(asynchronix_loom))]
mod model_scheduling;
#[cfg(not(asynchronix_loom))]
mod simulation_scheduling;

10
nexosim-util/Cargo.toml Normal file
View File

@ -0,0 +1,10 @@
[package]
name = "nexosim-util"
version = "0.1.0"
edition = "2021"
[dependencies]
nexosim = { version = "0.3.1", path = "../nexosim" }
[dev-dependencies]
rand = "0.8"

5
nexosim-util/README.md Normal file
View File

@ -0,0 +1,5 @@
# Utilities for model-building
This crate contains utilities used for model and simulation bench development.

View File

@ -0,0 +1,304 @@
//! Example: processor with observable states.
//!
//! This example demonstrates in particular:
//!
//! * the use of observable states,
//! * state machine with delays.
//!
//! ```text
//! ┌───────────┐
//! Switch ON/OFF ●────►│ ├────► Mode
//! │ Processor │
//! Process data ●────►│ ├────► Value
//! │ │
//! │ ├────► House Keeping
//! └───────────┘
//! ```
use std::time::Duration;
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{AutoActionKey, Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
use nexosim_util::observables::{Observable, ObservableState, ObservableValue};
/// House keeping TM.
///
/// Both readings default to zero, i.e. an unpowered state. The manual
/// `Default` impl only zeroed the fields, which is exactly what the derived
/// implementation produces for `f64`, so it is derived instead (clippy:
/// `derivable_impls`).
#[derive(Clone, Copy, Debug, Default, PartialEq)]
pub struct Hk {
    /// Supply voltage reading.
    pub voltage: f64,
    /// Supply current reading.
    pub current: f64,
}
/// Processor mode ID.
///
/// The observable projection of [`State`], reported on the `mode` output.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum ModeId {
    #[default]
    Off,
    Idle,
    Processing,
}
/// Processor state.
#[derive(Default)]
pub enum State {
    #[default]
    Off,
    Idle,
    /// Processing in progress; holds the key of the scheduled
    /// `finish_processing` event (see `Processor::process`).
    Processing(AutoActionKey),
}
impl Observable<ModeId> for State {
    /// Maps the internal state to its observable mode ID.
    fn observe(&self) -> ModeId {
        match *self {
            State::Off => ModeId::Off,
            State::Idle => ModeId::Idle,
            State::Processing(_) => ModeId::Processing,
        }
    }
}
/// Processor model.
pub struct Processor {
    /// Mode output.
    pub mode: Output<ModeId>,
    /// Calculated value output.
    pub value: Output<u16>,
    /// HK output.
    pub hk: Output<Hk>,
    /// Internal state, mirrored on the `mode` output as a `ModeId`.
    state: ObservableState<State, ModeId>,
    /// Accumulator, mirrored on the `value` output.
    acc: ObservableValue<u16>,
    /// Electrical data, mirrored on the `hk` output.
    elc: ObservableValue<Hk>,
}
impl Processor {
    /// Create a new processor.
    ///
    /// Each observable wraps a clone of the corresponding public output, so
    /// setting the observable also emits on that output.
    pub fn new() -> Self {
        let mode = Output::new();
        let value = Output::new();
        let hk = Output::new();
        Self {
            mode: mode.clone(),
            value: value.clone(),
            hk: hk.clone(),
            state: ObservableState::new(mode),
            acc: ObservableValue::new(value),
            elc: ObservableValue::new(hk),
        }
    }

    /// Switch processor ON/OFF.
    pub async fn switch_power(&mut self, on: bool) {
        if on {
            // Power-on: idle mode, nominal voltage/current, counter reset.
            self.state.set(State::Idle).await;
            self.elc
                .set(Hk {
                    voltage: 5.5,
                    current: 0.1,
                })
                .await;
            self.acc.set(0).await;
        } else {
            self.state.set(State::Off).await;
            self.elc.set(Hk::default()).await;
            self.acc.set(0).await;
        }
    }

    /// Process data for dt milliseconds.
    ///
    /// Ignored while the processor is off. Re-triggering while already
    /// processing replaces the stored `AutoActionKey`, cancelling the
    /// previously pending `finish_processing` (see the cancellation scenario
    /// exercised in `main`).
    pub async fn process(&mut self, dt: u64, cx: &mut Context<Self>) {
        if matches!(self.state.observe(), ModeId::Idle | ModeId::Processing) {
            self.state
                .set(State::Processing(
                    cx.schedule_keyed_event(Duration::from_millis(dt), Self::finish_processing, ())
                        .unwrap()
                        .into_auto(),
                ))
                .await;
            self.elc.modify(|hk| hk.current = 1.0).await;
        }
    }

    /// Finish processing.
    ///
    /// Returns to idle, increments the accumulator and restores the idle
    /// current draw.
    async fn finish_processing(&mut self) {
        self.state.set(State::Idle).await;
        self.acc.modify(|a| *a += 1).await;
        self.elc.modify(|hk| hk.current = 0.1).await;
    }
}
impl Model for Processor {
    /// Propagate all internal states.
    ///
    /// Pushes the initial mode, accumulator and HK values to the connected
    /// sinks before the simulation starts.
    async fn init(mut self, _: &mut Context<Self>) -> InitializedModel<Self> {
        self.state.propagate().await;
        self.acc.propagate().await;
        self.elc.propagate().await;
        self.into()
    }
}
/// Runs the processor bench and checks the observable outputs after each
/// stimulus.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------

    // Models.
    let mut proc = Processor::new();

    // Mailboxes.
    let proc_mbox = Mailbox::new();

    // Model handles for simulation.
    let mut mode = EventBuffer::new();
    let mut value = EventBuffer::new();
    let mut hk = EventBuffer::new();
    proc.mode.connect_sink(&mode);
    proc.value.connect_sink(&value);
    proc.hk.connect_sink(&hk);
    let proc_addr = proc_mbox.address();

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let mut simu = SimInit::new()
        .add_model(proc, proc_mbox, "proc")
        .init(t0)?
        .0;

    // ----------
    // Simulation.
    // ----------

    // Initial state (propagated by `Processor::init`).
    expect(
        &mut mode,
        Some(ModeId::Off),
        &mut value,
        Some(0),
        &mut hk,
        0.0,
        0.0,
    );

    // Switch processor on.
    simu.process_event(Processor::switch_power, true, &proc_addr)?;
    expect(
        &mut mode,
        Some(ModeId::Idle),
        &mut value,
        Some(0),
        &mut hk,
        5.5,
        0.1,
    );

    // Trigger processing.
    simu.process_event(Processor::process, 100, &proc_addr)?;
    expect(
        &mut mode,
        Some(ModeId::Processing),
        &mut value,
        None,
        &mut hk,
        5.5,
        1.0,
    );

    // All data processed.
    simu.step_until(Duration::from_millis(101))?;
    expect(
        &mut mode,
        Some(ModeId::Idle),
        &mut value,
        Some(1),
        &mut hk,
        5.5,
        0.1,
    );

    // Trigger long processing.
    simu.process_event(Processor::process, 100, &proc_addr)?;
    expect(
        &mut mode,
        Some(ModeId::Processing),
        &mut value,
        None,
        &mut hk,
        5.5,
        1.0,
    );

    // Trigger short processing, it cancels the previous one.
    simu.process_event(Processor::process, 10, &proc_addr)?;
    expect(
        &mut mode,
        Some(ModeId::Processing),
        &mut value,
        None,
        &mut hk,
        5.5,
        1.0,
    );

    // Wait for short processing to finish, check results.
    simu.step_until(Duration::from_millis(11))?;
    expect(
        &mut mode,
        Some(ModeId::Idle),
        &mut value,
        Some(2),
        &mut hk,
        5.5,
        0.1,
    );

    // Wait long enough, no state change as the long processing has been
    // cancelled.
    simu.step_until(Duration::from_millis(100))?;
    assert_eq!(mode.next(), None);
    assert_eq!(value.next(), None);
    assert_eq!(hk.next(), None);

    Ok(())
}
// Check observable state.
//
// Pops one event from each buffer and compares against the expected mode and
// value (`None` means no event is expected on that buffer); an HK event is
// always expected and compared with a float tolerance via `same`.
fn expect(
    mode: &mut EventBuffer<ModeId>,
    mode_ex: Option<ModeId>,
    value: &mut EventBuffer<u16>,
    value_ex: Option<u16>,
    hk: &mut EventBuffer<Hk>,
    voltage_ex: f64,
    current_ex: f64,
) {
    assert_eq!(mode.next(), mode_ex);
    assert_eq!(value.next(), value_ex);
    let hk_value = hk.next().unwrap();
    assert!(same(hk_value.voltage, voltage_ex));
    assert!(same(hk_value.current, current_ex));
}
// Compare two voltages or currents for approximate equality (1e-12 absolute
// tolerance).
fn same(a: f64, b: f64) -> bool {
    let diff = a - b;
    diff.abs() < 1e-12
}

View File

@ -0,0 +1,276 @@
//! Example: RIU acquiring data from sensor.
//!
//! This example demonstrates in particular:
//!
//! * the use of replier port adaptor,
//! * periodic model self-scheduling.
//!
//! ```text
//! ┌────────┐ ┌─────────┐ Sensor TC ┌─────┐
//! Set temperature ●────►│ │ ◄Sensor TC │ │◄────────────┤ │
//! │ Sensor │◄►────────────►◄│ Adaptor │ Sensor TM │ RIU ├────► RIU TM
//! Set illuminance ●────►│ │ Sensor TM► │ ├────────────►│ │
//! └────────┘ └─────────┘ └─────┘
//! ```
use std::fmt::Debug;
use std::time::Duration;
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
use nexosim_util::combinators::ReplierAdaptor;
const DELTA: Duration = Duration::from_millis(2);
const PERIOD: Duration = Duration::from_secs(1);
/// Sensor TC.
///
/// Telecommands accepted by the sensor's `process_tc` replier port.
#[derive(Clone, Debug, PartialEq)]
pub enum SensorTc {
    GetTemp,
    GetIllum,
}
/// Sensor TM.
///
/// Telemetry replies produced by `Sensor::process_tc`.
#[derive(Clone, Debug, PartialEq)]
pub enum SensorTm {
    Temp(f64),
    Illum(f64),
}
/// Sensor model.
///
/// Replies to `SensorTc` requests with its current readings.
pub struct Sensor {
    /// Temperature [deg C] -- internal state.
    temp: f64,
    /// Illuminance [lx] -- internal state.
    illum: f64,
}
impl Sensor {
    /// Creates a sensor model with zeroed readings.
    pub fn new() -> Self {
        Self {
            temp: 0.0,
            illum: 0.0,
        }
    }

    /// Sets sensor temperature [deg C].
    pub async fn set_temp(&mut self, temp: f64) {
        self.temp = temp;
    }

    /// Sets sensor illuminance [lx].
    pub async fn set_illum(&mut self, illum: f64) {
        self.illum = illum;
    }

    /// Processes sensor TC -- input port.
    ///
    /// Replies with the TM matching the requested TC.
    pub async fn process_tc(&mut self, tc: SensorTc) -> SensorTm {
        match tc {
            SensorTc::GetTemp => SensorTm::Temp(self.temp),
            SensorTc::GetIllum => SensorTm::Illum(self.illum),
        }
    }
}

impl Model for Sensor {}
/// Internal TM field.
///
/// Pairs a TM value with a readiness flag used to detect when all fields of a
/// report have been refreshed in the current acquisition cycle.
#[derive(Clone, Debug, PartialEq)]
pub struct TmField<T>
where
    T: Clone + Debug + PartialEq,
{
    /// TM value.
    pub value: T,
    /// TM readiness flag.
    pub ready: bool,
}
/// RIU TM.
#[derive(Clone, Debug, PartialEq)]
pub struct RiuTm {
    /// Temperature [deg C].
    temp: f64,
    /// Illuminance [lx].
    illum: f64,
}
/// RIU model.
///
/// Periodically polls the sensor and emits an aggregated TM report once both
/// readings have been refreshed.
pub struct Riu {
    /// Sensor TC -- output port.
    pub sensor_tc: Output<SensorTc>,
    /// RIU TM -- output port.
    pub tm: Output<RiuTm>,
    /// Temperature [deg C] -- internal state.
    temp: TmField<f64>,
    /// Illuminance [lx] -- internal state.
    illum: TmField<f64>,
}
impl Riu {
    /// Creates an RIU model.
    pub fn new() -> Self {
        Self {
            sensor_tc: Output::new(),
            tm: Output::new(),
            temp: TmField {
                value: 0.0,
                ready: true,
            },
            illum: TmField {
                value: 0.0,
                ready: true,
            },
        }
    }

    /// Processes sensor TM -- input port.
    pub async fn sensor_tm(&mut self, tm: SensorTm) {
        match tm {
            SensorTm::Temp(temp) => {
                self.temp = TmField {
                    value: temp,
                    ready: true,
                }
            }
            SensorTm::Illum(illum) => {
                self.illum = TmField {
                    value: illum,
                    ready: true,
                }
            }
        }

        // Report only once both fields have been refreshed in this
        // acquisition cycle.
        if self.temp.ready && self.illum.ready {
            self.report().await
        }
    }

    /// Starts sensor TM acquisition -- periodic activity.
    ///
    /// Clears both readiness flags, then requests fresh readings.
    async fn acquire(&mut self) {
        self.temp.ready = false;
        self.illum.ready = false;
        self.sensor_tc.send(SensorTc::GetTemp).await;
        self.sensor_tc.send(SensorTc::GetIllum).await
    }

    /// Reports RIU TM.
    async fn report(&mut self) {
        self.tm
            .send(RiuTm {
                temp: self.temp.value,
                illum: self.illum.value,
            })
            .await
    }
}
impl Model for Riu {
    /// Initializes model.
    ///
    /// Schedules the periodic acquisition starting at `DELTA` and repeating
    /// every `PERIOD`.
    async fn init(self, cx: &mut Context<Self>) -> InitializedModel<Self> {
        // Schedule periodic acquisition.
        cx.schedule_periodic_event(DELTA, PERIOD, Riu::acquire, ())
            .unwrap();

        self.into()
    }
}
/// Runs the sensor/adaptor/RIU bench and checks the aggregated TM reports.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------

    // Models.
    let sensor = Sensor::new();
    let mut riu = Riu::new();
    let mut sensor_adaptor = ReplierAdaptor::new();

    // Mailboxes.
    let sensor_mbox = Mailbox::new();
    let riu_mbox = Mailbox::new();
    let sensor_adaptor_mbox = Mailbox::new();

    // Connections: RIU -> adaptor -> sensor, replies back via the adaptor.
    riu.sensor_tc
        .connect(ReplierAdaptor::input, &sensor_adaptor_mbox);
    sensor_adaptor.output.connect(Riu::sensor_tm, &riu_mbox);
    sensor_adaptor
        .requestor
        .connect(Sensor::process_tc, &sensor_mbox);

    // Model handles for simulation.
    let mut tm = EventBuffer::new();
    let sensor_addr = sensor_mbox.address();
    riu.tm.connect_sink(&tm);

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let mut simu = SimInit::new()
        .add_model(sensor, sensor_mbox, "sensor")
        .add_model(riu, riu_mbox, "riu")
        .add_model(sensor_adaptor, sensor_adaptor_mbox, "sensor_adaptor")
        .init(t0)?
        .0;

    // ----------
    // Simulation.
    // ----------

    // Initial state: no RIU TM.
    assert_eq!(tm.next(), None);

    simu.step_until(Duration::from_millis(1200))?;

    // RIU TM generated.
    assert_eq!(
        tm.next(),
        Some(RiuTm {
            temp: 0.0,
            illum: 0.0
        })
    );

    // Consume all RIU TM generated so far.
    while tm.next().is_some() {}

    // Set temperature and wait for RIU TM.
    simu.process_event(Sensor::set_temp, 2.0, &sensor_addr)?;
    simu.step_until(Duration::from_millis(1000))?;

    assert_eq!(
        tm.next(),
        Some(RiuTm {
            temp: 2.0,
            illum: 0.0
        })
    );

    // Set illuminance and wait for RIU TM.
    simu.process_event(Sensor::set_illum, 3.0, &sensor_addr)?;
    simu.step_until(Duration::from_millis(1000))?;

    assert_eq!(
        tm.next(),
        Some(RiuTm {
            temp: 2.0,
            illum: 3.0
        })
    );

    Ok(())
}

View File

@ -0,0 +1,274 @@
//! Example: a simulation that runs infinitely until stopped. This setup is
//! typical for hardware-in-the-loop use case. The test scenario is driven by
//! simulation events.
//!
//! This example demonstrates in particular:
//!
//! * infinite simulation,
//! * blocking event queue,
//! * simulation halting,
//! * system clock,
//! * periodic scheduling,
//! * observable state.
//!
//! ```text
//! ┏━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┓
//! ┃ Simulation ┃
//! ┃ ┌──────────┐ ┌──────────┐mode ┃
//! ┃ │ │pulses │ ├──────╂┐BlockingEventQueue
//! ┃ │ Detector ├──────►│ Counter │count ┃├───────────────────►
//! ┃ │ │ │ ├──────╂┘
//! ┃ └──────────┘ └──────────┘ ┃
//! ┗━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┛
//! ```
use std::future::Future;
use std::thread;
use std::time::Duration;
use rand::Rng;
use nexosim::model::{Context, Model};
use nexosim::ports::{BlockingEventQueue, Output};
use nexosim::simulation::{ActionKey, ExecutionError, Mailbox, SimInit, SimulationError};
use nexosim::time::{AutoSystemClock, MonotonicTime};
use nexosim_util::helper_models::Ticker;
use nexosim_util::observables::ObservableValue;
const SWITCH_ON_DELAY: Duration = Duration::from_secs(1);
const MAX_PULSE_PERIOD: u64 = 100;
const TICK: Duration = Duration::from_millis(100);
const N: u64 = 10;
/// Counter mode.
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub enum Mode {
    /// Counter is powered off (initial state).
    #[default]
    Off,
    /// Counter is powered on and counting pulses.
    On,
}
/// Simulation event.
///
/// Aggregates both counter outputs into a single type for the observer queue.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum Event {
    /// Counter mode change.
    Mode(Mode),
    /// New pulses count.
    Count(u64),
}
/// The `Counter` Model.
pub struct Counter {
    /// Operation mode.
    pub mode: Output<Mode>,
    /// Pulses count.
    pub count: Output<u64>,
    /// Internal state, mirrored on the `mode` output.
    state: ObservableValue<Mode>,
    /// Counter, mirrored on the `count` output.
    acc: ObservableValue<u64>,
}
impl Counter {
    /// Creates a new `Counter` model.
    ///
    /// Each observable wraps a clone of the corresponding public output.
    fn new() -> Self {
        let mode = Output::default();
        let count = Output::default();
        Self {
            mode: mode.clone(),
            count: count.clone(),
            state: ObservableValue::new(mode),
            acc: ObservableValue::new(count),
        }
    }

    /// Power -- input port.
    pub async fn power_in(&mut self, on: bool, cx: &mut Context<Self>) {
        match *self.state {
            // Powering on takes effect after `SWITCH_ON_DELAY`.
            Mode::Off if on => cx
                .schedule_event(SWITCH_ON_DELAY, Self::switch_on, ())
                .unwrap(),
            // Powering off is immediate.
            Mode::On if !on => self.switch_off().await,
            _ => (),
        };
    }

    /// Pulse -- input port.
    pub async fn pulse(&mut self) {
        self.acc.modify(|x| *x += 1).await;
    }

    /// Switches `Counter` on.
    async fn switch_on(&mut self) {
        self.state.set(Mode::On).await;
    }

    /// Switches `Counter` off.
    async fn switch_off(&mut self) {
        self.state.set(Mode::Off).await;
    }
}

impl Model for Counter {}
/// Detector model that produces pulses.
///
/// Pulses are emitted at random intervals while the detector is on.
pub struct Detector {
    /// Output pulse.
    pub pulse: Output<()>,
    /// `ActionKey` of the next scheduled detection.
    next: Option<ActionKey>,
}
impl Detector {
    /// Creates a new `Detector` model.
    pub fn new() -> Self {
        Self {
            pulse: Output::default(),
            next: None,
        }
    }

    /// Switches `Detector` on -- input port.
    pub async fn switch_on(&mut self, _: (), cx: &mut Context<Self>) {
        self.schedule_next(cx).await;
    }

    /// Switches `Detector` off -- input port.
    pub async fn switch_off(&mut self) {
        // NOTE(review): this only drops the stored key; confirm against
        // `ActionKey` semantics whether the already-scheduled pulse is
        // cancelled or fires once more.
        self.next = None;
    }

    /// Generates a pulse.
    ///
    /// Note: self-scheduling async methods must be for now defined with an
    /// explicit signature instead of `async fn` due to a rustc issue.
    fn pulse<'a>(
        &'a mut self,
        _: (),
        cx: &'a mut Context<Self>,
    ) -> impl Future<Output = ()> + Send + 'a {
        async move {
            self.pulse.send(()).await;
            self.schedule_next(cx).await;
        }
    }

    /// Schedules the next detection.
    ///
    /// The delay is drawn uniformly from 1..MAX_PULSE_PERIOD milliseconds.
    async fn schedule_next(&mut self, cx: &mut Context<Self>) {
        let next = {
            let mut rng = rand::thread_rng();
            rng.gen_range(1..MAX_PULSE_PERIOD)
        };
        self.next = Some(
            cx.schedule_keyed_event(Duration::from_millis(next), Self::pulse, ())
                .unwrap(),
        );
    }
}

impl Model for Detector {}
/// Runs an infinite, system-clocked simulation on a background thread and
/// drives the scenario from the main thread via the blocking event queue.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------

    // Models.

    // The detector model that produces pulses.
    let mut detector = Detector::new();

    // The counter model.
    let mut counter = Counter::new();

    // The ticker model that keeps simulation alive.
    let ticker = Ticker::new(TICK);

    // Mailboxes.
    let detector_mbox = Mailbox::new();
    let counter_mbox = Mailbox::new();
    let ticker_mbox = Mailbox::new();

    // Connections.
    detector.pulse.connect(Counter::pulse, &counter_mbox);

    // Model handles for simulation.
    let detector_addr = detector_mbox.address();
    let counter_addr = counter_mbox.address();

    // Both counter outputs are funneled into one blocking queue as `Event`s.
    let observer = BlockingEventQueue::new();
    counter
        .mode
        .map_connect_sink(|m| Event::Mode(*m), &observer);
    counter
        .count
        .map_connect_sink(|c| Event::Count(*c), &observer);
    let mut observer = observer.into_reader();

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let (mut simu, mut scheduler) = SimInit::new()
        .add_model(detector, detector_mbox, "detector")
        .add_model(counter, counter_mbox, "counter")
        .add_model(ticker, ticker_mbox, "ticker")
        .set_clock(AutoSystemClock::new())
        .init(t0)?;

    // Simulation thread.
    let simulation_handle = thread::spawn(move || {
        // ---------- Simulation.  ----------
        // Infinitely kept alive by the ticker model until halted.
        simu.step_unbounded()
    });

    // Switch the counter on.
    scheduler.schedule_event(
        Duration::from_millis(1),
        Counter::power_in,
        true,
        counter_addr,
    )?;

    // Wait until counter mode is `On`.
    loop {
        let event = observer.next();
        match event {
            Some(Event::Mode(Mode::On)) => {
                break;
            }
            None => panic!("Simulation exited unexpectedly"),
            _ => (),
        }
    }

    // Switch the detector on.
    scheduler.schedule_event(
        Duration::from_millis(100),
        Detector::switch_on,
        (),
        detector_addr,
    )?;

    // Wait until `N` detections.
    loop {
        let event = observer.next();
        match event {
            Some(Event::Count(c)) if c >= N => {
                break;
            }
            None => panic!("Simulation exited unexpectedly"),
            _ => (),
        }
    }

    // Stop the simulation.
    scheduler.halt();
    // A `Halted` error is the expected way for a halted run to end.
    match simulation_handle.join().unwrap() {
        Err(ExecutionError::Halted) => Ok(()),
        Err(e) => Err(e.into()),
        _ => Ok(()),
    }
}

View File

@ -0,0 +1,174 @@
//! Example: sensor reading data from environment model.
//!
//! This example demonstrates in particular:
//!
//! * cyclical self-scheduling methods,
//! * model initialization,
//! * simulation monitoring with buffered event sinks,
//! * connection with mapping,
//! * UniRequestor port.
//!
//! ```text
//! ┌─────────────┐ ┌──────────┐
//! │ │ temperature │ │ overheat
//! Temperature ●─────────►│ Environment ├──────────────►│ Sensor ├──────────►
//! │ │ │ │
//! └─────────────┘ └──────────┘
//! ```
use std::time::Duration;
use nexosim_util::observables::ObservableValue;
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventBuffer, Output, UniRequestor};
use nexosim::simulation::{Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
/// Sensor model.
pub struct Sensor {
    /// Temperature [deg C] -- requestor port.
    pub temp: UniRequestor<(), f64>,
    /// Overheat detection [-] -- output port.
    pub overheat: Output<bool>,
    /// Temperature threshold [deg C] -- parameter.
    threshold: f64,
    /// Overheat detection [-] -- observable state, mirrored on `overheat`.
    oh: ObservableValue<bool>,
}
impl Sensor {
    /// Creates new Sensor with overheat threshold set [deg C].
    pub fn new(threshold: f64, temp: UniRequestor<(), f64>) -> Self {
        let overheat = Output::new();
        Self {
            temp,
            overheat: overheat.clone(),
            threshold,
            oh: ObservableValue::new(overheat),
        }
    }

    /// Cyclically scheduled method that reads data from environment and
    /// evaluates overheat state.
    ///
    /// The overheat flag is only rewritten (and thus emitted on the output)
    /// when its value actually changes.
    pub async fn tick(&mut self) {
        let temp = self.temp.send(()).await.unwrap();
        if temp > self.threshold {
            if !self.oh.get() {
                self.oh.set(true).await;
            }
        } else if *self.oh.get() {
            self.oh.set(false).await;
        }
    }
}
impl Model for Sensor {
    /// Propagate state and schedule cyclic method.
    async fn init(mut self, context: &mut Context<Self>) -> InitializedModel<Self> {
        // Emit the initial overheat state.
        self.oh.propagate().await;
        // Poll the environment every 500 ms.
        context
            .schedule_periodic_event(
                Duration::from_millis(500),
                Duration::from_millis(500),
                Self::tick,
                (),
            )
            .unwrap();
        self.into()
    }
}
/// Environment model.
///
/// Supplies the ambient temperature to the sensor on request.
pub struct Env {
    /// Temperature [deg F] -- internal state.
    temp: f64,
}
impl Env {
    /// Creates new environment model with the temperature [deg F] set.
    pub fn new(temp: f64) -> Self {
        Self { temp }
    }

    /// Sets temperature [deg F].
    pub async fn set_temp(&mut self, temp: f64) {
        self.temp = temp;
    }

    /// Gets temperature [deg F] -- replier port (see the requestor
    /// connection in `main`).
    pub async fn get_temp(&mut self, _: ()) -> f64 {
        self.temp
    }
}

impl Model for Env {}
/// Converts Fahrenheit to Celsius.
pub fn fahr_to_cels(t: f64) -> f64 {
    // Same value as `5.0 * (t - 32.0) / 9.0`: IEEE-754 multiplication is
    // commutative, so the reordering is bit-exact.
    (t - 32.0) * 5.0 / 9.0
}
/// Runs the sensor/environment bench and checks the overheat output.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------

    // Mailboxes.
    let sensor_mbox = Mailbox::new();
    let env_mbox = Mailbox::new();

    // Connect data line and convert Fahrenheit degrees to Celsius.
    let temp_req = UniRequestor::with_map(|x| *x, fahr_to_cels, Env::get_temp, &env_mbox);

    // Models.
    let mut sensor = Sensor::new(100.0, temp_req);
    let env = Env::new(0.0);

    // Model handles for simulation.
    let env_addr = env_mbox.address();
    let mut overheat = EventBuffer::new();
    sensor.overheat.connect_sink(&overheat);

    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;

    // Assembly and initialization.
    let (mut simu, scheduler) = SimInit::new()
        .add_model(sensor, sensor_mbox, "sensor")
        .add_model(env, env_mbox, "env")
        .init(t0)?;

    // ----------
    // Simulation.
    // ----------

    // Check initial conditions.
    assert_eq!(simu.time(), t0);
    assert_eq!(overheat.next(), Some(false));
    assert!(overheat.next().is_none());

    // Change temperature in 2s.
    scheduler
        .schedule_event(Duration::from_secs(2), Env::set_temp, 105.0, &env_addr)
        .unwrap();

    // Change temperature in 4s.
    scheduler
        .schedule_event(Duration::from_secs(4), Env::set_temp, 213.0, &env_addr)
        .unwrap();

    // 105 deg F is about 40.6 deg C: still below the 100 deg C threshold.
    simu.step_until(Duration::from_secs(3))?;
    assert!(overheat.next().is_none());

    // 213 deg F is about 100.6 deg C: overheat detected.
    simu.step_until(Duration::from_secs(5))?;
    assert_eq!(overheat.next(), Some(true));

    Ok(())
}

View File

@ -0,0 +1,47 @@
//! Connector combinators.
//!
//! This module contains combinator types useful for simulation bench assembly.
//!
use nexosim::model::Model;
use nexosim::ports::{Output, Requestor};
/// A replier adaptor.
///
/// The `ReplierAdaptor` generic model connects an input/output port pair to
/// one or more replier ports.
///
/// The model input is propagated to all connected replier ports and their
/// replies are forwarded to the model output.
pub struct ReplierAdaptor<T: Clone + Send + 'static, R: Clone + Send + 'static> {
    /// Requestor port to be connected to a replier port.
    pub requestor: Requestor<T, R>,
    /// Output port to be connected to an input port.
    pub output: Output<R>,
}
impl<T: Clone + Send + 'static, R: Clone + Send + 'static> ReplierAdaptor<T, R> {
    /// Creates a `ReplierAdaptor` model with unconnected ports.
    pub fn new() -> Self {
        Self::default()
    }

    /// Input port.
    ///
    /// Forwards `data` to every connected replier port and emits each reply
    /// on the output port, in the order the replies are received.
    pub async fn input(&mut self, data: T) {
        let replies = self.requestor.send(data).await;
        for reply in replies {
            self.output.send(reply).await;
        }
    }
}
// The adaptor needs no initialization or custom behavior: the default
// `Model` implementation suffices.
impl<T: Clone + Send + 'static, R: Clone + Send + 'static> Model for ReplierAdaptor<T, R> {}
impl<T: Clone + Send + 'static, R: Clone + Send + 'static> Default for ReplierAdaptor<T, R> {
    /// Builds an adaptor with fresh, unconnected ports.
    fn default() -> Self {
        let requestor = Requestor::new();
        let output = Output::new();

        Self { requestor, output }
    }
}

View File

@ -0,0 +1,35 @@
//! Helper models.
//!
//! This module contains helper models useful for simulation bench assembly.
//!
use std::time::Duration;
use nexosim::model::{Context, InitializedModel, Model};
/// A ticker model.
///
/// This model self-schedules at the specified period, which can be used to keep
/// the simulation alive.
pub struct Ticker {
    /// Tick period, used both as the initial delay and the repeat interval.
    tick: Duration,
}
impl Ticker {
    /// Creates a new `Ticker` with the specified self-scheduling period.
    pub fn new(tick: Duration) -> Self {
        Self { tick }
    }

    /// Self-scheduled function.
    ///
    /// Intentionally empty: its only purpose is to generate periodic
    /// simulation events.
    async fn tick(&mut self) {}
}
impl Model for Ticker {
    /// Schedules the first tick one period from now and repeats it with the
    /// same period for the lifetime of the simulation.
    async fn init(self, cx: &mut Context<Self>) -> InitializedModel<Self> {
        let period = self.tick;
        cx.schedule_periodic_event(period, period, Self::tick, ())
            .unwrap();
        self.into()
    }
}

3
nexosim-util/src/lib.rs Normal file
View File

@ -0,0 +1,3 @@
pub mod combinators;
pub mod helper_models;
pub mod observables;

View File

@ -0,0 +1,96 @@
//! Observable states.
//!
//! This module contains types used to implement states automatically propagated
//! to output on change.
//!
use std::ops::Deref;
use nexosim::ports::Output;
/// Observability trait.
///
/// Implementors produce an owned observation value of type `T` that
/// represents their current state.
pub trait Observable<T> {
    /// Observe the value.
    fn observe(&self) -> T;
}
/// Every cloneable type trivially observes as a copy of itself.
impl<T> Observable<T> for T
where
    T: Clone,
{
    fn observe(&self) -> T {
        self.clone()
    }
}
/// Observable state.
///
/// This object encapsulates state. Every state change made through this
/// wrapper's API is propagated to the output.
#[derive(Debug)]
pub struct ObservableState<S, T>
where
    S: Observable<T> + Default,
    T: Clone + Send + 'static,
{
    /// Wrapped state; only mutable through `set`/`modify` so that changes
    /// are always reported.
    state: S,
    /// Output used for observation.
    out: Output<T>,
}
impl<S, T> ObservableState<S, T>
where
    S: Observable<T> + Default,
    T: Clone + Send + 'static,
{
    /// Creates a state holding `S::default()` that reports through `out`.
    pub fn new(out: Output<T>) -> Self {
        Self {
            state: S::default(),
            out,
        }
    }

    /// Returns a shared reference to the current state.
    pub fn get(&self) -> &S {
        &self.state
    }

    /// Replaces the state and reports the new value.
    pub async fn set(&mut self, value: S) {
        self.state = value;
        self.propagate().await;
    }

    /// Applies `f` to the state, reports the (possibly changed) value and
    /// returns the closure's result.
    pub async fn modify<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut S) -> R,
    {
        let result = f(&mut self.state);
        self.propagate().await;
        result
    }

    /// Reports the current value without changing it.
    pub async fn propagate(&mut self) {
        self.out.send(self.state.observe()).await;
    }
}
// Read-only access to the wrapped state; mutation must go through
// `set`/`modify` so that every change is reported on the output.
impl<S, T> Deref for ObservableState<S, T>
where
    S: Observable<T> + Default,
    T: Clone + Send + 'static,
{
    type Target = S;

    fn deref(&self) -> &S {
        &self.state
    }
}
/// Observable value.
///
/// Shorthand for the common case where the state and the observed value have
/// the same (cloneable) type.
pub type ObservableValue<T> = ObservableState<T, T>;

112
nexosim/Cargo.toml Normal file
View File

@ -0,0 +1,112 @@
[package]
name = "nexosim"
# When incrementing version and releasing to crates.io:
# - Update crate version in this Cargo.toml
# - Update crate version in README.md
# - Update crate version in the lib.rs documentation of feature flags
# - Update dependency in sibling crates
# - Remove path dependencies
# - Update CHANGELOG.md
# - Update if necessary copyright notice in LICENSE-MIT
# - Create a "vX.Y.Z" git tag
authors = ["Serge Barral <serge.barral@asynchronics.com>"]
version = "0.3.1"
edition = "2021"
rust-version = "1.77.0"
license = "MIT OR Apache-2.0"
repository = "https://github.com/asynchronics/nexosim"
readme = "../README.md"
description = """
A high performance asynchronous compute framework for system simulation.
"""
categories = ["simulation", "aerospace", "science"]
keywords = [
"simulation",
"discrete-event",
"systems",
"cyberphysical",
"real-time",
]
[features]
server = [
    "dep:bytes",
    "dep:ciborium",
    "dep:prost",
    "dep:prost-types",
    "dep:serde",
    "dep:tokio",
    "dep:tokio-stream",
    "dep:tonic",
    "tai-time/serde",
]
tracing = ["dep:tracing", "dep:tracing-subscriber"]
# DEVELOPMENT ONLY: API-unstable public exports meant for external test/benchmarking.
dev-hooks = []
[dependencies]
# Mandatory dependencies.
async-event = "0.2.1"
crossbeam-utils = "0.8"
diatomic-waker = "0.2.3"
dyn-clone = "1.0"
futures-channel = "0.3"
futures-task = "0.3"
multishot = "0.3.2"
num_cpus = "1.13"
parking = "2"
pin-project = "1"
recycle-box = "0.2"
slab = "0.4"
spin_sleep = "1"
st3 = "0.4"
tai-time = "0.3.3"
# Optional dependencies.
bytes = { version = "1", default-features = false, optional = true }
ciborium = { version = "0.2.2", optional = true }
prost = { version = "0.13", optional = true }
prost-types = { version = "0.13", optional = true }
serde = { version = "1", optional = true }
tokio = { version = "1.0", features = [
"net",
"rt-multi-thread",
], optional = true }
tonic = { version = "0.12", default-features = false, features = [
"codegen",
"prost",
"server",
], optional = true }
tracing = { version = "0.1.40", default-features = false, features = [
"std",
], optional = true }
tracing-subscriber = { version = "0.3.18", optional = true }
[target.'cfg(unix)'.dependencies]
tokio-stream = { version = "0.1.10", features = ["net"], optional = true }
[dev-dependencies]
futures-util = "0.3"
futures-executor = "0.3"
tracing-subscriber = { version = "0.3.18", features = ["env-filter"] }
[target.'cfg(nexosim_loom)'.dev-dependencies]
loom = "0.7"
waker-fn = "1.1"
[target.'cfg(nexosim_server_codegen)'.build-dependencies]
tonic-build = { version = "0.12" }
[lints.rust]
# `nexosim_loom` flag: run loom-based tests.
# `nexosim_server_codegen` flag: regenerate gRPC code from .proto definitions.
unexpected_cfgs = { level = "warn", check-cfg = [
'cfg(nexosim_loom)',
'cfg(nexosim_server_codegen)',
] }
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

9
nexosim/build.rs Normal file
View File

@ -0,0 +1,9 @@
// Build script: regenerates the gRPC server stubs from the .proto definitions.
//
// Code generation runs only when the `nexosim_server_codegen` cfg flag is set;
// otherwise this script is a no-op.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    // The attribute gates the whole statement, including the `?` propagation.
    #[cfg(nexosim_server_codegen)]
    tonic_build::configure()
        .build_client(false)
        .out_dir("src/server/codegen/")
        .compile_protos(&["simulation.proto"], &["src/server/api/"])?;
    Ok(())
}

View File

@ -0,0 +1,186 @@
//! Example: an assembly consisting of a current-controlled stepper motor and
//! its driver.
//!
//! This example demonstrates in particular:
//!
//! * model prototypes,
//! * submodels,
//! * self-scheduling methods,
//! * model initialization,
//! * simulation monitoring with buffered event sinks.
//!
//! ```text
//! ┌────────────────────────────────────────────┐
//! │ Assembly │
//! │ ┌──────────┐ │
//! PPS │ │ │ coil currents ┌─────────┐ │
//! Pulse rate ●──────────┼──►│ Driver ├───────────────►│ │ │
//! (±freq) │ │ │ (IA, IB) │ │ │ position
//! │ └──────────┘ │ Motor ├──┼──────────►
//! torque │ │ │ │ (0:199)
//! Load ●──────────┼──────────────────────────────►│ │ │
//! │ └─────────┘ │
//! └────────────────────────────────────────────┘
//! ```
use std::time::Duration;
use nexosim::model::{BuildContext, Model, ProtoModel};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
mod stepper_motor;
pub use stepper_motor::{Driver, Motor};
/// A prototype for `MotorAssembly`.
pub struct ProtoMotorAssembly {
    /// Position output; moved into the motor submodel at build time.
    pub position: Output<u16>,
    /// Initial motor position, forwarded to the motor submodel.
    init_pos: u16,
}
impl ProtoMotorAssembly {
/// The prototype has a public constructor.
pub fn new(init_pos: u16) -> Self {
Self {
position: Default::default(),
init_pos,
}
}
// Input methods are in the model itself.
}
/// The parent model whose submodels are the driver and the motor.
pub struct MotorAssembly {
    /// Private output for submodel connection (pulse rate to the driver).
    pps: Output<f64>,
    /// Private output for submodel connection (load torque to the motor).
    load: Output<f64>,
}
impl MotorAssembly {
/// The model now has a module-private constructor.
fn new() -> Self {
Self {
pps: Default::default(),
load: Default::default(),
}
}
/// Pulse rate (sign = direction) [Hz] -- input port.
pub async fn pulse_rate(&mut self, pps: f64) {
self.pps.send(pps).await
}
/// Torque applied by the load [N·m] -- input port.
pub async fn load(&mut self, torque: f64) {
self.load.send(torque).await
}
}
// The assembly itself has no initialization logic: submodel wiring is done in
// the prototype's `build` method.
impl Model for MotorAssembly {}
impl ProtoModel for ProtoMotorAssembly {
    type Model = MotorAssembly;

    /// Builds the assembly: creates the driver and motor submodels, wires the
    /// parent's private outputs to them and registers them with the bench.
    fn build(self, cx: &mut BuildContext<Self>) -> MotorAssembly {
        let mut assembly = MotorAssembly::new();
        let mut motor = Motor::new(self.init_pos);
        let mut driver = Driver::new(1.0);
        // Mailboxes.
        let motor_mbox = Mailbox::new();
        let driver_mbox = Mailbox::new();
        // Connections: parent inputs fan out to the submodels, and the driver
        // feeds coil currents to the motor.
        assembly.pps.connect(Driver::pulse_rate, &driver_mbox);
        assembly.load.connect(Motor::load, &motor_mbox);
        driver.current_out.connect(Motor::current_in, &motor_mbox);
        // Move the prototype's output to the submodel. The `self.position`
        // output can be cloned if necessary if several submodels need access to
        // it.
        motor.position = self.position;
        // Add the submodels to the simulation.
        cx.add_submodel(driver, driver_mbox, "driver");
        cx.add_submodel(motor, motor_mbox, "motor");
        assembly
    }
}
/// Bench entry point: builds the motor assembly, starts the motor via a
/// scheduled pulse-rate command and checks the resulting rotor positions.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------
    // Models.
    let init_pos = 123;
    let mut assembly = ProtoMotorAssembly::new(init_pos);
    // Mailboxes.
    let assembly_mbox = Mailbox::new();
    let assembly_addr = assembly_mbox.address();
    // Model handles for simulation.
    let mut position = EventBuffer::new();
    assembly.position.connect_sink(&position);
    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;
    // Assembly and initialization.
    let (mut simu, scheduler) = SimInit::new()
        .add_model(assembly, assembly_mbox, "assembly")
        .init(t0)?;
    // ----------
    // Simulation.
    // ----------
    // Check initial conditions.
    let mut t = t0;
    assert_eq!(simu.time(), t);
    assert_eq!(position.next(), Some(init_pos));
    assert!(position.next().is_none());
    // Start the motor in 2s with a PPS of 10Hz.
    scheduler
        .schedule_event(
            Duration::from_secs(2),
            MotorAssembly::pulse_rate,
            10.0,
            &assembly_addr,
        )
        .unwrap();
    // Advance simulation time to two next events: the command itself, then
    // the first driver step 0.1s later (10 Hz period).
    simu.step()?;
    t += Duration::new(2, 0);
    assert_eq!(simu.time(), t);
    simu.step()?;
    t += Duration::new(0, 100_000_000);
    assert_eq!(simu.time(), t);
    // Whichever the starting position, after two phase increments from the
    // driver the rotor should have synchronized with the driver, with a
    // position given by this beautiful formula.
    let mut pos = (((init_pos + 1) / 4) * 4 + 1) % Motor::STEPS_PER_REV;
    assert_eq!(position.by_ref().last().unwrap(), pos);
    // Advance simulation time by 0.9s, which with a 10Hz PPS should correspond to
    // 9 position increments.
    simu.step_until(Duration::new(0, 900_000_000))?;
    t += Duration::new(0, 900_000_000);
    assert_eq!(simu.time(), t);
    for _ in 0..9 {
        pos = (pos + 1) % Motor::STEPS_PER_REV;
        assert_eq!(position.next(), Some(pos));
    }
    assert!(position.next().is_none());
    Ok(())
}

View File

@ -12,9 +12,9 @@
//! ┌─────────────────────────────────────────────┐
//! │ (≥0) │
//! │ ┌────────────┐ │
//! └───│ │ │
//! └───│ │ │
//! added volume │ Water tank ├────┐ │
//! Water fill ●───────────────────│ │ │ │
//! Water fill ●───────────────────│ │ │ │
//! (>0) └────────────┘ │ │
//! │ │
//! water sense │ │
@ -22,22 +22,21 @@
//! │ (empty|not empty) │
//! │ │
//! │ ┌────────────┐ ┌────────────┐ │
//! brew time └───│ │ command │ │ │
//! Brew time dial ●───────────────────▶│ Controller ├─────────▶│ Water pump ├───┘
//! (>0) ┌───│ │ (on|off) │ │
//! brew time └───│ │ command │ │ │
//! Brew time dial ●───────────────────►│ Controller ├─────────►│ Water pump ├───┘
//! (>0) ┌───│ │ (on|off) │ │
//! │ └────────────┘ └────────────┘
//! trigger │
//! Brew command ●───────────────┘
//! (-)
//! ```
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use asynchronix::model::{InitializedModel, Model, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{EventKey, MonotonicTime, Scheduler};
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventSlot, Output};
use nexosim::simulation::{ActionKey, Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
/// Water pump.
pub struct Pump {
@ -81,7 +80,7 @@ pub struct Controller {
water_sense: WaterSenseState,
/// Event key, which if present indicates that the machine is currently
/// brewing -- internal state.
stop_brew_key: Option<EventKey>,
stop_brew_key: Option<ActionKey>,
}
impl Controller {
@ -121,7 +120,7 @@ impl Controller {
}
/// Starts brewing or cancels the current brew -- input port.
pub async fn brew_cmd(&mut self, _: (), scheduler: &Scheduler<Self>) {
pub async fn brew_cmd(&mut self, _: (), cx: &mut Context<Self>) {
// If a brew was ongoing, sending the brew command is interpreted as a
// request to cancel it.
if let Some(key) = self.stop_brew_key.take() {
@ -140,8 +139,7 @@ impl Controller {
// Schedule the `stop_brew()` method and turn on the pump.
self.stop_brew_key = Some(
scheduler
.schedule_keyed_event(self.brew_time, Self::stop_brew, ())
cx.schedule_keyed_event(self.brew_time, Self::stop_brew, ())
.unwrap(),
);
self.pump_cmd.send(PumpCommand::On).await;
@ -189,7 +187,7 @@ impl Tank {
}
/// Water volume added [m³] -- input port.
pub async fn fill(&mut self, added_volume: f64, scheduler: &Scheduler<Self>) {
pub async fn fill(&mut self, added_volume: f64, cx: &mut Context<Self>) {
// Ignore zero and negative values. We could also impose a maximum based
// on tank capacity.
if added_volume <= 0.0 {
@ -207,11 +205,11 @@ impl Tank {
state.set_empty_key.cancel();
// Update the volume, saturating at 0 in case of rounding errors.
let time = scheduler.time();
let time = cx.time();
let elapsed_time = time.duration_since(state.last_volume_update).as_secs_f64();
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
self.schedule_empty(state.flow_rate, time, scheduler).await;
self.schedule_empty(state.flow_rate, time, cx).await;
// There is no need to broadcast the state of the water sense since
// it could not be previously `Empty` (otherwise the dynamic state
@ -229,10 +227,10 @@ impl Tank {
/// # Panics
///
/// This method will panic if the flow rate is negative.
pub async fn set_flow_rate(&mut self, flow_rate: f64, scheduler: &Scheduler<Self>) {
pub async fn set_flow_rate(&mut self, flow_rate: f64, cx: &mut Context<Self>) {
assert!(flow_rate >= 0.0);
let time = scheduler.time();
let time = cx.time();
// If the flow rate was non-zero up to now, update the volume.
if let Some(state) = self.dynamic_state.take() {
@ -244,7 +242,7 @@ impl Tank {
self.volume = (self.volume - state.flow_rate * elapsed_time).max(0.0);
}
self.schedule_empty(flow_rate, time, scheduler).await;
self.schedule_empty(flow_rate, time, cx).await;
}
/// Schedules a callback for when the tank becomes empty.
@ -257,7 +255,7 @@ impl Tank {
&mut self,
flow_rate: f64,
time: MonotonicTime,
scheduler: &Scheduler<Self>,
cx: &mut Context<Self>,
) {
// Determine when the tank will be empty at the current flow rate.
let duration_until_empty = if self.volume == 0.0 {
@ -274,7 +272,7 @@ impl Tank {
let duration_until_empty = Duration::from_secs_f64(duration_until_empty);
// Schedule the next update.
match scheduler.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) {
match cx.schedule_keyed_event(duration_until_empty, Self::set_empty, ()) {
Ok(set_empty_key) => {
let state = TankDynamicState {
last_volume_update: time,
@ -301,21 +299,16 @@ impl Tank {
impl Model for Tank {
/// Broadcasts the initial state of the water sense.
fn init(
mut self,
_scheduler: &Scheduler<Self>,
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
Box::pin(async move {
self.water_sense
.send(if self.volume == 0.0 {
WaterSenseState::Empty
} else {
WaterSenseState::NotEmpty
})
.await;
async fn init(mut self, _: &mut Context<Self>) -> InitializedModel<Self> {
self.water_sense
.send(if self.volume == 0.0 {
WaterSenseState::Empty
} else {
WaterSenseState::NotEmpty
})
.await;
self.into()
})
self.into()
}
}
@ -323,7 +316,7 @@ impl Model for Tank {
/// is non-zero.
struct TankDynamicState {
last_volume_update: MonotonicTime,
set_empty_key: EventKey,
set_empty_key: ActionKey,
flow_rate: f64,
}
@ -334,7 +327,7 @@ pub enum WaterSenseState {
NotEmpty,
}
fn main() {
fn main() -> Result<(), SimulationError> {
// ---------------
// Bench assembly.
// ---------------
@ -364,7 +357,8 @@ fn main() {
pump.flow_rate.connect(Tank::set_flow_rate, &tank_mbox);
// Model handles for simulation.
let mut flow_rate = pump.flow_rate.connect_slot().0;
let mut flow_rate = EventSlot::new();
pump.flow_rate.connect_sink(&flow_rate);
let controller_addr = controller_mbox.address();
let tank_addr = tank_mbox.address();
@ -372,11 +366,11 @@ fn main() {
let t0 = MonotonicTime::EPOCH;
// Assembly and initialization.
let mut simu = SimInit::new()
.add_model(controller, controller_mbox)
.add_model(pump, pump_mbox)
.add_model(tank, tank_mbox)
.init(t0);
let (mut simu, scheduler) = SimInit::new()
.add_model(controller, controller_mbox, "controller")
.add_model(pump, pump_mbox, "pump")
.add_model(tank, tank_mbox, "tank")
.init(t0)?;
// ----------
// Simulation.
@ -387,62 +381,65 @@ fn main() {
assert_eq!(simu.time(), t);
// Brew one espresso shot with the default brew time.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
simu.step()?;
t += Controller::DEFAULT_BREW_TIME;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Drink too much coffee.
let volume_per_shot = pump_flow_rate * Controller::DEFAULT_BREW_TIME.as_secs_f64();
let shots_per_tank = (init_tank_volume / volume_per_shot) as u64; // YOLO--who cares about floating-point rounding errors?
for _ in 0..(shots_per_tank - 1) {
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.step();
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step()?;
t += Controller::DEFAULT_BREW_TIME;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
}
// Check that the tank becomes empty before the completion of the next shot.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
simu.step();
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
simu.step()?;
assert!(simu.time() < t + Controller::DEFAULT_BREW_TIME);
t = simu.time();
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Try to brew another shot while the tank is still empty.
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert!(flow_rate.take().is_none());
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
assert!(flow_rate.next().is_none());
// Change the brew time and fill up the tank.
let brew_time = Duration::new(30, 0);
simu.send_event(Controller::brew_time, brew_time, &controller_addr);
simu.send_event(Tank::fill, 1.0e-3, tank_addr);
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
simu.process_event(Controller::brew_time, brew_time, &controller_addr)?;
simu.process_event(Tank::fill, 1.0e-3, tank_addr)?;
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
simu.step()?;
t += brew_time;
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
// Interrupt the brew after 15s by pressing again the brew button.
simu.schedule_event(
Duration::from_secs(15),
Controller::brew_cmd,
(),
&controller_addr,
)
.unwrap();
simu.send_event(Controller::brew_cmd, (), &controller_addr);
assert_eq!(flow_rate.take(), Some(pump_flow_rate));
scheduler
.schedule_event(
Duration::from_secs(15),
Controller::brew_cmd,
(),
&controller_addr,
)
.unwrap();
simu.process_event(Controller::brew_cmd, (), &controller_addr)?;
assert_eq!(flow_rate.next(), Some(pump_flow_rate));
simu.step();
simu.step()?;
t += Duration::from_secs(15);
assert_eq!(simu.time(), t);
assert_eq!(flow_rate.take(), Some(0.0));
assert_eq!(flow_rate.next(), Some(0.0));
Ok(())
}

View File

@ -0,0 +1,257 @@
//! Example: a model that reads data external to the simulation.
//!
//! This example demonstrates in particular:
//!
//! * processing of external inputs (useful in co-simulation),
//! * system clock,
//! * periodic scheduling.
//!
//! ```text
//! ┏━━━━━━━━━━━━━━━━━━━━━━━━┓
//! ┃ Simulation ┃
//! ┌╌╌╌╌╌╌╌╌╌╌╌╌┐ ┌╌╌╌╌╌╌╌╌╌╌╌╌┐ ┃ ┌──────────┐ ┃
//! ┆ ┆ message ┆ ┆ message ┃ │ │ message ┃
//! ┆ UDP Client ├╌╌╌╌╌╌╌╌►┆ UDP Server ├╌╌╌╌╌╌╌╌╌╌╌╂╌╌►│ Listener ├─────────╂─►
//! ┆ ┆ [UDP] ┆ ┆ [channel] ┃ │ │ ┃
//! └╌╌╌╌╌╌╌╌╌╌╌╌┘ └╌╌╌╌╌╌╌╌╌╌╌╌┘ ┃ └──────────┘ ┃
//! ┗━━━━━━━━━━━━━━━━━━━━━━━━┛
//! ```
use std::io::ErrorKind;
use std::net::{Ipv4Addr, UdpSocket};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::{Arc, Condvar, Mutex};
use std::thread::{self, sleep, JoinHandle};
use std::time::Duration;
use nexosim::model::{BuildContext, Context, InitializedModel, Model, ProtoModel};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{Mailbox, SimInit, SimulationError};
use nexosim::time::{AutoSystemClock, MonotonicTime};
const DELTA: Duration = Duration::from_millis(2);
const PERIOD: Duration = Duration::from_millis(20);
const N: usize = 10;
const SHUTDOWN_SIGNAL: &str = "<SHUTDOWN>";
const SENDER: (Ipv4Addr, u16) = (Ipv4Addr::new(127, 0, 0, 1), 8000);
const RECEIVER: (Ipv4Addr, u16) = (Ipv4Addr::new(127, 0, 0, 1), 9000);
/// Prototype for the `Listener` Model.
pub struct ProtoListener {
    /// Received message.
    pub message: Output<String>,
    /// Notifier to start the UDP client; handed to the server thread, which
    /// fires it once its socket is bound.
    start: Notifier,
}
impl ProtoListener {
    /// Creates a prototype that will release `start` once the server socket
    /// is ready.
    fn new(start: Notifier) -> Self {
        Self {
            message: Output::default(),
            start,
        }
    }
}
impl ProtoModel for ProtoListener {
    type Model = Listener;

    /// Start the UDP Server immediately upon model construction.
    ///
    /// The server thread owns the sending half of the channel; the model
    /// drains the receiving half from its periodic `process` method.
    fn build(self, _: &mut BuildContext<Self>) -> Listener {
        let (tx, rx) = channel();
        let external_handle = thread::spawn(move || {
            Listener::listen(tx, self.start);
        });
        Listener::new(self.message, rx, external_handle)
    }
}
/// Model that asynchronously receives messages external to the simulation.
pub struct Listener {
    /// Received message.
    message: Output<String>,
    /// Receiver of external messages.
    rx: Receiver<String>,
    /// Handle to UDP Server.
    ///
    /// Kept in an `Option` so that `Drop` can move it out and join the thread.
    server_handle: Option<JoinHandle<()>>,
}
impl Listener {
    /// Creates a Listener.
    ///
    /// `rx` is the channel end fed by the UDP server thread; `server_handle`
    /// is joined when the model is dropped.
    pub fn new(
        message: Output<String>,
        rx: Receiver<String>,
        server_handle: JoinHandle<()>,
    ) -> Self {
        Self {
            message,
            rx,
            server_handle: Some(server_handle),
        }
    }

    /// Periodically scheduled function that processes external events.
    ///
    /// Drains every message currently queued on the channel and forwards each
    /// one to the `message` output.
    async fn process(&mut self) {
        while let Ok(message) = self.rx.try_recv() {
            self.message.send(message).await;
        }
    }

    /// Starts the UDP server.
    ///
    /// Runs on a dedicated thread; exits on the shutdown signal, when the
    /// channel receiver is dropped, or on a non-interrupt socket error.
    fn listen(tx: Sender<String>, start: Notifier) {
        let socket = UdpSocket::bind(RECEIVER).unwrap();
        // 64 KiB buffer, enough for the largest possible UDP payload.
        let mut buf = [0; 1 << 16];
        // Wake up the client.
        start.notify();
        loop {
            match socket.recv_from(&mut buf) {
                Ok((packet_size, _)) => {
                    // NOTE(review): non-UTF-8 datagrams are silently ignored.
                    if let Ok(message) = std::str::from_utf8(&buf[..packet_size]) {
                        if message == SHUTDOWN_SIGNAL {
                            break;
                        }
                        // Inject external message into simulation.
                        if tx.send(message.into()).is_err() {
                            break;
                        }
                    };
                }
                Err(e) if e.kind() == ErrorKind::Interrupted => {
                    continue;
                }
                _ => {
                    break;
                }
            }
        }
    }
}
impl Model for Listener {
    /// Initialize model.
    ///
    /// Schedules `process` every `PERIOD`, starting after `DELTA`, so that
    /// queued external messages are regularly drained into the simulation.
    async fn init(self, cx: &mut Context<Self>) -> InitializedModel<Self> {
        // Schedule periodic function that processes external events.
        cx.schedule_periodic_event(DELTA, PERIOD, Listener::process, ())
            .unwrap();
        self.into()
    }
}
impl Drop for Listener {
    /// Wait for UDP Server shutdown.
    ///
    /// Joining here guarantees the server thread does not outlive the model;
    /// the join result is deliberately ignored so that a panicking server
    /// thread does not abort model teardown.
    fn drop(&mut self) {
        if let Some(handle) = self.server_handle.take() {
            let _ = handle.join();
        };
    }
}
/// A synchronization barrier that can be unblocked by a notifier.
///
/// The boolean flag is `true` while the event is still pending and is cleared
/// by `Notifier::notify`.
struct WaitBarrier(Arc<(Mutex<bool>, Condvar)>);

impl WaitBarrier {
    fn new() -> Self {
        // The event starts out pending: `wait` must block until `notify`
        // clears the flag. (Initializing the flag to `false` would make
        // `wait` return immediately, defeating the barrier and allowing the
        // client to run before the server is ready.)
        Self(Arc::new((Mutex::new(true), Condvar::new())))
    }

    /// Returns a handle that can release this barrier.
    fn notifier(&self) -> Notifier {
        Notifier(self.0.clone())
    }

    /// Blocks the calling thread until the associated notifier fires.
    fn wait(self) {
        let _unused = self
            .0
            .1
            .wait_while(self.0 .0.lock().unwrap(), |pending| *pending)
            .unwrap();
    }
}
/// A notifier for the associated synchronization barrier.
struct Notifier(Arc<(Mutex<bool>, Condvar)>);

impl Notifier {
    /// Clears the pending flag and wakes the thread blocked on the barrier.
    fn notify(self) {
        *self.0 .0.lock().unwrap() = false;
        self.0 .1.notify_one();
    }
}
/// Bench entry point: runs the simulation against a real UDP client thread
/// and checks that every datagram was injected into the simulation.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------
    // Models.
    // Synchronization barrier for the UDP client.
    let start = WaitBarrier::new();
    // Prototype of the listener model.
    let mut listener = ProtoListener::new(start.notifier());
    // Mailboxes.
    let listener_mbox = Mailbox::new();
    // Model handles for simulation.
    let mut message = EventBuffer::with_capacity(N + 1);
    listener.message.connect_sink(&message);
    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;
    // Assembly and initialization. The real-time clock paces the simulation
    // against wall-clock time.
    let mut simu = SimInit::new()
        .add_model(listener, listener_mbox, "listener")
        .set_clock(AutoSystemClock::new())
        .init(t0)?
        .0;
    // ----------
    // Simulation.
    // ----------
    // External client that sends UDP messages.
    let sender_handle = thread::spawn(move || {
        let socket = UdpSocket::bind(SENDER).unwrap();
        // Wait until the UDP Server is ready.
        start.wait();
        for i in 0..N {
            socket.send_to(i.to_string().as_bytes(), RECEIVER).unwrap();
            if i % 3 == 0 {
                sleep(PERIOD * i as u32)
            }
        }
        // Return the socket so it can be reused for the shutdown signal.
        socket
    });
    // Advance simulation, external messages will be collected.
    simu.step_until(Duration::from_secs(2))?;
    // Shut down the server.
    let socket = sender_handle.join().unwrap();
    socket
        .send_to(SHUTDOWN_SIGNAL.as_bytes(), RECEIVER)
        .unwrap();
    // Check collected external messages.
    let mut packets = 0_u32;
    for _ in 0..N {
        // Check all messages accounting for possible UDP packet re-ordering,
        // but assuming no packet loss.
        packets |= 1 << message.next().unwrap().parse::<u8>().unwrap();
    }
    // N = 10 distinct payloads 0..=9: the 10 low bits set, i.e. u32::MAX >> 22.
    assert_eq!(packets, u32::MAX >> 22);
    assert_eq!(message.next(), None);
    Ok(())
}

View File

@ -0,0 +1,131 @@
//! Example: a simulation that runs infinitely, receiving data from
//! outside. This setup is typical for hardware-in-the-loop use case.
//!
//! This example demonstrates in particular:
//!
//! * infinite simulation (useful in hardware-in-the-loop),
//! * simulation halting,
//! * processing of external data (useful in co-simulation),
//! * system clock,
//! * periodic scheduling.
//!
//! ```text
//! ┏━━━━━━━━━━━━━━━━━━━━━━━━┓
//! ┃ Simulation ┃
//!┌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┐ ┃ ┌──────────┐ ┃
//!┆ ┆ message ┃ │ │ message ┃
//!┆ External thread ├╌╌╌╌╌╌╌╌╌╌╌╂╌╌►│ Listener ├─────────╂─►
//!┆ ┆ [channel] ┃ │ │ ┃
//!└╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌╌┘ ┃ └──────────┘ ┃
//! ┗━━━━━━━━━━━━━━━━━━━━━━━━┛
//! ```
use std::sync::mpsc::{channel, Receiver};
use std::thread::{self, sleep};
use std::time::Duration;
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{ExecutionError, Mailbox, SimInit, SimulationError};
use nexosim::time::{AutoSystemClock, MonotonicTime};
const DELTA: Duration = Duration::from_millis(2);
const PERIOD: Duration = Duration::from_millis(20);
const N: usize = 10;
/// The `Listener` Model.
pub struct Listener {
    /// Received message.
    pub message: Output<String>,
    /// Source of external messages, fed by a thread outside the simulation.
    external: Receiver<String>,
}
impl Listener {
    /// Creates new `Listener` model reading from the `external` channel.
    fn new(external: Receiver<String>) -> Self {
        Self {
            message: Output::default(),
            external,
        }
    }

    /// Periodically scheduled function that processes external events.
    ///
    /// Drains every message currently queued on the channel and forwards each
    /// one to the `message` output.
    async fn process(&mut self) {
        while let Ok(message) = self.external.try_recv() {
            self.message.send(message).await;
        }
    }
}
impl Model for Listener {
    /// Schedules the message-draining method every `PERIOD`, starting after
    /// `DELTA`, so that external messages keep flowing into the simulation.
    async fn init(self, cx: &mut Context<Self>) -> InitializedModel<Self> {
        cx.schedule_periodic_event(DELTA, PERIOD, Self::process, ())
            .unwrap();
        self.into()
    }
}
/// Bench entry point: runs the simulation unbounded on its own thread, feeds
/// it messages from outside, then halts it and checks the shutdown outcome.
fn main() -> Result<(), SimulationError> {
    // ---------------
    // Bench assembly.
    // ---------------
    // Channel for communication with simulation from outside.
    let (tx, rx) = channel();
    // Models.
    // The listener model.
    let mut listener = Listener::new(rx);
    // Mailboxes.
    let listener_mbox = Mailbox::new();
    // Model handles for simulation.
    let mut message = EventBuffer::with_capacity(N + 1);
    listener.message.connect_sink(&message);
    // Start time (arbitrary since models do not depend on absolute time).
    let t0 = MonotonicTime::EPOCH;
    // Assembly and initialization.
    let (mut simu, mut scheduler) = SimInit::new()
        .add_model(listener, listener_mbox, "listener")
        .set_clock(AutoSystemClock::new())
        .init(t0)?;
    // Simulation thread.
    let simulation_handle = thread::spawn(move || {
        // ----------
        // Simulation.
        // ----------
        // Runs until halted from the main thread.
        simu.step_unbounded()
    });
    // Send data to simulation from outside.
    for i in 0..N {
        tx.send(i.to_string()).unwrap();
        if i % 3 == 0 {
            sleep(PERIOD * i as u32)
        }
    }
    // Check collected external messages.
    // NOTE(review): this relies on the real-time clock and the periodic
    // `process` schedule having drained each message by the time it is
    // checked -- a timing assumption, not a guarantee.
    for i in 0..N {
        assert_eq!(message.next().unwrap(), i.to_string());
    }
    assert_eq!(message.next(), None);
    // Stop the simulation.
    scheduler.halt();
    // `Halted` is the expected way for `step_unbounded` to return here.
    match simulation_handle.join().unwrap() {
        Err(ExecutionError::Halted) => Ok(()),
        Err(e) => Err(e.into()),
        _ => Ok(()),
    }
}

View File

@ -8,27 +8,28 @@
//! ```text
//! ┌────────┐
//! │ │
//! ┌───▶│ Load ├───▶ Power
//! ┌──◄►│ Load ├───► Power
//! │ │ │
//! │ └────────┘
//! │
//! │ ┌────────┐
//! │ │ │
//! ├───▶│ Load ├───▶ Power
//! ├──◄►│ Load ├───► Power
//! │ │ │
//! │ └────────┘
//! │
//! │ ┌────────┐
//! ┌──────────┐ voltage │ │ │
//! Voltage setting ●────▶│ │◀────────────┴───▶│ Load ├───▶ Power
//! │ Power │ current │ │
//! ┌──────────┐ voltage │ │ │
//! Voltage setting ●────►│ │►◄───────────┴──◄►│ Load ├───► Power
//! │ Power │ current │ │
//! │ supply │ └────────┘
//! │ ├─────────────────────────────── Total power
//! │ ├─────────────────────────────── Total power
//! └──────────┘
//! ```
use asynchronix::model::{Model, Output, Requestor};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::MonotonicTime;
use nexosim::model::Model;
use nexosim::ports::{EventSlot, Output, Requestor};
use nexosim::simulation::{Mailbox, SimInit, SimulationError};
use nexosim::time::MonotonicTime;
/// Power supply.
pub struct PowerSupply {
@ -98,7 +99,7 @@ impl Load {
impl Model for Load {}
fn main() {
fn main() -> Result<(), SimulationError> {
// ---------------
// Bench assembly.
// ---------------
@ -124,10 +125,14 @@ fn main() {
psu.pwr_out.connect(Load::pwr_in, &load3_mbox);
// Model handles for simulation.
let mut psu_power = psu.power.connect_slot().0;
let mut load1_power = load1.power.connect_slot().0;
let mut load2_power = load2.power.connect_slot().0;
let mut load3_power = load3.power.connect_slot().0;
let mut psu_power = EventSlot::new();
let mut load1_power = EventSlot::new();
let mut load2_power = EventSlot::new();
let mut load3_power = EventSlot::new();
psu.power.connect_sink(&psu_power);
load1.power.connect_sink(&load1_power);
load2.power.connect_sink(&load2_power);
load3.power.connect_sink(&load3_power);
let psu_addr = psu_mbox.address();
// Start time (arbitrary since models do not depend on absolute time).
@ -135,11 +140,12 @@ fn main() {
// Assembly and initialization.
let mut simu = SimInit::new()
.add_model(psu, psu_mbox)
.add_model(load1, load1_mbox)
.add_model(load2, load2_mbox)
.add_model(load3, load3_mbox)
.init(t0);
.add_model(psu, psu_mbox, "psu")
.add_model(load1, load1_mbox, "load1")
.add_model(load2, load2_mbox, "load2")
.add_model(load3, load3_mbox, "load3")
.init(t0)?
.0;
// ----------
// Simulation.
@ -153,15 +159,17 @@ fn main() {
// Vary the supply voltage, check the load and power supply consumptions.
for voltage in [10.0, 15.0, 20.0] {
simu.send_event(PowerSupply::voltage_setting, voltage, &psu_addr);
simu.process_event(PowerSupply::voltage_setting, voltage, &psu_addr)?;
let v_square = voltage * voltage;
assert!(same_power(load1_power.take().unwrap(), v_square / r1));
assert!(same_power(load2_power.take().unwrap(), v_square / r2));
assert!(same_power(load3_power.take().unwrap(), v_square / r3));
assert!(same_power(load1_power.next().unwrap(), v_square / r1));
assert!(same_power(load2_power.next().unwrap(), v_square / r2));
assert!(same_power(load3_power.next().unwrap(), v_square / r3));
assert!(same_power(
psu_power.take().unwrap(),
psu_power.next().unwrap(),
v_square * (1.0 / r1 + 1.0 / r2 + 1.0 / r3)
));
}
Ok(())
}

View File

@ -4,23 +4,26 @@
//!
//! * self-scheduling methods,
//! * model initialization,
//! * simulation monitoring with event streams.
//! * simulation monitoring with buffered event sinks.
//!
//! ```text
//! ┌──────────┐ ┌──────────┐
//! PPS │ │ coil currents │ │ position
//! Pulse rate ●─────────▶│ Driver ├───────────────▶│ Motor ├──────────▶
//! (±freq) │ │ (IA, IB) │ │ (0:199)
//! └──────────┘ └──────────┘
//! ┌──────────┐
//! PPS │ │ coil currents ┌─────────┐
//! Pulse rate ●─────────►│ Driver ├───────────────►│ │
//! (±freq) │ │ (IA, IB) │ │ position
//! └──────────┘ │ Motor ├──────────►
//! torque │ │ (0:199)
//! Load ●─────────────────────────────────────►│ │
//! └─────────┘
//! ```
use std::future::Future;
use std::pin::Pin;
use std::time::Duration;
use asynchronix::model::{InitializedModel, Model, Output};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, Scheduler};
use nexosim::model::{Context, InitializedModel, Model};
use nexosim::ports::{EventBuffer, Output};
use nexosim::simulation::{Mailbox, SimInit};
use nexosim::time::MonotonicTime;
/// Stepper motor.
pub struct Motor {
@ -40,7 +43,7 @@ impl Motor {
pub const TORQUE_CONSTANT: f64 = 1.0;
/// Creates a motor with the specified initial position.
fn new(position: u16) -> Self {
pub fn new(position: u16) -> Self {
Self {
position: Default::default(),
pos: position % Self::STEPS_PER_REV,
@ -87,15 +90,9 @@ impl Motor {
impl Model for Motor {
/// Broadcasts the initial position of the motor.
fn init(
mut self,
_scheduler: &Scheduler<Self>,
) -> Pin<Box<dyn Future<Output = InitializedModel<Self>> + Send + '_>> {
Box::pin(async move {
self.position.send(self.pos).await;
self.into()
})
async fn init(mut self, _: &mut Context<Self>) -> InitializedModel<Self> {
self.position.send(self.pos).await;
self.into()
}
}
@ -128,8 +125,8 @@ impl Driver {
}
}
/// Sets the pulse rate (sign = direction) [Hz] -- input port.
pub async fn pulse_rate(&mut self, pps: f64, scheduler: &Scheduler<Self>) {
/// Pulse rate (sign = direction) [Hz] -- input port.
pub async fn pulse_rate(&mut self, pps: f64, cx: &mut Context<Self>) {
let pps = pps.signum() * pps.abs().clamp(Self::MIN_PPS, Self::MAX_PPS);
if pps == self.pps {
return;
@ -141,7 +138,7 @@ impl Driver {
// Trigger the rotation if the motor is currently idle. Otherwise the
// new value will be accounted for at the next pulse.
if is_idle {
self.send_pulse((), scheduler).await;
self.send_pulse((), cx).await;
}
}
@ -152,7 +149,7 @@ impl Driver {
fn send_pulse<'a>(
&'a mut self,
_: (),
scheduler: &'a Scheduler<Self>,
cx: &'a mut Context<Self>,
) -> impl Future<Output = ()> + Send + 'a {
async move {
let current_out = match self.next_phase {
@ -173,8 +170,7 @@ impl Driver {
let pulse_duration = Duration::from_secs_f64(1.0 / self.pps.abs());
// Schedule the next pulse.
scheduler
.schedule_event(pulse_duration, Self::send_pulse, ())
cx.schedule_event(pulse_duration, Self::send_pulse, ())
.unwrap();
}
}
@ -182,7 +178,8 @@ impl Driver {
impl Model for Driver {}
fn main() {
#[allow(dead_code)]
fn main() -> Result<(), nexosim::simulation::SimulationError> {
// ---------------
// Bench assembly.
// ---------------
@ -200,7 +197,8 @@ fn main() {
driver.current_out.connect(Motor::current_in, &motor_mbox);
// Model handles for simulation.
let mut position = motor.position.connect_stream().0;
let mut position = EventBuffer::new();
motor.position.connect_sink(&position);
let motor_addr = motor_mbox.address();
let driver_addr = driver_mbox.address();
@ -208,10 +206,10 @@ fn main() {
let t0 = MonotonicTime::EPOCH;
// Assembly and initialization.
let mut simu = SimInit::new()
.add_model(driver, driver_mbox)
.add_model(motor, motor_mbox)
.init(t0);
let (mut simu, scheduler) = SimInit::new()
.add_model(driver, driver_mbox, "driver")
.add_model(motor, motor_mbox, "motor")
.init(t0)?;
// ----------
// Simulation.
@ -224,19 +222,20 @@ fn main() {
assert!(position.next().is_none());
// Start the motor in 2s with a PPS of 10Hz.
simu.schedule_event(
Duration::from_secs(2),
Driver::pulse_rate,
10.0,
&driver_addr,
)
.unwrap();
scheduler
.schedule_event(
Duration::from_secs(2),
Driver::pulse_rate,
10.0,
&driver_addr,
)
.unwrap();
// Advance simulation time to two next events.
simu.step();
simu.step()?;
t += Duration::new(2, 0);
assert_eq!(simu.time(), t);
simu.step();
simu.step()?;
t += Duration::new(0, 100_000_000);
assert_eq!(simu.time(), t);
@ -248,7 +247,7 @@ fn main() {
// Advance simulation time by 0.9s, which with a 10Hz PPS should correspond to
// 9 position increments.
simu.step_by(Duration::new(0, 900_000_000));
simu.step_until(Duration::new(0, 900_000_000))?;
t += Duration::new(0, 900_000_000);
assert_eq!(simu.time(), t);
for _ in 0..9 {
@ -258,24 +257,24 @@ fn main() {
assert!(position.next().is_none());
// Increase the load beyond the torque limit for a 1A driver current.
simu.send_event(Motor::load, 2.0, &motor_addr);
simu.process_event(Motor::load, 2.0, &motor_addr)?;
// Advance simulation time and check that the motor is blocked.
simu.step();
simu.step()?;
t += Duration::new(0, 100_000_000);
assert_eq!(simu.time(), t);
assert!(position.next().is_none());
// Do it again.
simu.step();
simu.step()?;
t += Duration::new(0, 100_000_000);
assert_eq!(simu.time(), t);
assert!(position.next().is_none());
// Decrease the load below the torque limit for a 1A driver current and
// advance simulation time.
simu.send_event(Motor::load, 0.5, &motor_addr);
simu.step();
simu.process_event(Motor::load, 0.5, &motor_addr)?;
simu.step()?;
t += Duration::new(0, 100_000_000);
// The motor should start moving again, but since the phase was incremented
@ -287,7 +286,7 @@ fn main() {
// Advance simulation time by 0.7s, which with a 10Hz PPS should correspond to
// 7 position increments.
simu.step_by(Duration::new(0, 700_000_000));
simu.step_until(Duration::new(0, 700_000_000))?;
t += Duration::new(0, 700_000_000);
assert_eq!(simu.time(), t);
for _ in 0..7 {
@ -298,8 +297,8 @@ fn main() {
// Now make the motor rotate in the opposite direction. Note that this
// driver only accounts for a new PPS at the next pulse.
simu.send_event(Driver::pulse_rate, -10.0, &driver_addr);
simu.step();
simu.process_event(Driver::pulse_rate, -10.0, &driver_addr)?;
simu.step()?;
t += Duration::new(0, 100_000_000);
assert_eq!(simu.time(), t);
pos = (pos + 1) % Motor::STEPS_PER_REV;
@ -307,9 +306,11 @@ fn main() {
// Advance simulation time by 1.9s, which with a -10Hz PPS should correspond
// to 19 position decrements.
simu.step_by(Duration::new(1, 900_000_000));
simu.step_until(Duration::new(1, 900_000_000))?;
t += Duration::new(1, 900_000_000);
assert_eq!(simu.time(), t);
pos = (pos + Motor::STEPS_PER_REV - 19) % Motor::STEPS_PER_REV;
assert_eq!(position.by_ref().last(), Some(pos));
Ok(())
}

View File

@ -4,11 +4,11 @@
mod queue;
use std::cell::Cell;
use std::error;
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::num::NonZeroUsize;
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::sync::Arc;
@ -19,8 +19,15 @@ use recycle_box::RecycleBox;
use queue::{PopError, PushError, Queue};
use recycle_box::coerce_box;
use crate::model::Model;
use crate::time::Scheduler;
use crate::model::{Context, Model};
// Counts the difference between the number of sent and received messages for
// this thread.
//
// This is used by the executor to make sure that all messages have been
// received upon completion of a simulation step, i.e. that no deadlock
// occurred.
thread_local! { pub(crate) static THREAD_MSG_COUNT: Cell<isize> = const { Cell::new(0) }; }
/// Data shared between the receiver and the senders.
struct Inner<M> {
@ -46,7 +53,7 @@ impl<M: 'static> Inner<M> {
}
/// A receiver which can asynchronously execute `async` message that take an
/// argument of type `&mut M` and an optional `&Scheduler<M>` argument.
/// argument of type `&mut M` and an optional `&mut Context<M>` argument.
pub(crate) struct Receiver<M> {
/// Shared data.
inner: Arc<Inner<M>>,
@ -86,12 +93,19 @@ impl<M: Model> Receiver<M> {
}
}
/// Creates a new observer.
pub(crate) fn observer(&self) -> impl ChannelObserver {
Observer {
inner: self.inner.clone(),
}
}
/// Receives and executes a message asynchronously, if necessary waiting
/// until one becomes available.
pub(crate) async fn recv(
&mut self,
model: &mut M,
scheduler: &Scheduler<M>,
cx: &mut Context<M>,
) -> Result<(), RecvError> {
let msg = unsafe {
self.inner
@ -106,12 +120,16 @@ impl<M: Model> Receiver<M> {
match msg {
Some(mut msg) => {
// Consume the message to obtain a boxed future.
let fut = msg.call_once(model, scheduler, self.future_box.take().unwrap());
// Decrement the count of in-flight messages.
THREAD_MSG_COUNT.set(THREAD_MSG_COUNT.get().wrapping_sub(1));
// Now that `msg` was consumed and its slot in the queue was
// freed, signal to one awaiting sender that one slot is
// Take the message to obtain a boxed future.
let fut = msg.call_once(model, cx, self.future_box.take().unwrap());
// Now that the message was taken, drop `msg` to free its slot
// in the queue and signal to one awaiting sender that a slot is
// available for sending.
drop(msg);
self.inner.sender_signal.notify_one();
// Await the future provided by the message.
@ -154,7 +172,7 @@ impl<M: Model> Receiver<M> {
/// time, but an identifier may be reused after all handles to a channel
/// have been dropped.
pub(crate) fn channel_id(&self) -> ChannelId {
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
ChannelId(&*self.inner as *const Inner<M> as usize)
}
}
@ -189,7 +207,7 @@ impl<M: Model> Sender<M> {
where
F: for<'a> FnOnce(
&'a mut M,
&'a Scheduler<M>,
&'a mut Context<M>,
RecycleBox<()>,
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
+ Send
@ -221,6 +239,9 @@ impl<M: Model> Sender<M> {
if success {
self.inner.receiver_signal.notify();
// Increment the count of in-flight messages.
THREAD_MSG_COUNT.set(THREAD_MSG_COUNT.get().wrapping_add(1));
Ok(())
} else {
Err(SendError)
@ -255,8 +276,8 @@ impl<M: Model> Sender<M> {
/// All channels are guaranteed to have different identifiers at any given
/// time, but an identifier may be reused after all handles to a channel
/// have been dropped.
pub(crate) fn channel_id(&self) -> ChannelId {
ChannelId(NonZeroUsize::new(&*self.inner as *const Inner<M> as usize).unwrap())
pub(crate) fn channel_id(&self) -> usize {
Arc::as_ptr(&self.inner) as usize
}
}
@ -277,6 +298,37 @@ impl<M> Clone for Sender<M> {
}
}
/// A model-independent handle to a channel that can observe the current number
/// of messages.
pub(crate) trait ChannelObserver: Send {
/// Returns the current number of messages in the channel.
///
/// # Warning
///
/// The returned result is only meaningful if it can be established that
/// there are no concurrent send or receive operations on the channel.
/// Otherwise, the returned value may neither reflect the current state nor
/// the past state of the channel, and may be greater than the capacity of
/// the channel.
fn len(&self) -> usize;
}
/// A handle to a channel that can observe the current number of messages.
///
/// Multiple [`Observer`]s can be created using the [`Receiver::observer`]
/// method or via cloning.
#[derive(Clone)]
pub(crate) struct Observer<M: 'static> {
/// Shared data.
inner: Arc<Inner<M>>,
}
impl<M: Model> ChannelObserver for Observer<M> {
fn len(&self) -> usize {
self.inner.queue.len()
}
}
impl<M: 'static> Drop for Sender<M> {
fn drop(&mut self) {
// Decrease the reference count of senders.
@ -312,7 +364,7 @@ impl<M> fmt::Debug for Sender<M> {
}
/// A closure that can be called once to create a future boxed in a `RecycleBox`
/// from an `&mut M`, a `&Scheduler<M>` and an empty `RecycleBox`.
/// from an `&mut M`, a `&mut Context<M>` and an empty `RecycleBox`.
///
/// This is basically a workaround to emulate an `FnOnce` with the equivalent of
/// an `FnMut` so that it is possible to call it as a `dyn` trait stored in a
@ -328,7 +380,7 @@ trait MessageFn<M: Model>: Send {
fn call_once<'a>(
&mut self,
model: &'a mut M,
scheduler: &'a Scheduler<M>,
cx: &'a mut Context<M>,
recycle_box: RecycleBox<()>,
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>;
}
@ -350,7 +402,7 @@ impl<F, M: Model> MessageFn<M> for MessageFnOnce<F, M>
where
F: for<'a> FnOnce(
&'a mut M,
&'a Scheduler<M>,
&'a mut Context<M>,
RecycleBox<()>,
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a>
+ Send,
@ -358,18 +410,18 @@ where
fn call_once<'a>(
&mut self,
model: &'a mut M,
scheduler: &'a Scheduler<M>,
cx: &'a mut Context<M>,
recycle_box: RecycleBox<()>,
) -> RecycleBox<dyn Future<Output = ()> + Send + 'a> {
let closure = self.msg_fn.take().unwrap();
(closure)(model, scheduler, recycle_box)
(closure)(model, cx, recycle_box)
}
}
/// Unique identifier for a channel.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub(crate) struct ChannelId(NonZeroUsize);
pub(crate) struct ChannelId(usize);
impl fmt::Display for ChannelId {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {

View File

@ -33,7 +33,7 @@ pub(super) struct MessageBorrow<'a, T: ?Sized> {
stamp: usize,
}
impl<'a, T: ?Sized> Deref for MessageBorrow<'a, T> {
impl<T: ?Sized> Deref for MessageBorrow<'_, T> {
type Target = T;
fn deref(&self) -> &Self::Target {
@ -41,13 +41,13 @@ impl<'a, T: ?Sized> Deref for MessageBorrow<'a, T> {
}
}
impl<'a, T: ?Sized> DerefMut for MessageBorrow<'a, T> {
impl<T: ?Sized> DerefMut for MessageBorrow<'_, T> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.msg
}
}
impl<'a, T: ?Sized> Drop for MessageBorrow<'a, T> {
impl<T: ?Sized> Drop for MessageBorrow<'_, T> {
fn drop(&mut self) {
let slot = &self.queue.buffer[self.index];
@ -67,7 +67,7 @@ impl<'a, T: ?Sized> Drop for MessageBorrow<'a, T> {
slot.stamp.store(self.stamp, Ordering::Release);
}
}
impl<'a, M> fmt::Debug for MessageBorrow<'a, M> {
impl<M> fmt::Debug for MessageBorrow<'_, M> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("MessageBorrow").finish_non_exhaustive()
}
@ -85,7 +85,7 @@ struct Slot<T: ?Sized> {
message: UnsafeCell<MessageBox<T>>,
}
/// An fast MPSC queue that stores its items in recyclable boxes.
/// A fast MPSC queue that stores its items in recyclable boxes.
///
/// The item may be unsized.
///
@ -122,7 +122,7 @@ pub(super) struct Queue<T: ?Sized> {
/// and the producer. The reason it is shared is that the drop handler of
/// the last `Inner` owner (which may be a producer) needs access to the
/// dequeue position.
dequeue_pos: CachePadded<UnsafeCell<usize>>,
dequeue_pos: CachePadded<AtomicUsize>,
/// Buffer holding the closures and their stamps.
buffer: Box<[Slot<T>]>,
@ -160,7 +160,7 @@ impl<T: ?Sized> Queue<T> {
Queue {
enqueue_pos: CachePadded::new(AtomicUsize::new(0)),
dequeue_pos: CachePadded::new(UnsafeCell::new(0)),
dequeue_pos: CachePadded::new(AtomicUsize::new(0)),
buffer: buffer.into(),
right_mask,
closed_channel_mask,
@ -241,7 +241,7 @@ impl<T: ?Sized> Queue<T> {
///
/// This method may not be called concurrently from multiple threads.
pub(super) unsafe fn pop(&self) -> Result<MessageBorrow<'_, T>, PopError> {
let dequeue_pos = self.dequeue_pos.with(|p| *p);
let dequeue_pos = self.dequeue_pos.load(Ordering::Relaxed);
let index = dequeue_pos & self.right_mask;
let slot = &self.buffer[index];
let stamp = slot.stamp.load(Ordering::Acquire);
@ -251,10 +251,10 @@ impl<T: ?Sized> Queue<T> {
// closure can be popped.
debug_or_loom_assert_eq!(stamp, dequeue_pos + 1);
// Only this thread can access the dequeue position so there is no
// Only this thread can modify the dequeue position so there is no
// need to increment the position atomically with a `fetch_add`.
self.dequeue_pos
.with_mut(|p| *p = self.next_queue_pos(dequeue_pos));
.store(self.next_queue_pos(dequeue_pos), Ordering::Relaxed);
// Extract the closure from the slot and set the stamp to the value of
// the dequeue position increased by one sequence increment.
@ -318,6 +318,30 @@ impl<T: ?Sized> Queue<T> {
self.enqueue_pos.load(Ordering::Relaxed) & self.closed_channel_mask != 0
}
/// Returns the number of items in the queue.
///
/// # Warning
///
/// While this method is safe by Rust's standard, the returned result is
/// only meaningful if it can be established that there are no concurrent
/// `push` or `pop` operations. Otherwise, the returned value may neither
/// reflect the current state nor the past state of the queue, and may be
/// greater than the capacity of the queue.
pub(super) fn len(&self) -> usize {
let enqueue_pos = self.enqueue_pos.load(Ordering::Relaxed);
let dequeue_pos = self.dequeue_pos.load(Ordering::Relaxed);
let enqueue_idx = enqueue_pos & (self.right_mask >> 1);
let dequeue_idx = dequeue_pos & (self.right_mask >> 1);
// Establish whether the sequence numbers of the enqueue and dequeue
// positions differ. If yes, it means the enqueue position has wrapped
// around one more time so the difference between indices must be
// increased by the buffer capacity.
let carry_flag = (enqueue_pos & !self.right_mask) != (dequeue_pos & !self.right_mask);
(enqueue_idx + (carry_flag as usize) * self.buffer.len()) - dequeue_idx
}
/// Increment the queue position, incrementing the sequence count as well if
/// the index wraps to 0.
///
@ -389,7 +413,7 @@ impl<T: ?Sized> Producer<T> {
}
/// Checks if the queue is closed.
#[cfg(not(asynchronix_loom))]
#[cfg(not(nexosim_loom))]
fn is_closed(&self) -> bool {
self.inner.is_closed()
}
@ -423,6 +447,12 @@ impl<T: ?Sized> Consumer<T> {
fn close(&self) {
self.inner.close();
}
/// Returns the number of items.
#[cfg(not(nexosim_loom))]
fn len(&self) -> usize {
self.inner.len()
}
}
#[cfg(test)]
@ -440,7 +470,7 @@ fn queue<T: ?Sized>(capacity: usize) -> (Producer<T>, Consumer<T>) {
}
/// Regular tests.
#[cfg(all(test, not(asynchronix_loom)))]
#[cfg(all(test, not(nexosim_loom)))]
mod tests {
use super::*;
@ -452,7 +482,7 @@ mod tests {
assert!(matches!(c.pop(), Err(PopError::Empty)));
assert!(matches!(p.push(|b| RecycleBox::recycle(b, 42)), Ok(_)));
assert!(p.push(|b| RecycleBox::recycle(b, 42)).is_ok());
p.close();
assert_eq!(*c.pop().unwrap(), 42);
@ -463,12 +493,12 @@ mod tests {
fn queue_closed_by_consumer() {
let (p, mut c) = queue(3);
assert_eq!(p.is_closed(), false);
assert!(matches!(p.push(|b| RecycleBox::recycle(b, 42)), Ok(_)));
assert!(!p.is_closed());
assert!(p.push(|b| RecycleBox::recycle(b, 42)).is_ok());
c.close();
assert_eq!(p.is_closed(), true);
assert!(p.is_closed());
assert!(matches!(
p.push(|b| RecycleBox::recycle(b, 13)),
Err(PushError::Closed)
@ -569,10 +599,56 @@ mod tests {
fn queue_mpsc_capacity_three() {
queue_mpsc(3);
}
#[test]
fn queue_len() {
let (p, mut c) = queue(4);
let _ = p.push(|b| RecycleBox::recycle(b, 0));
assert_eq!(c.len(), 1);
let _ = p.push(|b| RecycleBox::recycle(b, 1));
assert_eq!(c.len(), 2);
let _ = c.pop();
assert_eq!(c.len(), 1);
let _ = p.push(|b| RecycleBox::recycle(b, 2));
assert_eq!(c.len(), 2);
let _ = p.push(|b| RecycleBox::recycle(b, 3));
assert_eq!(c.len(), 3);
let _ = c.pop();
assert_eq!(c.len(), 2);
let _ = p.push(|b| RecycleBox::recycle(b, 4));
assert_eq!(c.len(), 3);
let _ = c.pop();
assert_eq!(c.len(), 2);
let _ = p.push(|b| RecycleBox::recycle(b, 5));
assert_eq!(c.len(), 3);
let _ = p.push(|b| RecycleBox::recycle(b, 6));
assert_eq!(c.len(), 4);
let _ = c.pop();
assert_eq!(c.len(), 3);
let _ = p.push(|b| RecycleBox::recycle(b, 7));
assert_eq!(c.len(), 4);
let _ = c.pop();
assert_eq!(c.len(), 3);
let _ = p.push(|b| RecycleBox::recycle(b, 8));
assert_eq!(c.len(), 4);
let _ = c.pop();
assert_eq!(c.len(), 3);
let _ = p.push(|b| RecycleBox::recycle(b, 9));
assert_eq!(c.len(), 4);
let _ = c.pop();
assert_eq!(c.len(), 3);
let _ = c.pop();
assert_eq!(c.len(), 2);
let _ = c.pop();
assert_eq!(c.len(), 1);
let _ = c.pop();
assert_eq!(c.len(), 0);
}
}
/// Loom tests.
#[cfg(all(test, asynchronix_loom))]
#[cfg(all(test, nexosim_loom))]
mod tests {
use super::*;

View File

@ -3,6 +3,7 @@
//! Not for production use!
use std::future::Future;
use std::time::Duration;
use crate::executor;
@ -15,7 +16,18 @@ impl Executor {
///
/// The maximum number of threads is set with the `pool_size` parameter.
pub fn new(pool_size: usize) -> Self {
Self(executor::Executor::new(pool_size))
let dummy_cx = crate::executor::SimulationContext {
#[cfg(feature = "tracing")]
time_reader: crate::util::sync_cell::SyncCell::new(
crate::time::TearableAtomicTime::new(crate::time::MonotonicTime::EPOCH),
)
.reader(),
};
Self(executor::Executor::new_multi_threaded(
pool_size,
dummy_cx,
executor::Signal::new(),
))
}
/// Spawns a task which output will never be retrieved.
@ -33,6 +45,6 @@ impl Executor {
/// Let the executor run, blocking until all futures have completed or until
/// the executor deadlocks.
pub fn run(&mut self) {
self.0.run();
self.0.run(Duration::ZERO).unwrap();
}
}

257
nexosim/src/executor.rs Normal file
View File

@ -0,0 +1,257 @@
//! `async` executor trait.
mod mt_executor;
mod st_executor;
mod task;
use std::any::Any;
use std::future::Future;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::Duration;
use crossbeam_utils::CachePadded;
use crate::macros::scoped_thread_local::scoped_thread_local;
use crate::simulation::ModelId;
#[cfg(feature = "tracing")]
use crate::time::AtomicTimeReader;
use task::Promise;
/// Unique identifier for executor instances.
///
/// Shared counter from which executor IDs are allocated; the allocation
/// itself happens in the executor submodules (e.g. `mt_executor` imports
/// `NEXT_EXECUTOR_ID`) -- exact usage not visible here.
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
/// Error returned when an executor run does not complete normally.
#[derive(Debug)]
pub(crate) enum ExecutorError {
    /// Not all messages have been processed: the executor became idle while
    /// the count of in-flight messages was still non-zero (i.e. a deadlock
    /// was detected). The payload is the number of unprocessed messages.
    UnprocessedMessages(usize),
    /// The simulation has timed out.
    Timeout,
    /// The simulation has panicked; carries the identifier of the model that
    /// panicked together with the panic payload.
    Panic(ModelId, Box<dyn Any + Send + 'static>),
}
/// Context common to all executor types.
#[derive(Clone)]
pub(crate) struct SimulationContext {
    /// Read-only handle to the simulation time.
    ///
    /// Only present when the `tracing` feature is enabled.
    #[cfg(feature = "tracing")]
    pub(crate) time_reader: AtomicTimeReader,
}

// Scoped thread-local giving code running on executor threads access to the
// simulation context (set by the worker threads before they start running).
scoped_thread_local!(pub(crate) static SIMULATION_CONTEXT: SimulationContext);
/// A single-threaded or multi-threaded `async` executor.
///
/// This is a thin front-end that dispatches to the concrete executor
/// implementations in the `st_executor` and `mt_executor` submodules.
#[derive(Debug)]
pub(crate) enum Executor {
    /// Executor running all futures on the current thread.
    StExecutor(st_executor::Executor),
    /// Executor running futures on a thread pool.
    MtExecutor(mt_executor::Executor),
}
impl Executor {
    /// Creates an executor that runs futures on the current thread.
    pub(crate) fn new_single_threaded(
        simulation_context: SimulationContext,
        abort_signal: Signal,
    ) -> Self {
        Self::StExecutor(st_executor::Executor::new(simulation_context, abort_signal))
    }

    /// Creates an executor that runs futures on a thread pool.
    ///
    /// The maximum number of threads is set with the `num_threads` parameter.
    ///
    /// # Panics
    ///
    /// This will panic if the specified number of threads is zero or more than
    /// `usize::BITS`.
    pub(crate) fn new_multi_threaded(
        num_threads: usize,
        simulation_context: SimulationContext,
        abort_signal: Signal,
    ) -> Self {
        Self::MtExecutor(mt_executor::Executor::new(
            num_threads,
            simulation_context,
            abort_signal,
        ))
    }

    /// Spawns a task and returns a promise that can be polled to retrieve the
    /// task's output.
    ///
    /// Note that spawned tasks are not executed until [`run`](Executor::run) is
    /// called.
    #[allow(unused)]
    pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        match self {
            Self::StExecutor(executor) => executor.spawn(future),
            Self::MtExecutor(executor) => executor.spawn(future),
        }
    }

    /// Spawns a task whose output will never be retrieved.
    ///
    /// Note that spawned tasks are not executed until [`run`](Executor::run) is
    /// called.
    pub(crate) fn spawn_and_forget<T>(&self, future: T)
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        match self {
            Self::StExecutor(executor) => executor.spawn_and_forget(future),
            Self::MtExecutor(executor) => executor.spawn_and_forget(future),
        }
    }

    /// Execute spawned tasks, blocking until all futures have completed or
    /// until the executor reaches a deadlock.
    ///
    /// A `Duration::ZERO` timeout disables the timeout (the run may block
    /// indefinitely).
    pub(crate) fn run(&mut self, timeout: Duration) -> Result<(), ExecutorError> {
        match self {
            Self::StExecutor(executor) => executor.run(timeout),
            Self::MtExecutor(executor) => executor.run(timeout),
        }
    }
}
/// A single-use shared boolean signal.
///
/// The flag is wrapped in `CachePadded` so that polling it does not cause
/// false sharing with neighboring data; clones share the same flag through
/// the `Arc`.
#[derive(Clone, Debug)]
pub(crate) struct Signal(Arc<CachePadded<AtomicBool>>);

impl Signal {
    /// Create a new, cleared signal.
    pub(crate) fn new() -> Self {
        Self(Arc::new(CachePadded::new(AtomicBool::new(false))))
    }

    /// Sets the signal.
    pub(crate) fn set(&self) {
        self.0.store(true, Ordering::Relaxed);
    }

    /// Returns `true` if the signal was set.
    pub(crate) fn is_set(&self) -> bool {
        self.0.load(Ordering::Relaxed)
    }
}
/// Regular (non-loom) tests for the executor front-end.
#[cfg(all(test, not(nexosim_loom)))]
mod tests {
    use std::sync::atomic::Ordering;
    use std::sync::Arc;

    use futures_channel::mpsc;
    use futures_util::StreamExt;

    use super::*;

    /// Builds a `SimulationContext` for tests that do not exercise the
    /// simulation time (the `tracing` time reader is a placeholder pinned to
    /// `MonotonicTime::EPOCH`).
    fn dummy_simulation_context() -> SimulationContext {
        SimulationContext {
            #[cfg(feature = "tracing")]
            time_reader: crate::util::sync_cell::SyncCell::new(
                crate::time::TearableAtomicTime::new(crate::time::MonotonicTime::EPOCH),
            )
            .reader(),
        }
    }

    /// An object that runs an arbitrary closure when dropped.
    struct RunOnDrop<F: FnOnce()> {
        // Wrapped in an `Option` so the `FnOnce` can be taken by value from
        // within `drop(&mut self)`.
        drop_fn: Option<F>,
    }
    impl<F: FnOnce()> RunOnDrop<F> {
        /// Creates a new `RunOnDrop`.
        fn new(drop_fn: F) -> Self {
            Self {
                drop_fn: Some(drop_fn),
            }
        }
    }
    impl<F: FnOnce()> Drop for RunOnDrop<F> {
        fn drop(&mut self) {
            if let Some(f) = self.drop_fn.take() {
                f()
            }
        }
    }

    /// Checks that tasks which wake one another from their drop handlers are
    /// all eventually dropped when the executor itself is dropped.
    fn executor_drop_cycle(mut executor: Executor) {
        let (sender1, mut receiver1) = mpsc::channel(2);
        let (sender2, mut receiver2) = mpsc::channel(2);
        let (sender3, mut receiver3) = mpsc::channel(2);
        let drop_count = Arc::new(AtomicUsize::new(0));

        // Spawn 3 tasks that wake one another when dropped.
        executor.spawn_and_forget({
            let mut sender2 = sender2.clone();
            let mut sender3 = sender3.clone();
            let drop_count = drop_count.clone();

            async move {
                let _guard = RunOnDrop::new(move || {
                    let _ = sender2.try_send(());
                    let _ = sender3.try_send(());
                    drop_count.fetch_add(1, Ordering::Relaxed);
                });

                let _ = receiver1.next().await;
            }
        });
        executor.spawn_and_forget({
            let mut sender1 = sender1.clone();
            let mut sender3 = sender3.clone();
            let drop_count = drop_count.clone();

            async move {
                let _guard = RunOnDrop::new(move || {
                    let _ = sender1.try_send(());
                    let _ = sender3.try_send(());
                    drop_count.fetch_add(1, Ordering::Relaxed);
                });

                let _ = receiver2.next().await;
            }
        });
        executor.spawn_and_forget({
            let mut sender1 = sender1.clone();
            let mut sender2 = sender2.clone();
            let drop_count = drop_count.clone();

            async move {
                let _guard = RunOnDrop::new(move || {
                    let _ = sender1.try_send(());
                    let _ = sender2.try_send(());
                    drop_count.fetch_add(1, Ordering::Relaxed);
                });

                let _ = receiver3.next().await;
            }
        });

        // All tasks block on their receiver, so the run completes with the
        // tasks still alive.
        executor.run(Duration::ZERO).unwrap();

        // Make sure that all tasks are eventually dropped even though each task
        // wakes the others when dropped.
        drop(executor);
        assert_eq!(drop_count.load(Ordering::Relaxed), 3);
    }

    #[test]
    fn executor_drop_cycle_st() {
        executor_drop_cycle(Executor::new_single_threaded(
            dummy_simulation_context(),
            Signal::new(),
        ));
    }

    #[test]
    fn executor_drop_cycle_mt() {
        executor_drop_cycle(Executor::new_multi_threaded(
            3,
            dummy_simulation_context(),
            Signal::new(),
        ));
    }
}

View File

@ -41,37 +41,36 @@
//! notification flag, thus alleviating the need to reset the notification flag
//! before polling a future.
mod injector;
mod pool_manager;
use std::cell::Cell;
use std::fmt;
use std::future::Future;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::atomic::{AtomicIsize, Ordering};
use std::sync::{Arc, Mutex};
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
use crossbeam_utils::sync::{Parker, Unparker};
// TODO: revert to `crossbeam_utils::sync::Parker` once timeout support lands in
// v1.0 (see https://github.com/crossbeam-rs/crossbeam/pull/1012).
use parking::{Parker, Unparker};
use slab::Slab;
mod injector;
mod pool_manager;
mod task;
mod worker;
#[cfg(all(test, not(asynchronix_loom)))]
mod tests;
use crate::channel;
use crate::executor::task::{self, CancelToken, Promise, Runnable};
use crate::executor::{
ExecutorError, Signal, SimulationContext, NEXT_EXECUTOR_ID, SIMULATION_CONTEXT,
};
use crate::macros::scoped_thread_local::scoped_thread_local;
use crate::simulation::CURRENT_MODEL_ID;
use crate::util::rng::Rng;
use self::pool_manager::PoolManager;
use self::task::{CancelToken, Promise, Runnable};
use self::worker::Worker;
use pool_manager::PoolManager;
const BUCKET_SIZE: usize = 128;
const QUEUE_SIZE: usize = BUCKET_SIZE * 2;
static NEXT_EXECUTOR_ID: AtomicUsize = AtomicUsize::new(0);
type Bucket = injector::Bucket<Runnable, BUCKET_SIZE>;
type Injector = injector::Injector<Runnable, BUCKET_SIZE>;
type LocalQueue = st3::fifo::Worker<Runnable>;
@ -88,8 +87,10 @@ pub(crate) struct Executor {
active_tasks: Arc<Mutex<Slab<CancelToken>>>,
/// Parker for the main executor thread.
parker: Parker,
/// Join handles of the worker threads.
/// Handles to the worker threads.
worker_handles: Vec<JoinHandle<()>>,
/// Handle to the forced termination signal.
abort_signal: Signal,
}
impl Executor {
@ -101,7 +102,11 @@ impl Executor {
///
/// This will panic if the specified number of threads is zero or is more
/// than `usize::BITS`.
pub(crate) fn new(num_threads: usize) -> Self {
pub(crate) fn new(
num_threads: usize,
simulation_context: SimulationContext,
abort_signal: Signal,
) -> Self {
let parker = Parker::new();
let unparker = parker.unparker().clone();
@ -147,11 +152,17 @@ impl Executor {
.spawn({
let context = context.clone();
let active_tasks = active_tasks.clone();
let simulation_context = simulation_context.clone();
let abort_signal = abort_signal.clone();
move || {
let worker = Worker::new(local_queue, context);
ACTIVE_TASKS.set(&active_tasks, || {
LOCAL_WORKER
.set(&worker, || run_local_worker(&worker, id, worker_parker))
SIMULATION_CONTEXT.set(&simulation_context, || {
ACTIVE_TASKS.set(&active_tasks, || {
LOCAL_WORKER.set(&worker, || {
run_local_worker(&worker, id, worker_parker, abort_signal)
})
})
});
}
})
@ -168,15 +179,15 @@ impl Executor {
active_tasks,
parker,
worker_handles,
abort_signal,
}
}
/// Spawns a task and returns a promise that can be polled to retrieve the
/// task's output.
///
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
/// is called.
#[allow(unused)]
/// Note that spawned tasks are not executed until [`run`](Executor::run) is
/// called.
pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
where
T: Future + Send + 'static,
@ -204,8 +215,8 @@ impl Executor {
/// This is mostly useful to avoid undue reference counting for futures that
/// return a `()` type.
///
/// Note that spawned tasks are not executed until [`run()`](Executor::run)
/// is called.
/// Note that spawned tasks are not executed until [`run`](Executor::run) is
/// called.
pub(crate) fn spawn_and_forget<T>(&self, future: T)
where
T: Future + Send + 'static,
@ -226,20 +237,37 @@ impl Executor {
self.context.injector.insert_task(runnable);
}
/// Execute spawned tasks, blocking until all futures have completed or
/// until the executor reaches a deadlock.
pub(crate) fn run(&mut self) {
/// Execute spawned tasks, blocking until all futures have completed or an
/// error is encountered.
pub(crate) fn run(&mut self, timeout: Duration) -> Result<(), ExecutorError> {
self.context.pool_manager.activate_worker();
loop {
if let Some(worker_panic) = self.context.pool_manager.take_panic() {
panic::resume_unwind(worker_panic);
}
if self.context.pool_manager.pool_is_idle() {
return;
if let Some((model_id, payload)) = self.context.pool_manager.take_panic() {
return Err(ExecutorError::Panic(model_id, payload));
}
self.parker.park();
if self.context.pool_manager.pool_is_idle() {
let msg_count = self.context.msg_count.load(Ordering::Relaxed);
if msg_count != 0 {
let msg_count: usize = msg_count.try_into().unwrap();
return Err(ExecutorError::UnprocessedMessages(msg_count));
}
return Ok(());
}
if timeout.is_zero() {
self.parker.park();
} else if !self.parker.park_timeout(timeout) {
// A timeout occurred: request all worker threads to return
// as soon as possible.
self.abort_signal.set();
self.context.pool_manager.activate_all_workers();
return Err(ExecutorError::Timeout);
}
}
}
}
@ -247,7 +275,8 @@ impl Executor {
impl Drop for Executor {
fn drop(&mut self) {
// Force all threads to return.
self.context.pool_manager.trigger_termination();
self.abort_signal.set();
self.context.pool_manager.activate_all_workers();
for handle in self.worker_handles.drain(0..) {
handle.join().unwrap();
}
@ -295,12 +324,18 @@ impl fmt::Debug for Executor {
struct ExecutorContext {
/// Injector queue.
injector: Injector,
/// Unique executor ID inherited by all tasks spawned on this executor instance.
/// Unique executor identifier inherited by all tasks spawned on this
/// executor instance.
executor_id: usize,
/// Unparker for the main executor thread.
executor_unparker: Unparker,
/// Manager for all worker threads.
pool_manager: PoolManager,
/// Difference between the number of sent and received messages.
///
/// This counter is only updated by worker threads before they park and is
/// therefore only consistent once all workers are parked.
msg_count: AtomicIsize,
}
impl ExecutorContext {
@ -323,6 +358,7 @@ impl ExecutorContext {
stealers.into_boxed_slice(),
worker_unparkers,
),
msg_count: AtomicIsize::new(0),
}
}
}
@ -374,6 +410,24 @@ impl<T: Future> Drop for CancellableFuture<T> {
}
}
/// A local worker with access to global executor resources.
pub(crate) struct Worker {
    local_queue: LocalQueue,
    fast_slot: Cell<Option<Runnable>>,
    executor_context: Arc<ExecutorContext>,
}

impl Worker {
    /// Creates a worker that pops tasks from the given local queue and shares
    /// the provided executor context.
    fn new(queue: LocalQueue, context: Arc<ExecutorContext>) -> Self {
        // The fast slot starts empty; it is checked before the local queue
        // when popping tasks.
        let fast_slot = Cell::new(None);

        Self {
            local_queue: queue,
            fast_slot,
            executor_context: context,
        }
    }
}
/// Schedules a `Runnable` from within a worker thread.
///
/// # Panics
@ -434,13 +488,22 @@ fn schedule_task(task: Runnable, executor_id: usize) {
/// is received or until it panics.
///
/// Panics caught in this thread are relayed to the main executor thread.
fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
fn run_local_worker(worker: &Worker, id: usize, parker: Parker, abort_signal: Signal) {
let pool_manager = &worker.executor_context.pool_manager;
let injector = &worker.executor_context.injector;
let executor_unparker = &worker.executor_context.executor_unparker;
let local_queue = &worker.local_queue;
let fast_slot = &worker.fast_slot;
// Update the global message counter.
let update_msg_count = || {
let thread_msg_count = channel::THREAD_MSG_COUNT.replace(0);
worker
.executor_context
.msg_count
.fetch_add(thread_msg_count, Ordering::Relaxed);
};
let result = panic::catch_unwind(AssertUnwindSafe(|| {
// Set how long to spin when searching for a task.
const MAX_SEARCH_DURATION: Duration = Duration::from_nanos(1000);
@ -453,9 +516,10 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
// Try to deactivate the worker.
if pool_manager.try_set_worker_inactive(id) {
parker.park();
// No need to call `begin_worker_search()`: this was done by the
// thread that unparked the worker.
update_msg_count();
parker.park();
} else if injector.is_empty() {
// This worker could not be deactivated because it was the last
// active worker. In such case, the call to
@ -464,6 +528,7 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
// not activate a new worker, which is why some tasks may now be
// visible in the injector queue.
pool_manager.set_all_workers_inactive();
update_msg_count();
executor_unparker.unpark();
parker.park();
// No need to call `begin_worker_search()`: this was done by the
@ -472,7 +537,7 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
pool_manager.begin_worker_search();
}
if pool_manager.termination_is_triggered() {
if abort_signal.is_set() {
return;
}
@ -542,7 +607,7 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
// Pop tasks from the fast slot or the local queue.
while let Some(task) = fast_slot.take().or_else(|| local_queue.pop()) {
if pool_manager.termination_is_triggered() {
if abort_signal.is_set() {
return;
}
task.run();
@ -555,10 +620,12 @@ fn run_local_worker(worker: &Worker, id: usize, parker: Parker) {
}
}));
// Propagate the panic, if any.
if let Err(panic) = result {
pool_manager.register_panic(panic);
pool_manager.trigger_termination();
// Report the panic, if any.
if let Err(payload) = result {
let model_id = CURRENT_MODEL_ID.take();
pool_manager.register_panic(model_id, payload);
abort_signal.set();
pool_manager.activate_all_workers();
executor_unparker.unpark();
}
}

View File

@ -1,10 +1,11 @@
use std::any::Any;
use std::sync::atomic::{self, AtomicBool, AtomicUsize, Ordering};
use std::sync::atomic::{self, AtomicUsize, Ordering};
use std::sync::Mutex;
use crossbeam_utils::sync::Unparker;
use parking::Unparker;
use super::Stealer;
use crate::simulation::ModelId;
use crate::util::bit;
use crate::util::rng;
@ -22,13 +23,8 @@ pub(super) struct PoolManager {
active_workers: AtomicUsize,
/// Count of all workers currently searching for tasks.
searching_workers: AtomicUsize,
/// Flag requesting all workers to return immediately.
terminate_signal: AtomicBool,
/// Panic caught in a worker thread.
worker_panic: Mutex<Option<Box<dyn Any + Send + 'static>>>,
#[cfg(feature = "dev-logs")]
/// Thread wake-up statistics.
record: Record,
worker_panic: Mutex<Option<(ModelId, Box<dyn Any + Send + 'static>)>>,
}
impl PoolManager {
@ -59,10 +55,7 @@ impl PoolManager {
worker_unparkers,
active_workers: AtomicUsize::new(0),
searching_workers: AtomicUsize::new(0),
terminate_signal: AtomicBool::new(false),
worker_panic: Mutex::new(None),
#[cfg(feature = "dev-logs")]
record: Record::new(pool_size),
}
}
@ -85,8 +78,6 @@ impl PoolManager {
.active_workers
.fetch_or(1 << first_idle_worker, Ordering::Relaxed);
if active_workers & (1 << first_idle_worker) == 0 {
#[cfg(feature = "dev-logs")]
self.record.increment(first_idle_worker);
self.begin_worker_search();
self.worker_unparkers[first_idle_worker].unpark();
return;
@ -117,8 +108,6 @@ impl PoolManager {
.active_workers
.fetch_or(1 << first_idle_worker, Ordering::Relaxed);
if active_workers & (1 << first_idle_worker) == 0 {
#[cfg(feature = "dev-logs")]
self.record.increment(first_idle_worker);
self.begin_worker_search();
self.worker_unparkers[first_idle_worker].unpark();
return;
@ -220,36 +209,27 @@ impl PoolManager {
self.searching_workers.load(Ordering::Relaxed)
}
/// Triggers the termination signal and unparks all worker threads so they
/// can cleanly terminate.
pub(super) fn trigger_termination(&self) {
self.terminate_signal.store(true, Ordering::Relaxed);
/// Unparks all workers and mark them as active.
pub(super) fn activate_all_workers(&self) {
self.set_all_workers_active();
for unparker in &*self.worker_unparkers {
unparker.unpark();
}
}
/// Returns true if the termination signal was triggered.
pub(super) fn termination_is_triggered(&self) -> bool {
self.terminate_signal.load(Ordering::Relaxed)
}
/// Registers a panic associated with the provided worker ID.
/// Registers a worker panic.
///
/// If no panic is currently registered, the panic in argument is
/// registered. If a panic was already registered by a worker and was not
/// yet processed by the executor, then nothing is done.
pub(super) fn register_panic(&self, panic: Box<dyn Any + Send + 'static>) {
/// If a panic was already registered and was not yet processed by the
/// executor, then nothing is done.
pub(super) fn register_panic(&self, model_id: ModelId, payload: Box<dyn Any + Send + 'static>) {
let mut worker_panic = self.worker_panic.lock().unwrap();
if worker_panic.is_none() {
*worker_panic = Some(panic);
*worker_panic = Some((model_id, payload));
}
}
/// Takes a worker panic if any is registered.
pub(super) fn take_panic(&self) -> Option<Box<dyn Any + Send + 'static>> {
pub(super) fn take_panic(&self) -> Option<(ModelId, Box<dyn Any + Send + 'static>)> {
let mut worker_panic = self.worker_panic.lock().unwrap();
worker_panic.take()
}
@ -273,13 +253,6 @@ impl PoolManager {
}
}
#[cfg(feature = "dev-logs")]
impl Drop for PoolManager {
fn drop(&mut self) {
println!("Thread launch count: {:?}", self.record.get());
}
}
/// An iterator over active workers that yields their associated stealer,
/// starting from a randomly selected active worker.
pub(super) struct ShuffledStealers<'a> {
@ -346,27 +319,3 @@ impl<'a> Iterator for ShuffledStealers<'a> {
Some(&self.stealers[current_candidate])
}
}
#[cfg(feature = "dev-logs")]
#[derive(Debug)]
struct Record {
    stats: Vec<AtomicUsize>,
}

#[cfg(feature = "dev-logs")]
impl Record {
    /// Creates a record holding one zeroed counter per worker.
    fn new(worker_count: usize) -> Self {
        Self {
            stats: (0..worker_count).map(|_| AtomicUsize::new(0)).collect(),
        }
    }

    /// Increments the counter of the given worker.
    fn increment(&self, worker_id: usize) {
        self.stats[worker_id].fetch_add(1, Ordering::Relaxed);
    }

    /// Returns a snapshot of all worker counters.
    fn get(&self) -> Vec<usize> {
        self.stats
            .iter()
            .map(|s| s.load(Ordering::Relaxed))
            .collect()
    }
}

View File

@ -0,0 +1,337 @@
use std::cell::RefCell;
use std::future::Future;
use std::panic::AssertUnwindSafe;
use std::sync::atomic::Ordering;
use std::time::Duration;
use std::{fmt, panic, thread};
// TODO: revert to `crossbeam_utils::sync::Parker` once timeout support lands in
// v1.0 (see https://github.com/crossbeam-rs/crossbeam/pull/1012).
use parking::Parker;
use slab::Slab;
use super::task::{self, CancelToken, Promise, Runnable};
use super::NEXT_EXECUTOR_ID;
use crate::channel;
use crate::executor::{ExecutorError, Signal, SimulationContext, SIMULATION_CONTEXT};
use crate::macros::scoped_thread_local::scoped_thread_local;
use crate::simulation::CURRENT_MODEL_ID;
const QUEUE_MIN_CAPACITY: usize = 32;
scoped_thread_local!(static EXECUTOR_CONTEXT: ExecutorContext);
scoped_thread_local!(static ACTIVE_TASKS: RefCell<Slab<CancelToken>>);
/// A single-threaded `async` executor.
pub(crate) struct Executor {
    /// Executor state.
    ///
    /// This is an `Option` because `run` temporarily moves the state out so
    /// it can be transferred to a helper thread when a timeout is requested;
    /// it is `None` only while that thread owns it.
    inner: Option<Box<ExecutorInner>>,
    /// Handle to the forced termination signal.
    ///
    /// Setting this signal requests the executor to return as soon as
    /// possible.
    abort_signal: Signal,
}
impl Executor {
    /// Creates an executor that runs futures on the current thread.
    ///
    /// A clone of the abort signal is also stored in the inner state so that
    /// the task-execution loop can observe it.
    pub(crate) fn new(simulation_context: SimulationContext, abort_signal: Signal) -> Self {
        // Each executor instance has a unique ID inherited by tasks to ensure
        // that tasks are scheduled on their parent executor.
        let executor_id = NEXT_EXECUTOR_ID.fetch_add(1, Ordering::Relaxed);
        assert!(
            executor_id <= usize::MAX / 2,
            "too many executors have been instantiated"
        );

        let context = ExecutorContext::new(executor_id);
        let active_tasks = RefCell::new(Slab::new());

        Self {
            inner: Some(Box::new(ExecutorInner {
                context,
                active_tasks,
                simulation_context,
                abort_signal: abort_signal.clone(),
            })),
            abort_signal,
        }
    }

    /// Spawns a task and returns a promise that can be polled to retrieve the
    /// task's output.
    ///
    /// Note that spawned tasks are not executed until [`run`](Executor::run)
    /// is called.
    pub(crate) fn spawn<T>(&self, future: T) -> Promise<T::Output>
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        let inner = self.inner.as_ref().unwrap();

        // Book a slot to store the task cancellation token.
        let mut active_tasks = inner.active_tasks.borrow_mut();
        let task_entry = active_tasks.vacant_entry();

        // Wrap the future so that it removes its cancel token from the
        // executor's list when dropped.
        let future = CancellableFuture::new(future, task_entry.key());

        let (promise, runnable, cancel_token) =
            task::spawn(future, schedule_task, inner.context.executor_id);
        task_entry.insert(cancel_token);
        // Queue the task; it only executes once `run` is called.
        let mut queue = inner.context.queue.borrow_mut();
        queue.push(runnable);

        promise
    }

    /// Spawns a task which output will never be retrieved.
    ///
    /// This is mostly useful to avoid undue reference counting for futures that
    /// return a `()` type.
    ///
    /// Note that spawned tasks are not executed until [`run`](Executor::run)
    /// is called.
    pub(crate) fn spawn_and_forget<T>(&self, future: T)
    where
        T: Future + Send + 'static,
        T::Output: Send + 'static,
    {
        let inner = self.inner.as_ref().unwrap();

        // Book a slot to store the task cancellation token.
        let mut active_tasks = inner.active_tasks.borrow_mut();
        let task_entry = active_tasks.vacant_entry();

        // Wrap the future so that it removes its cancel token from the
        // executor's list when dropped.
        let future = CancellableFuture::new(future, task_entry.key());

        let (runnable, cancel_token) =
            task::spawn_and_forget(future, schedule_task, inner.context.executor_id);
        task_entry.insert(cancel_token);
        // Queue the task; it only executes once `run` is called.
        let mut queue = inner.context.queue.borrow_mut();
        queue.push(runnable);
    }

    /// Execute spawned tasks, blocking until all futures have completed or an
    /// error is encountered.
    ///
    /// A zero `timeout` means "no deadline": tasks then execute directly on
    /// the current thread. Otherwise execution is delegated to a helper
    /// thread and `ExecutorError::Timeout` is returned if that thread does
    /// not finish in time.
    pub(crate) fn run(&mut self, timeout: Duration) -> Result<(), ExecutorError> {
        if timeout.is_zero() {
            return self.inner.as_mut().unwrap().run();
        }

        // Temporarily move out the inner state so it can be moved to another
        // thread.
        let mut inner = self.inner.take().unwrap();

        let parker = Parker::new();
        let unparker = parker.unparker();
        let th = thread::spawn(move || {
            let res = inner.run();
            // Wake the parked caller now that execution has finished.
            unparker.unpark();

            (inner, res)
        });

        if !parker.park_timeout(timeout) {
            // Make a best-effort attempt at stopping the worker thread.
            self.abort_signal.set();
            // NOTE(review): on timeout the helper thread is detached and
            // `self.inner` is left `None`, so a later call to `run` or
            // `spawn` would panic on `unwrap` — confirm the executor is
            // always discarded after a timeout.
            return Err(ExecutorError::Timeout);
        }

        let (inner, res) = th.join().unwrap();
        self.inner = Some(inner);

        res
    }
}
/// Inner state of the executor.
struct ExecutorInner {
    /// Shared executor data.
    context: ExecutorContext,
    /// List of tasks that have not completed yet.
    active_tasks: RefCell<Slab<CancelToken>>,
    /// Read-only handle to the simulation time.
    simulation_context: SimulationContext,
    /// Signal requesting the worker thread to return as soon as possible.
    ///
    /// This is a clone of the signal held by the owning `Executor`.
    abort_signal: Signal,
}
impl ExecutorInner {
    /// Drains and runs all queued tasks on the current thread.
    ///
    /// Returns an error if a task panicked or if messages remained in flight
    /// once the run ended.
    fn run(&mut self) -> Result<(), ExecutorError> {
        // In case this executor is nested in another one, reset the counter of in-flight messages.
        let msg_count_stash = channel::THREAD_MSG_COUNT.replace(self.context.msg_count);

        // Expose the simulation context, the active-task list and the
        // executor context to tasks (and to `schedule_task`) through scoped
        // thread-locals while the queue is drained.
        let result = SIMULATION_CONTEXT.set(&self.simulation_context, || {
            ACTIVE_TASKS.set(&self.active_tasks, || {
                EXECUTOR_CONTEXT.set(&self.context, || {
                    panic::catch_unwind(AssertUnwindSafe(|| loop {
                        let task = match self.context.queue.borrow_mut().pop() {
                            Some(task) => task,
                            None => break,
                        };
                        task.run();
                        // Bail out early if forced termination was requested.
                        if self.abort_signal.is_set() {
                            return;
                        }
                    }))
                })
            })
        });

        // Return the panic payload, if any.
        if let Err(payload) = result {
            let model_id = CURRENT_MODEL_ID.take();

            return Err(ExecutorError::Panic(model_id, payload));
        }

        // Check for unprocessed messages.
        //
        // NOTE(review): this check also runs when the loop was stopped early
        // by the abort signal, in which case leftover messages surface as
        // `UnprocessedMessages`; the `try_into` below panics if the counter
        // is negative — confirm both behaviors are intended.
        self.context.msg_count = channel::THREAD_MSG_COUNT.replace(msg_count_stash);
        if self.context.msg_count != 0 {
            let msg_count: usize = self.context.msg_count.try_into().unwrap();

            return Err(ExecutorError::UnprocessedMessages(msg_count));
        }

        Ok(())
    }
}
impl Drop for ExecutorInner {
    // Cancels every task that has not completed, then lets the work queue
    // drop whatever remains scheduled.
    fn drop(&mut self) {
        // Drop all tasks that have not completed.
        //
        // The executor context must be set because some tasks may schedule
        // other tasks when dropped, which requires that the work queue be
        // available.
        EXECUTOR_CONTEXT.set(&self.context, || {
            // Cancel all pending futures.
            //
            // `ACTIVE_TASKS` is explicitly unset to prevent
            // `CancellableFuture::drop()` from trying to remove its own token
            // from the list of active tasks as this would result in a nested
            // call to `borrow_mut` and thus a panic. This is mainly to stay on
            // the safe side: `ACTIVE_TASKS` should not be set anyway, unless
            // for some reason the executor runs inside another executor.
            ACTIVE_TASKS.unset(|| {
                let mut tasks = self.active_tasks.borrow_mut();
                for task in tasks.drain() {
                    task.cancel();
                }

                // Some of the dropped tasks may have scheduled other tasks that
                // were not yet cancelled, preventing them from being dropped
                // upon cancellation. This is OK: the scheduled tasks will be
                // dropped when the work queue is dropped, and they cannot
                // re-schedule one another since all tasks were cancelled.
            });
        });
    }
}
impl fmt::Debug for Executor {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The executor state is opaque; only print the type name.
        f.debug_struct("Executor").finish_non_exhaustive()
    }
}
/// Shared executor context.
///
/// This contains all executor resources that can be shared between threads.
struct ExecutorContext {
    /// Work queue.
    queue: RefCell<Vec<Runnable>>,
    /// Unique executor identifier inherited by all tasks spawned on this
    /// executor instance.
    executor_id: usize,
    /// Number of in-flight messages.
    msg_count: isize,
}

impl ExecutorContext {
    /// Creates a new shared executor context with an empty, pre-allocated
    /// work queue and a zeroed message counter.
    fn new(executor_id: usize) -> Self {
        let queue = RefCell::new(Vec::with_capacity(QUEUE_MIN_CAPACITY));

        Self {
            queue,
            executor_id,
            msg_count: 0,
        }
    }
}
/// A `Future` wrapper that removes its cancellation token from the list of
/// active tasks when dropped.
struct CancellableFuture<T: Future> {
    /// The wrapped future.
    inner: T,
    /// Key of this task's cancel token in the `ACTIVE_TASKS` slab.
    cancellation_key: usize,
}

impl<T: Future> CancellableFuture<T> {
    /// Creates a new `CancellableFuture`.
    fn new(fut: T, cancellation_key: usize) -> Self {
        Self {
            inner: fut,
            cancellation_key,
        }
    }
}

impl<T: Future> Future for CancellableFuture<T> {
    type Output = T::Output;

    #[inline(always)]
    fn poll(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        // SAFETY: `inner` is structurally pinned: it is never moved out of
        // `self`, and the `Drop` implementation does not move it either.
        unsafe { self.map_unchecked_mut(|s| &mut s.inner).poll(cx) }
    }
}
impl<T: Future> Drop for CancellableFuture<T> {
    fn drop(&mut self) {
        // Remove the task from the list of active tasks while the executor is
        // running (meaning that `ACTIVE_TASK` is set). Otherwise do nothing and
        // let the executor's drop handler do the cleanup.
        let _ = ACTIVE_TASKS.map(|active_tasks| {
            // Don't use `borrow_mut()` because this function can be called from
            // a destructor and should not panic. In the worst case, the cancel
            // token will be left in the list of active tasks, which prevents
            // eager task deallocation but does not cause any issue otherwise.
            if let Ok(mut active_tasks) = active_tasks.try_borrow_mut() {
                let _cancel_token = active_tasks.try_remove(self.cancellation_key);
            }
        });
    }
}
/// Schedules a `Runnable` from within a worker thread.
///
/// # Panics
///
/// This function will panic if called from outside the executor worker thread
/// or from another executor instance than the one the task for this `Runnable`
/// was spawned on.
fn schedule_task(task: Runnable, executor_id: usize) {
    let scheduled = EXECUTOR_CONTEXT.map(|context| {
        // Check that this task was indeed spawned on this executor.
        assert_eq!(
            executor_id, context.executor_id,
            "Tasks must be awaken on the same executor they are spawned on"
        );

        context.queue.borrow_mut().push(task);
    });

    scheduled.expect("Tasks may not be awaken outside executor threads");
}

View File

@ -125,13 +125,6 @@ where
S: Fn(Runnable, T) + Send + Sync + 'static,
T: Clone + Send + Sync + 'static,
{
const RAW_WAKER_VTABLE: RawWakerVTable = RawWakerVTable::new(
Self::clone_waker,
Self::wake_by_val,
Self::wake_by_ref,
Self::drop_waker,
);
/// Clones a waker.
unsafe fn clone_waker(ptr: *const ()) -> RawWaker {
let this = &*(ptr as *const Self);
@ -141,7 +134,7 @@ where
panic!("Attack of the clones: the waker was cloned too many times");
}
RawWaker::new(ptr, &Self::RAW_WAKER_VTABLE)
RawWaker::new(ptr, raw_waker_vtable::<F, S, T>())
}
/// Wakes the task by value.
@ -287,11 +280,42 @@ where
}
}
/// Returns a reference to the waker's virtual table.
///
/// Unfortunately, Rust will sometimes create multiple memory instances of the
/// virtual table for the same generic parameters, which defeats
/// `Waker::will_wake` as the latter tests the pointers to the virtual tables
/// for equality.
///
/// Preventing the function from being inlined appears to solve this problem,
/// but we may want to investigate more robust methods. For unrelated reasons,
/// Tokio has switched [1] to a single non-generic virtual table declared as
/// `static` which then delegates each call to another virtual call. This does
/// ensure that `Waker::will_wake` will always work, but the double indirection
/// is a bit unfortunate and its cost would need to be evaluated.
///
/// [1]: https://github.com/tokio-rs/tokio/pull/5213
#[inline(never)]
fn raw_waker_vtable<F, S, T>() -> &'static RawWakerVTable
where
    F: Future + Send + 'static,
    F::Output: Send + 'static,
    S: Fn(Runnable, T) + Send + Sync + 'static,
    T: Clone + Send + Sync + 'static,
{
    // Returning a `'static` reference to a temporary is sound here:
    // `RawWakerVTable::new` is a `const fn` called with constant arguments
    // (function pointers), so the value is promoted to a `'static` constant.
    &RawWakerVTable::new(
        Task::<F, S, T>::clone_waker,
        Task::<F, S, T>::wake_by_val,
        Task::<F, S, T>::wake_by_ref,
        Task::<F, S, T>::drop_waker,
    )
}
/// Spawns a task.
///
/// An arbitrary tag can be attached to the task, a clone of which will be
/// passed to the scheduling function each time it is called.
///
/// The returned `Runnable` must be scheduled by the user.
pub(crate) fn spawn<F, S, T>(
future: F,

View File

@ -25,7 +25,7 @@ struct VTable {
/// but not currently scheduled (no `Runnable` exist) then the future is
/// dropped immediately. Otherwise, the future will be dropped at a later
/// time by the scheduled `Runnable` once it runs.
unsafe fn cancel<F: Future, S, T>(ptr: *const ())
unsafe fn cancel<F, S, T>(ptr: *const ())
where
F: Future + Send + 'static,
F::Output: Send + 'static,
@ -123,7 +123,7 @@ where
}
/// Drops the token without cancelling the task.
unsafe fn drop<F: Future, S, T>(ptr: *const ())
unsafe fn drop<F, S, T>(ptr: *const ())
where
F: Future + Send + 'static,
F::Output: Send + 'static,
@ -180,7 +180,7 @@ impl CancelToken {
/// allocator,
/// - the reference count has been incremented to account for this new task
/// reference.
pub(super) unsafe fn new_unchecked<F: Future, S, T>(task: *const Task<F, S, T>) -> Self
pub(super) unsafe fn new_unchecked<F, S, T>(task: *const Task<F, S, T>) -> Self
where
F: Future + Send + 'static,
F::Output: Send + 'static,

View File

@ -20,7 +20,7 @@ struct VTable<U: Send + 'static> {
}
/// Retrieves the output of the task if ready.
unsafe fn poll<F: Future, S, T>(ptr: *const ()) -> Stage<F::Output>
unsafe fn poll<F, S, T>(ptr: *const ()) -> Stage<F::Output>
where
F: Future + Send + 'static,
F::Output: Send + 'static,
@ -62,7 +62,7 @@ where
}
/// Drops the promise.
unsafe fn drop<F: Future, S, T>(ptr: *const ())
unsafe fn drop<F, S, T>(ptr: *const ())
where
F: Future + Send + 'static,
F::Output: Send + 'static,

View File

@ -11,7 +11,7 @@ use crate::loom_exports::debug_or_loom_assert;
use crate::loom_exports::sync::atomic::{self, AtomicU64, Ordering};
use super::util::RunOnDrop;
use super::Task;
use super::{raw_waker_vtable, Task};
use super::{CLOSED, POLLING, REF_MASK, WAKE_MASK};
/// Virtual table for a `Runnable`.
@ -22,7 +22,7 @@ struct VTable {
}
/// Polls the inner future.
unsafe fn run<F: Future, S, T>(ptr: *const ())
unsafe fn run<F, S, T>(ptr: *const ())
where
F: Future + Send + 'static,
F::Output: Send + 'static,
@ -77,7 +77,7 @@ where
}
// Poll the task.
let raw_waker = RawWaker::new(ptr, &Task::<F, S, T>::RAW_WAKER_VTABLE);
let raw_waker = RawWaker::new(ptr, raw_waker_vtable::<F, S, T>());
let waker = ManuallyDrop::new(Waker::from_raw(raw_waker));
let cx = &mut Context::from_waker(&waker);

View File

@ -0,0 +1,7 @@
use super::*;
#[cfg(not(nexosim_loom))]
mod general;
#[cfg(nexosim_loom)]
mod loom;

View File

@ -136,19 +136,41 @@ impl<F: Future> Drop for MonitoredFuture<F> {
}
}
// A future that checks whether the waker cloned on the first call to `poll`
// compares equal (per `Waker::will_wake`) to the waker observed on the second
// call to `poll`.
struct WillWakeFuture {
    waker: Arc<Mutex<Option<std::task::Waker>>>,
}

impl Future for WillWakeFuture {
    type Output = bool;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let mut slot = self.waker.lock().unwrap();

        if let Some(stored) = slot.as_ref() {
            // Second call: compare the stored waker with the current one.
            Poll::Ready(stored.will_wake(cx.waker()))
        } else {
            // First call: stash a clone of the waker and stay pending.
            *slot = Some(cx.waker().clone());
            Poll::Pending
        }
    }
}
#[test]
fn task_schedule() {
test_prelude!();
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move { 42 });
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
assert_eq!(future_is_alive.get(), true);
assert_eq!(output_is_alive.get(), false);
assert!(future_is_alive.get());
assert!(!output_is_alive.get());
// The task should complete immediately when ran.
runnable.run();
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
}
@ -178,13 +200,13 @@ fn task_schedule_and_forget() {
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async {});
let (runnable, _cancel_token) = spawn_and_forget(future, schedule_runnable, ());
assert_eq!(future_is_alive.get(), true);
assert_eq!(output_is_alive.get(), false);
assert!(future_is_alive.get());
assert!(!output_is_alive.get());
// The task should complete immediately when ran.
runnable.run();
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
}
#[test]
@ -193,25 +215,23 @@ fn task_wake() {
let (sender, receiver) = oneshot::channel();
let (future, future_is_alive, output_is_alive) = MonitoredFuture::new(async move {
let result = receiver.await.unwrap();
result
});
let (future, future_is_alive, output_is_alive) =
MonitoredFuture::new(async move { receiver.await.unwrap() });
let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
runnable.run();
// The future should have been polled but should not have completed.
assert_eq!(output_is_alive.get(), false);
assert!(!output_is_alive.get());
assert!(promise.poll().is_pending());
// Wake the task.
sender.send(42).unwrap();
// The task should have been scheduled by the channel sender.
assert_eq!(run_scheduled_runnable(), true);
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(run_scheduled_runnable());
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
}
@ -222,10 +242,7 @@ fn task_wake_mt() {
let (sender, receiver) = oneshot::channel();
let (promise, runnable, _cancel_token) = spawn(
async move {
let result = receiver.await.unwrap();
result
},
async move { receiver.await.unwrap() },
schedule_runnable,
(),
);
@ -262,15 +279,15 @@ fn task_wake_and_forget() {
runnable.run();
// The future should have been polled but should not have completed.
assert_eq!(output_is_alive.get(), false);
assert!(!output_is_alive.get());
// Wake the task.
sender.send(42).unwrap();
// The task should have been scheduled by the channel sender.
assert_eq!(run_scheduled_runnable(), true);
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(run_scheduled_runnable());
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
}
#[test]
@ -299,7 +316,7 @@ fn task_multiple_wake() {
sender.try_send(3).unwrap();
// The task should have been scheduled by the channel sender.
assert_eq!(run_scheduled_runnable(), true);
assert!(run_scheduled_runnable());
assert!(promise.poll().is_pending());
// The channel should be empty. Wake the task 2 more times.
@ -307,11 +324,11 @@ fn task_multiple_wake() {
sender.try_send(5).unwrap();
// The task should have been scheduled by the channel sender.
assert_eq!(run_scheduled_runnable(), true);
assert!(run_scheduled_runnable());
// The task should have completed.
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(15));
}
@ -379,13 +396,13 @@ fn task_cancel_scheduled() {
// The future should not be dropped while the `Runnable` exists, even if the
// task is cancelled, but the task should be seen as cancelled.
assert_eq!(future_is_alive.get(), true);
assert!(future_is_alive.get());
assert!(promise.poll().is_cancelled());
// An attempt to run the task should now drop the future without polling it.
runnable.run();
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), false);
assert!(!future_is_alive.get());
assert!(!output_is_alive.get());
}
#[test]
@ -400,8 +417,8 @@ fn task_cancel_unscheduled() {
let (promise, runnable, cancel_token) = spawn(future, schedule_runnable, ());
runnable.run();
assert_eq!(future_is_alive.get(), true);
assert_eq!(output_is_alive.get(), false);
assert!(future_is_alive.get());
assert!(!output_is_alive.get());
// Cancel the task while no `Runnable` exists (the task is not scheduled as
// it needs to be woken by the channel sender first).
@ -411,8 +428,8 @@ fn task_cancel_unscheduled() {
// The future should be dropped immediately upon cancellation without
// completing.
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), false);
assert!(!future_is_alive.get());
assert!(!output_is_alive.get());
}
#[test]
@ -423,12 +440,12 @@ fn task_cancel_completed() {
let (promise, runnable, cancel_token) = spawn(future, schedule_runnable, ());
runnable.run();
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
// Cancel the already completed task.
cancel_token.cancel();
assert_eq!(output_is_alive.get(), true);
assert!(output_is_alive.get());
assert_eq!(promise.poll().map(|v| *v), Stage::Ready(42));
}
@ -457,8 +474,8 @@ fn task_drop_promise_scheduled() {
// The task should complete immediately when ran.
runnable.run();
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
}
#[test]
@ -482,9 +499,9 @@ fn task_drop_promise_unscheduled() {
assert!(sender.send(()).is_ok());
// The task should have been scheduled by the channel sender.
assert_eq!(run_scheduled_runnable(), true);
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), true);
assert!(run_scheduled_runnable());
assert!(!future_is_alive.get());
assert!(output_is_alive.get());
}
#[test]
@ -516,9 +533,9 @@ fn task_drop_runnable() {
assert!(sender.send(()).is_ok());
// Drop the task scheduled by the channel sender.
assert_eq!(drop_runnable(), true);
assert_eq!(future_is_alive.get(), false);
assert_eq!(output_is_alive.get(), false);
assert!(drop_runnable());
assert!(!future_is_alive.get());
assert!(!output_is_alive.get());
assert!(promise.poll().is_cancelled());
}
@ -623,3 +640,24 @@ fn task_drop_cycle() {
assert_eq!(DROP_COUNT.load(Ordering::Relaxed), 3);
}
#[test]
fn task_will_wake() {
    test_prelude!();

    // Shared slot in which `WillWakeFuture` is expected to store its waker on
    // the first poll, so the test can wake the task manually afterwards.
    let waker = Arc::new(Mutex::new(None));
    let future = WillWakeFuture {
        waker: waker.clone(),
    };

    let (promise, runnable, _cancel_token) = spawn(future, schedule_runnable, ());
    // First poll: the future does not complete yet, so the promise is pending.
    runnable.run();
    assert!(promise.poll().is_pending());

    // Wake the future so it is scheduled another time.
    waker.lock().unwrap().as_ref().unwrap().wake_by_ref();
    // The wake-up must have scheduled a `Runnable`; running it completes the task.
    assert!(run_scheduled_runnable());
    // NOTE(review): `Ready(true)` presumably means the waker seen on the second
    // poll satisfied `Waker::will_wake` against the stored one — confirm
    // against the definition of `WillWakeFuture`.
    assert_eq!(promise.poll(), Stage::Ready(true));
}

View File

@ -1,9 +1,9 @@
//! A high-performance, discrete-event computation framework for system
//! simulation.
//!
//! Asynchronix is a developer-friendly, yet highly optimized software simulator
//! able to scale to very large simulation with complex time-driven state
//! machines.
//! NeXosim (né Asynchronix) is a developer-friendly, yet highly optimized
//! software simulator able to scale to very large simulations with complex
//! time-driven state machines.
//!
//! It promotes a component-oriented architecture that is familiar to system
//! engineers and closely resembles [flow-based programming][FBP]: a model is
@ -12,10 +12,10 @@
//! defined during bench assembly. Unlike in conventional flow-based
//! programming, request-reply patterns are also possible.
//!
//! Asynchronix leverages asynchronous programming to perform
//! auto-parallelization in a manner that is fully transparent to model authors
//! and users, achieving high computational throughput on large simulation
//! benches by means of a custom multi-threaded executor.
//! NeXosim leverages asynchronous programming to perform auto-parallelization
//! in a manner that is fully transparent to model authors and users, achieving
//! high computational throughput on large simulation benches by means of a
//! custom multi-threaded executor.
//!
//!
//! [FBP]: https://en.wikipedia.org/wiki/Flow-based_programming
@ -36,32 +36,45 @@
//!
//! Models can contain four kinds of ports:
//!
//! * _output ports_, which are instances of the [`Output`](model::Output) type
//! * _output ports_, which are instances of the [`Output`](ports::Output) type
//! and can be used to broadcast a message,
//! * _requestor ports_, which are instances of the
//! [`Requestor`](model::Requestor) type and can be used to broadcast a
//! message and receive an iterator yielding the replies from all connected
//! replier ports,
//! [`Requestor`](ports::Requestor) or [`UniRequestor`](ports::UniRequestor)
//! types and can be used to broadcast a message and receive an iterator
//! yielding the replies from all connected replier ports,
//! * _input ports_, which are synchronous or asynchronous methods that
//! implement the [`InputFn`](model::InputFn) trait and take an `&mut self`
//! implement the [`InputFn`](ports::InputFn) trait and take an `&mut self`
//! argument, a message argument, and an optional
//! [`&Scheduler`](time::Scheduler) argument,
//! [`&mut Context`](model::Context) argument,
//! * _replier ports_, which are similar to input ports but implement the
//! [`ReplierFn`](model::ReplierFn) trait and return a reply.
//! [`ReplierFn`](ports::ReplierFn) trait and return a reply.
//!
//! Messages that are broadcast by an output port to an input port are referred
//! to as *events*, while messages exchanged between requestor and replier ports
//! are referred to as *requests* and *replies*.
//!
//! Models must implement the [`Model`](model::Model) trait. The main purpose of
//! this trait is to allow models to specify an `init()` method that is
//! guaranteed to run once and only once when the simulation is initialized,
//! _i.e._ after all models have been connected but before the simulation
//! starts. The `init()` method has a default implementation, so models that do
//! not require initialization can simply implement the trait with a one-liner
//! such as `impl Model for MyModel {}`.
//! this trait is to allow models to specify a
//! [`Model::init`](model::Model::init) method that is guaranteed to run once
//! and only once when the simulation is initialized, _i.e._ after all models
//! have been connected but before the simulation starts.
//!
//! #### A simple model
//! The [`Model::init`](model::Model::init) method has a default
//! implementation, so models that do not require setup and initialization can
//! simply implement the trait with a one-liner such as `impl Model for MyModel
//! {}`.
//!
//! More complex models can be built with the [`ProtoModel`](model::ProtoModel)
//! trait. The [`ProtoModel::build`](model::ProtoModel::build) method makes it
//! possible to:
//!
//! * build the final [`Model`](model::Model) from a builder (the *model prototype*),
//! * perform possibly blocking actions when the model is added to the
//! simulation rather than when the simulation starts, such as establishing a
//! network connection or configuring hardware devices,
//! * connect submodels and add them to the simulation.
//!
//! ### A simple model
//!
//! Let us consider for illustration a simple model that forwards its input
//! after multiplying it by 2. This model has only one input and one output
@ -70,7 +83,7 @@
//! ```text
//! ┌────────────┐
//! │ │
//! Input ●───────▶│ Multiplier ├───────▶ Output
//! Input ●───────►│ Multiplier ├───────► Output
//! f64 │ │ f64
//! └────────────┘
//! ```
@ -78,7 +91,8 @@
//! `Multiplier` could be implemented as follows:
//!
//! ```
//! use asynchronix::model::{Model, Output};
//! use nexosim::model::Model;
//! use nexosim::ports::Output;
//!
//! #[derive(Default)]
//! pub struct Multiplier {
@ -92,28 +106,28 @@
//! impl Model for Multiplier {}
//! ```
//!
//! #### A model using the local scheduler
//! ### A model using the local context
//!
//! Models frequently need to schedule actions at a future time or simply get
//! access to the current simulation time. To do so, input and replier methods
//! can take an optional argument that gives them access to a local scheduler.
//! can take an optional argument that gives them access to a local context.
//!
//! To show how the local scheduler can be used in practice, let us implement
//! To show how the local context can be used in practice, let us implement
//! `Delay`, a model which simply forwards its input unmodified after a 1s
//! delay:
//!
//! ```
//! use std::time::Duration;
//! use asynchronix::model::{Model, Output};
//! use asynchronix::time::Scheduler;
//! use nexosim::model::{Context, Model};
//! use nexosim::ports::Output;
//!
//! #[derive(Default)]
//! pub struct Delay {
//! pub output: Output<f64>,
//! }
//! impl Delay {
//! pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
//! scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! pub fn input(&mut self, value: f64, cx: &mut Context<Self>) {
//! cx.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! }
//!
//! async fn send(&mut self, value: f64) {
@ -135,7 +149,7 @@
//! [`Address`](simulation::Mailbox)es pointing to that mailbox.
//!
//! Addresses are used among others to connect models: each output or requestor
//! ports has a `connect()` method that takes as argument a function pointer to
//! port has a `connect` method that takes as argument a function pointer to
//! the corresponding input or replier port method and the address of the
//! targeted model.
//!
@ -150,13 +164,13 @@
//! ```text
//! ┌────────────┐
//! │ │
//! ┌──│ Delay ├──┐
//! ┌──│ Delay ├──┐
//! ┌────────────┐ │ │ │ │ ┌────────────┐
//! │ │ │ └────────────┘ │ │ │
//! Input ●──▶│ Multiplier ├───┤ ├──▶│ Delay ├──▶ Output
//! Input ●──►│ Multiplier ├───┤ ├──►│ Delay ├──► Output
//! │ │ │ ┌────────────┐ │ │ │
//! └────────────┘ │ │ │ │ └────────────┘
//! └──│ Multiplier ├──┘
//! └──│ Multiplier ├──┘
//! │ │
//! └────────────┘
//! ```
@ -166,8 +180,8 @@
//! ```
//! # mod models {
//! # use std::time::Duration;
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::time::Scheduler;
//! # use nexosim::model::{Context, Model};
//! # use nexosim::ports::Output;
//! # #[derive(Default)]
//! # pub struct Multiplier {
//! # pub output: Output<f64>,
@ -183,8 +197,8 @@
//! # pub output: Output<f64>,
//! # }
//! # impl Delay {
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! # pub fn input(&mut self, value: f64, cx: &mut Context<Self>) {
//! # cx.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! # }
//! # async fn send(&mut self, value: f64) { // this method can be private
//! # self.output.send(value).await;
@ -193,8 +207,9 @@
//! # impl Model for Delay {}
//! # }
//! use std::time::Duration;
//! use asynchronix::simulation::{Mailbox, SimInit};
//! use asynchronix::time::MonotonicTime;
//! use nexosim::ports::EventSlot;
//! use nexosim::simulation::{Mailbox, SimInit};
//! use nexosim::time::MonotonicTime;
//!
//! use models::{Delay, Multiplier};
//!
@ -217,17 +232,20 @@
//! delay1.output.connect(Delay::input, &delay2_mbox);
//!
//! // Keep handles to the system input and output for the simulation.
//! let mut output_slot = delay2.output.connect_slot().0;
//! let mut output_slot = EventSlot::new();
//! delay2.output.connect_sink(&output_slot);
//! let input_address = multiplier1_mbox.address();
//!
//! // Pick an arbitrary simulation start time and build the simulation.
//! let t0 = MonotonicTime::EPOCH;
//! let mut simu = SimInit::new()
//! .add_model(multiplier1, multiplier1_mbox)
//! .add_model(multiplier2, multiplier2_mbox)
//! .add_model(delay1, delay1_mbox)
//! .add_model(delay2, delay2_mbox)
//! .init(t0);
//! let (mut simu, scheduler) = SimInit::new()
//! .add_model(multiplier1, multiplier1_mbox, "multiplier1")
//! .add_model(multiplier2, multiplier2_mbox, "multiplier2")
//! .add_model(delay1, delay1_mbox, "delay1")
//! .add_model(delay2, delay2_mbox, "delay2")
//! .init(t0)?;
//!
//! # Ok::<(), nexosim::simulation::SimulationError>(())
//! ```
//!
//! ## Running simulations
@ -235,30 +253,27 @@
//! The simulation can be controlled in several ways:
//!
//! 1. by advancing time, either until the next scheduled event with
//! [`Simulation::step()`](simulation::Simulation::step), or until a specific
//! deadline using for instance
//! [`Simulation::step_by()`](simulation::Simulation::step_by).
//! [`Simulation::step`](simulation::Simulation::step), until a specific
//! deadline with
//! [`Simulation::step_until`](simulation::Simulation::step_until), or
//! until there are no more scheduled events with
//! [`Simulation::step_unbounded`](simulation::Simulation::step_unbounded).
//! 2. by sending events or queries without advancing simulation time, using
//! [`Simulation::send_event()`](simulation::Simulation::send_event) or
//! [`Simulation::send_query()`](simulation::Simulation::send_query),
//! 3. by scheduling events, using for instance
//! [`Simulation::schedule_event()`](simulation::Simulation::schedule_event).
//! [`Simulation::process_event`](simulation::Simulation::process_event) or
//! [`Simulation::process_query`](simulation::Simulation::process_query),
//! 3. by scheduling events with a [`Scheduler`](simulation::Scheduler).
//!
//! When a simulation is initialized via
//! [`SimInit::init()`](simulation::SimInit::init) then the simulation will run
//! as fast as possible, without regard for the actual wall clock time.
//! Alternatively, it is possible to initialize a simulation via
//! [`SimInit::init_with_clock()`](simulation::SimInit::init_with_clock) to bind
//! the simulation time to the wall clock time using a custom
//! [`Clock`](time::Clock) type or a readily-available real-time clock such as
//! [`AutoSystemClock`](time::AutoSystemClock).
//! When initialized with the default clock, the simulation will run as fast as
//! possible, without regard for the actual wall clock time. Alternatively, the
//! simulation time can be synchronized to the wall clock time using
//! [`SimInit::set_clock`](simulation::SimInit::set_clock) and providing a
//! custom [`Clock`](time::Clock) type or a readily-available real-time clock
//! such as [`AutoSystemClock`](time::AutoSystemClock).
//!
//! Simulation outputs can be monitored using
//! [`EventSlot`](simulation::EventSlot)s and
//! [`EventStream`](simulation::EventStream)s, which can be connected to any
//! model's output port. While an event slot only gives access to the last value
//! sent from a port, an event stream is an iterator that yields all events that
//! were sent in first-in-first-out order.
//! Simulation outputs can be monitored using [`EventSlot`](ports::EventSlot)s,
//! [`EventBuffer`](ports::EventBuffer)s, or any implementer of the
//! [`EventSink`](ports::EventSink) trait, connected to one or several model
//! output ports.
//!
//! This is an example of simulation that could be performed using the above
//! bench assembly:
@ -266,8 +281,8 @@
//! ```
//! # mod models {
//! # use std::time::Duration;
//! # use asynchronix::model::{Model, Output};
//! # use asynchronix::time::Scheduler;
//! # use nexosim::model::{Context, Model};
//! # use nexosim::ports::Output;
//! # #[derive(Default)]
//! # pub struct Multiplier {
//! # pub output: Output<f64>,
@ -283,8 +298,8 @@
//! # pub output: Output<f64>,
//! # }
//! # impl Delay {
//! # pub fn input(&mut self, value: f64, scheduler: &Scheduler<Self>) {
//! # scheduler.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! # pub fn input(&mut self, value: f64, cx: &mut Context<Self>) {
//! # cx.schedule_event(Duration::from_secs(1), Self::send, value).unwrap();
//! # }
//! # async fn send(&mut self, value: f64) { // this method can be private
//! # self.output.send(value).await;
@ -293,8 +308,9 @@
//! # impl Model for Delay {}
//! # }
//! # use std::time::Duration;
//! # use asynchronix::simulation::{Mailbox, SimInit};
//! # use asynchronix::time::MonotonicTime;
//! # use nexosim::ports::EventSlot;
//! # use nexosim::simulation::{Mailbox, SimInit};
//! # use nexosim::time::MonotonicTime;
//! # use models::{Delay, Multiplier};
//! # let mut multiplier1 = Multiplier::default();
//! # let mut multiplier2 = Multiplier::default();
@ -308,47 +324,51 @@
//! # multiplier1.output.connect(Multiplier::input, &multiplier2_mbox);
//! # multiplier2.output.connect(Delay::input, &delay2_mbox);
//! # delay1.output.connect(Delay::input, &delay2_mbox);
//! # let mut output_slot = delay2.output.connect_slot().0;
//! # let mut output_slot = EventSlot::new();
//! # delay2.output.connect_sink(&output_slot);
//! # let input_address = multiplier1_mbox.address();
//! # let t0 = MonotonicTime::EPOCH;
//! # let mut simu = SimInit::new()
//! # .add_model(multiplier1, multiplier1_mbox)
//! # .add_model(multiplier2, multiplier2_mbox)
//! # .add_model(delay1, delay1_mbox)
//! # .add_model(delay2, delay2_mbox)
//! # .init(t0);
//! # .add_model(multiplier1, multiplier1_mbox, "multiplier1")
//! # .add_model(multiplier2, multiplier2_mbox, "multiplier2")
//! # .add_model(delay1, delay1_mbox, "delay1")
//! # .add_model(delay2, delay2_mbox, "delay2")
//! # .init(t0)?
//! # .0;
//! // Send a value to the first multiplier.
//! simu.send_event(Multiplier::input, 21.0, &input_address);
//! simu.process_event(Multiplier::input, 21.0, &input_address)?;
//!
//! // The simulation is still at t0 so nothing is expected at the output of the
//! // second delay gate.
//! assert!(output_slot.take().is_none());
//! assert!(output_slot.next().is_none());
//!
//! // Advance simulation time until the next event and check the time and output.
//! simu.step();
//! simu.step()?;
//! assert_eq!(simu.time(), t0 + Duration::from_secs(1));
//! assert_eq!(output_slot.take(), Some(84.0));
//! assert_eq!(output_slot.next(), Some(84.0));
//!
//! // Get the answer to the ultimate question of life, the universe & everything.
//! simu.step();
//! simu.step()?;
//! assert_eq!(simu.time(), t0 + Duration::from_secs(2));
//! assert_eq!(output_slot.take(), Some(42.0));
//! assert_eq!(output_slot.next(), Some(42.0));
//!
//! # Ok::<(), nexosim::simulation::SimulationError>(())
//! ```
//!
//! # Message ordering guarantees
//!
//! The Asynchronix runtime is based on the [actor model][actor_model], meaning
//! that every simulation model can be thought of as an isolated entity running
//! in its own thread. While in practice the runtime will actually multiplex and
//! The NeXosim runtime is based on the [actor model][actor_model], meaning that
//! every simulation model can be thought of as an isolated entity running in
//! its own thread. While in practice the runtime will actually multiplex and
//! migrate models over a fixed set of kernel threads, models will indeed run in
//! parallel whenever possible.
//!
//! Since Asynchronix is a time-based simulator, the runtime will always execute
//! Since NeXosim is a time-based simulator, the runtime will always execute
//! tasks in chronological order, thus eliminating most ordering ambiguities
//! that could result from parallel execution. Nevertheless, it is sometimes
//! possible for events and queries generated in the same time slice to lead to
//! ambiguous execution orders. In order to make it easier to reason about such
//! situations, Asynchronix provides a set of guarantees about message delivery
//! situations, NeXosim provides a set of guarantees about message delivery
//! order. Borrowing from the [Pony][pony] programming language, we refer to
//! this contract as *causal messaging*, a property that can be summarized by
//! these two rules:
@ -361,54 +381,108 @@
//! processed in any order by `B` and `C`, it is guaranteed that `B` will
//! process `M1` before `M3`.
//!
//! The first guarantee (and only the first) also extends to events scheduled
//! from a simulation with a
//! [`Simulation::schedule_*()`](simulation::Simulation::schedule_event) method:
//! if the scheduler contains several events to be delivered at the same time to
//! the same model, these events will always be processed in the order in which
//! they were scheduled.
//! Both guarantees also extend to same-time events scheduled from the global
//! [`Scheduler`](simulation::Scheduler), *i.e.* the relative ordering of events
//! scheduled for the same time is preserved and guarantees 1 and 2 above
//! accordingly hold (assuming model `A` stands for the scheduler). Likewise,
//! the relative order of same-time events self-scheduled by a model using its
//! [`Context`](model::Context) is preserved.
//!
//! [actor_model]: https://en.wikipedia.org/wiki/Actor_model
//! [pony]: https://www.ponylang.io/
//!
//!
//! # Cargo feature flags
//!
//! ## Tracing
//!
//! The `tracing` feature flag provides support for the
//! [`tracing`](https://docs.rs/tracing/latest/tracing/) crate and can be
//! activated in `Cargo.toml` with:
//!
//! ```toml
//! [dependencies]
//! nexosim = { version = "0.3.1", features = ["tracing"] }
//! ```
//!
//! See the [`tracing`] module for more information.
//!
//! ## Server
//!
//! The `server` feature provides a gRPC server for remote control and monitoring,
//! e.g. from a Python client. It can be activated with:
//!
//! ```toml
//! [dependencies]
//! nexosim = { version = "0.3.1", features = ["server"] }
//! ```
//!
//! See the [`registry`] and [`server`] modules for more information.
//!
//! Front-end usage documentation will be added upon release of the NeXosim
//! Python client.
//!
//!
//! # Other resources
//!
//! ## Other examples
//!
//! The [`examples`][gh_examples] directory in the main repository contains more
//! fleshed out examples that demonstrate various capabilities of the simulation
//! Several [`examples`][gh_examples] are available that are more fleshed
//! out and demonstrate various capabilities of the simulation
//! framework.
//!
//! [gh_examples]:
//! https://github.com/asynchronics/asynchronix/tree/main/asynchronix/examples
//! https://github.com/asynchronics/nexosim/tree/main/nexosim/examples
//!
//! ## Modules documentation
//!
//! While the above overview does cover the basic concepts, more information is
//! available in the documentation of the different modules:
//! ## Other features and advanced topics
//!
//! While the above overview does cover most basic concepts, more information is
//! available in the modules' documentation:
//!
//! * the [`model`] module provides more details about models, **model
//! prototypes** and **hierarchical models**; be sure to check as well the
//! documentation of [`model::Context`] for topics such as **self-scheduling**
//! methods and **event cancellation**,
//! * the [`ports`] module discusses in more details model ports and simulation
//! endpoints, as well as the ability to **modify and filter messages**
//! exchanged between ports; it also provides
//! [`EventSource`](ports::EventSource) and
//! [`QuerySource`](ports::QuerySource) objects which can be connected to
//! models just like [`Output`](ports::Output) and
//! [`Requestor`](ports::Requestor) ports, but for use as simulation
//! endpoints.
//! * the [`registry`] and [`server`] modules make it possible to manage and
//! monitor a simulation locally or remotely from a NeXosim Python client,
//! * the [`simulation`] module discusses **mailbox capacity** and pathological
//! situations that may lead to a **deadlock**,
//! * the [`time`] module introduces the [`time::MonotonicTime`] monotonic
//! timestamp object and **simulation clocks**.
//! * the [`tracing`] module discusses time-stamping and filtering of `tracing`
//! events.
//!
//! * the [`model`] module provides more details about the signatures of input
//! and replier port methods and discusses model initialization in the
//! documentation of [`model::Model`],
//! * the [`simulation`] module discusses how the capacity of mailboxes may
//! affect the simulation, how connections can be modified after the
//! simulation was instantiated, and which pathological situations can lead to
//! a deadlock,
//! * the [`time`] module discusses in particular self-scheduling methods and
//! scheduling cancellation in the documentation of [`time::Scheduler`] while
//! the monotonic timestamp format used for simulations is documented in
//! [`time::MonotonicTime`].
#![warn(missing_docs, missing_debug_implementations, unreachable_pub)]
#![cfg_attr(docsrs, feature(doc_auto_cfg, doc_cfg_hide))]
#![cfg_attr(docsrs, doc(cfg_hide(feature = "dev-hooks")))]
pub(crate) mod channel;
pub(crate) mod executor;
mod loom_exports;
pub(crate) mod macros;
pub mod model;
pub mod ports;
pub mod simulation;
pub mod time;
pub(crate) mod util;
#[cfg(feature = "server")]
pub mod registry;
#[cfg(feature = "server")]
pub mod server;
#[cfg(feature = "tracing")]
pub mod tracing;
#[cfg(feature = "dev-hooks")]
#[doc(hidden)]
pub mod dev_hooks;

View File

@ -1,7 +1,8 @@
#[cfg(asynchronix_loom)]
#[cfg(all(test, nexosim_loom))]
#[allow(unused_imports)]
pub(crate) mod sync {
pub(crate) use loom::sync::{Arc, Mutex};
pub(crate) use loom::sync::{Arc, LockResult, Mutex, MutexGuard};
pub(crate) use std::sync::PoisonError;
pub(crate) mod atomic {
pub(crate) use loom::sync::atomic::{
@ -9,10 +10,10 @@ pub(crate) mod sync {
};
}
}
#[cfg(not(asynchronix_loom))]
#[cfg(not(all(test, nexosim_loom)))]
#[allow(unused_imports)]
pub(crate) mod sync {
pub(crate) use std::sync::{Arc, Mutex};
pub(crate) use std::sync::{Arc, LockResult, Mutex, MutexGuard, PoisonError};
pub(crate) mod atomic {
pub(crate) use std::sync::atomic::{
@ -21,11 +22,11 @@ pub(crate) mod sync {
}
}
#[cfg(asynchronix_loom)]
#[cfg(all(test, nexosim_loom))]
pub(crate) mod cell {
pub(crate) use loom::cell::UnsafeCell;
}
#[cfg(not(asynchronix_loom))]
#[cfg(not(all(test, nexosim_loom)))]
pub(crate) mod cell {
#[derive(Debug)]
pub(crate) struct UnsafeCell<T>(std::cell::UnsafeCell<T>);
@ -49,11 +50,11 @@ pub(crate) mod cell {
#[allow(unused_macros)]
macro_rules! debug_or_loom_assert {
($($arg:tt)*) => (if cfg!(any(debug_assertions, asynchronix_loom)) { assert!($($arg)*); })
($($arg:tt)*) => (if cfg!(any(debug_assertions, nexosim_loom)) { assert!($($arg)*); })
}
#[allow(unused_macros)]
macro_rules! debug_or_loom_assert_eq {
($($arg:tt)*) => (if cfg!(any(debug_assertions, asynchronix_loom)) { assert_eq!($($arg)*); })
($($arg:tt)*) => (if cfg!(any(debug_assertions, nexosim_loom)) { assert_eq!($($arg)*); })
}
#[allow(unused_imports)]
pub(crate) use debug_or_loom_assert;

View File

@ -7,19 +7,18 @@ use std::ptr;
/// Declare a new thread-local storage scoped key of type `ScopedKey<T>`.
///
/// This is based on the `scoped-tls` crate, with slight modifications, such as
/// the use of the newly available `const` qualifier for TLS.
/// the addition of a `ScopedLocalKey::unset` method and the use of a `map`
/// method that returns `Option::None` when the value is not set, rather than
/// panicking as `with` would.
macro_rules! scoped_thread_local {
($(#[$attrs:meta])* $vis:vis static $name:ident: $ty:ty) => (
$(#[$attrs])*
$vis static $name: $crate::macros::scoped_thread_local::ScopedLocalKey<$ty>
= $crate::macros::scoped_thread_local::ScopedLocalKey {
inner: {
thread_local!(static FOO: ::std::cell::Cell<*const ()> = const {
std::cell::Cell::new(::std::ptr::null())
});
&FOO
},
_marker: ::std::marker::PhantomData,
= unsafe {
::std::thread_local!(static FOO: ::std::cell::Cell<*const ()> = const {
::std::cell::Cell::new(::std::ptr::null())
});
$crate::macros::scoped_thread_local::ScopedLocalKey::new(&FOO)
};
)
}
@ -28,13 +27,24 @@ pub(crate) use scoped_thread_local;
/// Type representing a thread local storage key corresponding to a reference
/// to the type parameter `T`.
pub(crate) struct ScopedLocalKey<T> {
pub(crate) inner: &'static LocalKey<Cell<*const ()>>,
pub(crate) _marker: marker::PhantomData<T>,
inner: &'static LocalKey<Cell<*const ()>>,
_marker: marker::PhantomData<T>,
}
unsafe impl<T> Sync for ScopedLocalKey<T> {}
impl<T> ScopedLocalKey<T> {
#[doc(hidden)]
/// # Safety
///
/// Should only be called through the public macro.
pub(crate) const unsafe fn new(inner: &'static LocalKey<Cell<*const ()>>) -> Self {
Self {
inner,
_marker: marker::PhantomData,
}
}
/// Inserts a value into this scoped thread local storage slot for the
/// duration of a closure.
pub(crate) fn set<F, R>(&'static self, t: &T, f: F) -> R
@ -113,7 +123,7 @@ impl<T> ScopedLocalKey<T> {
}
}
#[cfg(all(test, not(asynchronix_loom)))]
#[cfg(all(test, not(nexosim_loom)))]
mod tests {
use std::cell::Cell;
use std::sync::mpsc::{channel, Sender};

296
nexosim/src/model.rs Normal file
View File

@ -0,0 +1,296 @@
//! Model components.
//!
//! # Models and model prototypes
//!
//! Every model must implement the [`Model`] trait. This trait defines an
//! asynchronous initialization method, [`Model::init`], whose main purpose is
//! to enable models to perform specific actions when the simulation starts,
//! *i.e.* after all models have been connected and added to the simulation.
//!
//! It is frequently convenient to expose to users a model builder type—called a
//! *model prototype*—rather than the final model. This can be done by
//! implementing the [`ProtoModel`] trait, which defines the associated model
//! type and a [`ProtoModel::build`] method invoked when a model is added to the
//! simulation.
//!
//! Prototype models can be used whenever the Rust builder pattern is helpful,
//! for instance to set optional parameters. One of the use-cases that may
//! benefit from the use of prototype models is hierarchical model building.
//! When a parent model contains submodels, these submodels are often an
//! implementation detail that needs not be exposed to the user. One may then
//! define a prototype model that contains all outputs and requestors ports,
//! while the model itself contains the input and replier ports. Upon invocation
//! of [`ProtoModel::build`], the exit ports are moved to the model or its
//! submodels, and those submodels are added to the simulation.
//!
//! Note that a trivial [`ProtoModel`] implementation is generated by default
//! for any object implementing the [`Model`] trait, where the associated
//! [`ProtoModel::Model`] type is the model type itself. This is what makes it
//! possible to use either an explicitly-defined [`ProtoModel`] as argument to
//! the [`SimInit::add_model`](crate::simulation::SimInit::add_model) method, or
//! a plain [`Model`] type.
//!
//! #### Examples
//!
//! A model that does not require initialization or building can simply use the
//! default implementation of the [`Model`] trait:
//!
//! ```
//! use nexosim::model::Model;
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl Model for MyModel {}
//! ```
//!
//! If a default action is required during simulation initialization, the `init`
//! method can be explicitly implemented:
//!
//! ```
//! use nexosim::model::{Context, InitializedModel, Model};
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl Model for MyModel {
//! async fn init(
//! mut self,
//! ctx: &mut Context<Self>
//! ) -> InitializedModel<Self> {
//! println!("...initialization...");
//!
//! self.into()
//! }
//! }
//! ```
//!
//! Finally, if a model builder is required, the [`ProtoModel`] trait can be
//! explicitly implemented. Note that the [`ProtoModel`] contains all output and
//! requestor ports, while the associated [`Model`] contains all input and
//! replier methods.
//!
//! ```
//! use nexosim::model::{BuildContext, InitializedModel, Model, ProtoModel};
//! use nexosim::ports::Output;
//!
//! /// The final model.
//! pub struct Multiplier {
//! // Private outputs and requestors stored in a form that constitutes an
//! // implementation detail and should not be exposed to the user.
//! my_outputs: Vec<Output<usize>>
//! }
//! impl Multiplier {
//! // Private constructor: the final model is built by the prototype model.
//! fn new(
//! value_times_1: Output<usize>,
//! value_times_2: Output<usize>,
//! value_times_3: Output<usize>,
//! ) -> Self {
//! Self {
//! my_outputs: vec![value_times_1, value_times_2, value_times_3]
//! }
//! }
//!
//! // Public input to be used during bench construction.
//! pub async fn my_input(&mut self, my_data: usize) {
//! for (i, output) in self.my_outputs.iter_mut().enumerate() {
//! output.send(my_data*(i + 1)).await;
//! }
//! }
//! }
//! impl Model for Multiplier {}
//!
//! pub struct ProtoMultiplier {
//! // Prettified outputs exposed to the user.
//! pub value_times_1: Output<usize>,
//! pub value_times_2: Output<usize>,
//! pub value_times_3: Output<usize>,
//! }
//! impl ProtoModel for ProtoMultiplier {
//! type Model = Multiplier;
//!
//! fn build(
//! mut self,
//! _: &mut BuildContext<Self>
//! ) -> Multiplier {
//! Multiplier::new(self.value_times_1, self.value_times_2, self.value_times_3)
//! }
//! }
//! ```
//!
//! # Hierarchical models
//!
//! Hierarchical models are models built from a prototype, which prototype adds
//! submodels to the simulation within its [`ProtoModel::build`] method. From a
//! formal point of view, however, hierarchical models are just regular models
//! implementing the [`Model`] trait, as are their submodels.
//!
//!
//! #### Example
//!
//! This example demonstrates a child model inside a parent model, where the
//! parent model simply forwards input data to the child and the child in turn
//! sends the data to the output exposed by the parent's prototype.
//!
//! For a more comprehensive example demonstrating hierarchical model
//! assemblies, see the [`assembly`][assembly] example.
//!
//! [assembly]:
//! https://github.com/asynchronics/nexosim/tree/main/nexosim/examples/assembly.rs
//!
//! ```
//! use nexosim::model::{BuildContext, Model, ProtoModel};
//! use nexosim::ports::Output;
//! use nexosim::simulation::Mailbox;
//!
//! pub struct ParentModel {
//! // Private internal port connected to the submodel.
//! to_child: Output<u64>,
//! }
//! impl ParentModel {
//! async fn input(&mut self, my_data: u64) {
//! // Forward to the submodel.
//! self.to_child.send(my_data).await;
//! }
//! }
//! impl Model for ParentModel {}
//!
//! pub struct ProtoParentModel {
//! pub output: Output<u64>,
//! }
//! impl ProtoModel for ProtoParentModel {
//! type Model = ParentModel;
//!
//! fn build(self, cx: &mut BuildContext<Self>) -> ParentModel {
//! // Move the output to the child model.
//! let child = ChildModel { output: self.output };
//! let mut parent = ParentModel {
//! to_child: Output::default(),
//! };
//!
//! let child_mailbox = Mailbox::new();
//!
//! // Establish an internal Parent -> Child connection.
//! parent
//! .to_child
//! .connect(ChildModel::input, child_mailbox.address());
//!
//! // Add the child model to the simulation.
//! cx.add_submodel(child, child_mailbox, "child");
//!
//! parent
//! }
//! }
//!
//! struct ChildModel {
//! output: Output<u64>,
//! }
//! impl ChildModel {
//! async fn input(&mut self, my_data: u64) {
//! self.output.send(my_data).await;
//! }
//! }
//! impl Model for ChildModel {}
//!
//! ```
use std::future::Future;
pub use context::{BuildContext, Context};
mod context;
/// Trait to be implemented by simulation models.
///
/// This trait enables models to perform specific actions during initialization.
/// The [`Model::init`] method is run only once all models have been connected
/// and migrated to the simulation bench, but before the simulation actually
/// starts. A common use for `init` is to send messages to connected models at
/// the beginning of the simulation.
///
/// The `init` function converts the model to the opaque `InitializedModel` type
/// to prevent an already initialized model from being added to the simulation
/// bench.
pub trait Model: Sized + Send + 'static {
/// Performs asynchronous model initialization.
///
/// This asynchronous method is executed exactly once for all models of the
/// simulation when the
/// [`SimInit::init`](crate::simulation::SimInit::init) method is called.
///
/// The default implementation simply converts the model to an
/// `InitializedModel` without any side effect.
///
/// # Examples
///
/// ```
/// use std::future::Future;
/// use std::pin::Pin;
///
/// use nexosim::model::{Context, InitializedModel, Model};
///
/// pub struct MyModel {
/// // ...
/// }
///
/// impl Model for MyModel {
/// async fn init(
/// self,
/// cx: &mut Context<Self>
/// ) -> InitializedModel<Self> {
/// println!("...initialization...");
///
/// self.into()
/// }
/// }
/// ```
fn init(self, _: &mut Context<Self>) -> impl Future<Output = InitializedModel<Self>> + Send {
async { self.into() }
}
}
/// Opaque type containing an initialized model.
///
/// A model can be turned into an `InitializedModel` via the `From`/`Into`
/// traits. The simulation implementation guarantees that [`Model::init`] is
/// never invoked on a model once it has been converted to an
/// `InitializedModel`.
#[derive(Debug)]
pub struct InitializedModel<M: Model>(pub(crate) M);

impl<M: Model> From<M> for InitializedModel<M> {
    fn from(model: M) -> Self {
        Self(model)
    }
}
/// Trait to be implemented by simulation model prototypes.
///
/// This trait makes it possible to build the final model from a builder type
/// when it is added to the simulation.
///
/// The [`ProtoModel::build`] method consumes the prototype. It is
/// automatically called when a model or submodel prototype is added to the
/// simulation using
/// [`SimInit::add_model`](crate::simulation::SimInit::add_model) or
/// [`BuildContext::add_submodel`].
pub trait ProtoModel: Sized {
    /// Type of the model to be built.
    type Model: Model;
    /// Builds the model.
    ///
    /// This method is invoked when the
    /// [`SimInit::add_model`](crate::simulation::SimInit::add_model) or
    /// [`BuildContext::add_submodel`] methods are called.
    fn build(self, cx: &mut BuildContext<Self>) -> Self::Model;
}
// Trivial prototype: a model acts as its own prototype and builds to itself.
impl<M: Model> ProtoModel for M {
    type Model = M;

    fn build(self, _cx: &mut BuildContext<Self>) -> Self::Model {
        self
    }
}

View File

@ -0,0 +1,539 @@
use std::fmt;
use std::time::Duration;
use crate::executor::{Executor, Signal};
use crate::ports::InputFn;
use crate::simulation::{self, ActionKey, Address, GlobalScheduler, Mailbox, SchedulingError};
use crate::time::{Deadline, MonotonicTime};
use super::{Model, ProtoModel};
#[cfg(all(test, not(nexosim_loom)))]
use crate::channel::Receiver;
/// A local context for models.
///
/// A `Context` is a handle to the global context associated to a model
/// instance. It can be used by the model to retrieve the simulation time or
/// schedule delayed actions on itself.
///
/// ### Caveat: self-scheduling `async` methods
///
/// Due to a current rustc issue, `async` methods that schedule themselves will
/// not compile unless an explicit `Send` bound is added to the returned future.
/// This can be done by replacing the `async` signature with a partially
/// desugared signature such as:
///
/// ```ignore
/// fn self_scheduling_method<'a>(
/// &'a mut self,
/// arg: MyEventType,
/// cx: &'a mut Context<Self>
/// ) -> impl Future<Output=()> + Send + 'a {
/// async move {
/// /* implementation */
/// }
/// }
/// ```
///
/// Self-scheduling methods which are not `async` are not affected by this
/// issue.
///
/// # Examples
///
/// A model that sends a greeting after some delay.
///
/// ```
/// use std::time::Duration;
/// use nexosim::model::{Context, Model};
/// use nexosim::ports::Output;
///
/// #[derive(Default)]
/// pub struct DelayedGreeter {
/// msg_out: Output<String>,
/// }
///
/// impl DelayedGreeter {
/// // Triggers a greeting on the output port after some delay [input port].
/// pub async fn greet_with_delay(&mut self, delay: Duration, cx: &mut Context<Self>) {
/// let time = cx.time();
/// let greeting = format!("Hello, this message was scheduled at: {:?}.", time);
///
/// if delay.is_zero() {
/// self.msg_out.send(greeting).await;
/// } else {
/// cx.schedule_event(delay, Self::send_msg, greeting).unwrap();
/// }
/// }
///
/// // Sends a message to the output [private input port].
/// async fn send_msg(&mut self, msg: String) {
/// self.msg_out.send(msg).await;
/// }
/// }
/// impl Model for DelayedGreeter {}
/// ```
// The self-scheduling caveat seems related to this issue:
// https://github.com/rust-lang/rust/issues/78649
pub struct Context<M: Model> {
    // Fully qualified name of the model instance, as returned by `name()`.
    name: String,
    // Handle to the global scheduler, used to query the simulation time and to
    // schedule events.
    scheduler: GlobalScheduler,
    // Address of this model's mailbox, used as the target address for
    // self-scheduled events.
    address: Address<M>,
    // Scheduling origin identifier: unique per model and non-zero (0 is
    // reserved for the global scheduler); derived from the mailbox channel ID
    // in `new`.
    origin_id: usize,
}
impl<M: Model> Context<M> {
    /// Creates a new local context.
    pub(crate) fn new(name: String, scheduler: GlobalScheduler, address: Address<M>) -> Self {
        // The only requirement for the origin ID is that it must be (i)
        // specific to each model and (ii) different from 0 (which is reserved
        // for the global scheduler). The channel ID of the model mailbox
        // fulfills this requirement.
        let origin_id = address.0.channel_id();

        Self {
            name,
            scheduler,
            address,
            origin_id,
        }
    }

    /// Returns the fully qualified model instance name.
    ///
    /// The fully qualified name is made of the unqualified model name, where
    /// relevant prepended by the dot-separated names of all parent models.
    pub fn name(&self) -> &str {
        &self.name
    }

    /// Returns the current simulation time.
    pub fn time(&self) -> MonotonicTime {
        self.scheduler.time()
    }

    /// Schedules an event at a future time on this model.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use nexosim::model::{Context, Model};
    ///
    /// // A timer.
    /// pub struct Timer {}
    ///
    /// impl Timer {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: Duration, cx: &mut Context<Self>) {
    ///         if cx.schedule_event(setting, Self::ring, ()).is_err() {
    ///             println!("The alarm clock can only be set for a future time");
    ///         }
    ///     }
    ///
    ///     // Rings [private input port].
    ///     fn ring(&mut self) {
    ///         println!("Brringggg");
    ///     }
    /// }
    ///
    /// impl Model for Timer {}
    /// ```
    pub fn schedule_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
    ) -> Result<(), SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.scheduler
            .schedule_event_from(deadline, func, arg, &self.address, self.origin_id)
    }

    /// Schedules a cancellable event at a future time on this model and returns
    /// an action key.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use nexosim::model::{Context, Model};
    /// use nexosim::simulation::ActionKey;
    /// use nexosim::time::MonotonicTime;
    ///
    /// // An alarm clock that can be cancelled.
    /// #[derive(Default)]
    /// pub struct CancellableAlarmClock {
    ///     event_key: Option<ActionKey>,
    /// }
    ///
    /// impl CancellableAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, cx: &mut Context<Self>) {
    ///         self.cancel();
    ///         match cx.schedule_keyed_event(setting, Self::ring, ()) {
    ///             Ok(event_key) => self.event_key = Some(event_key),
    ///             Err(_) => println!("The alarm clock can only be set for a future time"),
    ///         };
    ///     }
    ///
    ///     // Cancels the current alarm, if any [input port].
    ///     pub fn cancel(&mut self) {
    ///         self.event_key.take().map(|k| k.cancel());
    ///     }
    ///
    ///     // Rings the alarm [private input port].
    ///     fn ring(&mut self) {
    ///         println!("Brringggg!");
    ///     }
    /// }
    ///
    /// impl Model for CancellableAlarmClock {}
    /// ```
    pub fn schedule_keyed_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
    ) -> Result<ActionKey, SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        // The scheduler already returns the action key, so its `Result` can be
        // forwarded directly rather than unwrapped with `?` and re-wrapped.
        self.scheduler
            .schedule_keyed_event_from(deadline, func, arg, &self.address, self.origin_id)
    }

    /// Schedules a periodically recurring event on this model at a future time.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time or if the specified period is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use nexosim::model::{Context, Model};
    /// use nexosim::time::MonotonicTime;
    ///
    /// // An alarm clock beeping at 1Hz.
    /// pub struct BeepingAlarmClock {}
    ///
    /// impl BeepingAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, cx: &mut Context<Self>) {
    ///         if cx.schedule_periodic_event(
    ///             setting,
    ///             Duration::from_secs(1), // 1Hz = 1/1s
    ///             Self::beep,
    ///             ()
    ///         ).is_err() {
    ///             println!("The alarm clock can only be set for a future time");
    ///         }
    ///     }
    ///
    ///     // Emits a single beep [private input port].
    ///     fn beep(&mut self) {
    ///         println!("Beep!");
    ///     }
    /// }
    ///
    /// impl Model for BeepingAlarmClock {}
    /// ```
    pub fn schedule_periodic_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
    ) -> Result<(), SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.scheduler.schedule_periodic_event_from(
            deadline,
            period,
            func,
            arg,
            &self.address,
            self.origin_id,
        )
    }

    /// Schedules a cancellable, periodically recurring event on this model at a
    /// future time and returns an action key.
    ///
    /// An error is returned if the specified deadline is not in the future of
    /// the current simulation time or if the specified period is null.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::time::Duration;
    ///
    /// use nexosim::model::{Context, Model};
    /// use nexosim::simulation::ActionKey;
    /// use nexosim::time::MonotonicTime;
    ///
    /// // An alarm clock beeping at 1Hz that can be cancelled before it sets off, or
    /// // stopped after it sets off.
    /// #[derive(Default)]
    /// pub struct CancellableBeepingAlarmClock {
    ///     event_key: Option<ActionKey>,
    /// }
    ///
    /// impl CancellableBeepingAlarmClock {
    ///     // Sets an alarm [input port].
    ///     pub fn set(&mut self, setting: MonotonicTime, cx: &mut Context<Self>) {
    ///         self.cancel();
    ///         match cx.schedule_keyed_periodic_event(
    ///             setting,
    ///             Duration::from_secs(1), // 1Hz = 1/1s
    ///             Self::beep,
    ///             ()
    ///         ) {
    ///             Ok(event_key) => self.event_key = Some(event_key),
    ///             Err(_) => println!("The alarm clock can only be set for a future time"),
    ///         };
    ///     }
    ///
    ///     // Cancels or stops the alarm [input port].
    ///     pub fn cancel(&mut self) {
    ///         self.event_key.take().map(|k| k.cancel());
    ///     }
    ///
    ///     // Emits a single beep [private input port].
    ///     fn beep(&mut self) {
    ///         println!("Beep!");
    ///     }
    /// }
    ///
    /// impl Model for CancellableBeepingAlarmClock {}
    /// ```
    pub fn schedule_keyed_periodic_event<F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
    ) -> Result<ActionKey, SchedulingError>
    where
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        // Forward the scheduler's `Result` directly rather than unwrapping it
        // with `?` only to re-wrap the key in `Ok`.
        self.scheduler.schedule_keyed_periodic_event_from(
            deadline,
            period,
            func,
            arg,
            &self.address,
            self.origin_id,
        )
    }
}
impl<M: Model> fmt::Debug for Context<M> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // `finish_non_exhaustive` renders a trailing `..` to signal that not
        // all fields are displayed.
        let mut builder = f.debug_struct("Context");
        builder.field("name", &self.name());
        builder.field("time", &self.time());
        builder.field("address", &self.address);
        builder.field("origin_id", &self.origin_id);
        builder.finish_non_exhaustive()
    }
}
/// Context available when building a model from a model prototype.
///
/// A `BuildContext` can be used to add the sub-models of a hierarchical model
/// to the simulation bench.
///
/// # Examples
///
/// A model that multiplies its input by four using two sub-models that each
/// multiply their input by two.
///
/// ```text
/// ┌───────────────────────────────────────┐
/// │              MultiplyBy4              │
/// │ ┌─────────────┐ ┌─────────────┐ │
/// │ │ │ │ │ │
/// Input ●─────┼──►│ MultiplyBy2 ├──►│ MultiplyBy2 ├───┼─────► Output
/// f64 │ │ │ │ │ │ f64
/// │ └─────────────┘ └─────────────┘ │
/// │ │
/// └───────────────────────────────────────┘
/// ```
///
/// ```
/// use std::time::Duration;
/// use nexosim::model::{BuildContext, Model, ProtoModel};
/// use nexosim::ports::Output;
/// use nexosim::simulation::Mailbox;
///
/// #[derive(Default)]
/// struct MultiplyBy2 {
/// pub output: Output<i32>,
/// }
/// impl MultiplyBy2 {
/// pub async fn input(&mut self, value: i32) {
/// self.output.send(value * 2).await;
/// }
/// }
/// impl Model for MultiplyBy2 {}
///
/// pub struct MultiplyBy4 {
/// // Private forwarding output.
/// forward: Output<i32>,
/// }
/// impl MultiplyBy4 {
/// pub async fn input(&mut self, value: i32) {
/// self.forward.send(value).await;
/// }
/// }
/// impl Model for MultiplyBy4 {}
///
/// pub struct ProtoMultiplyBy4 {
/// pub output: Output<i32>,
/// }
/// impl ProtoModel for ProtoMultiplyBy4 {
/// type Model = MultiplyBy4;
///
/// fn build(
/// self,
/// cx: &mut BuildContext<Self>)
/// -> MultiplyBy4 {
/// let mut mult = MultiplyBy4 { forward: Output::default() };
/// let mut submult1 = MultiplyBy2::default();
///
/// // Move the prototype's output to the second multiplier.
/// let mut submult2 = MultiplyBy2 { output: self.output };
///
/// // Forward the parent's model input to the first multiplier.
/// let submult1_mbox = Mailbox::new();
/// mult.forward.connect(MultiplyBy2::input, &submult1_mbox);
///
/// // Connect the two multiplier submodels.
/// let submult2_mbox = Mailbox::new();
/// submult1.output.connect(MultiplyBy2::input, &submult2_mbox);
///
/// // Add the submodels to the simulation.
/// cx.add_submodel(submult1, submult1_mbox, "submultiplier 1");
/// cx.add_submodel(submult2, submult2_mbox, "submultiplier 2");
///
/// mult
/// }
/// }
///
/// ```
#[derive(Debug)]
pub struct BuildContext<'a, P: ProtoModel> {
    // Mailbox of the model being built; its address is exposed by `address()`.
    mailbox: &'a Mailbox<P::Model>,
    // Fully qualified name of the model being built.
    name: &'a String,
    // Global scheduler handle, cloned and forwarded when submodels are added.
    scheduler: &'a GlobalScheduler,
    // Executor handle, forwarded to `simulation::add_model` for submodels.
    executor: &'a Executor,
    // Abort signal, forwarded to `simulation::add_model` for submodels.
    abort_signal: &'a Signal,
    // List of model names, passed on to `simulation::add_model` when submodels
    // are added.
    model_names: &'a mut Vec<String>,
}
impl<'a, P: ProtoModel> BuildContext<'a, P> {
    /// Creates a new local context.
    pub(crate) fn new(
        mailbox: &'a Mailbox<P::Model>,
        name: &'a String,
        scheduler: &'a GlobalScheduler,
        executor: &'a Executor,
        abort_signal: &'a Signal,
        model_names: &'a mut Vec<String>,
    ) -> Self {
        Self {
            mailbox,
            name,
            scheduler,
            executor,
            abort_signal,
            model_names,
        }
    }

    /// Returns the fully qualified model instance name.
    ///
    /// The fully qualified name is made of the unqualified model name, where
    /// relevant prepended by the dot-separated names of all parent models.
    pub fn name(&self) -> &str {
        self.name.as_str()
    }

    /// Returns a handle to the model's mailbox.
    pub fn address(&self) -> Address<P::Model> {
        self.mailbox.address()
    }

    /// Adds a sub-model to the simulation bench.
    ///
    /// The `name` argument need not be unique. It is appended to that of the
    /// parent models' names using a dot separator (e.g.
    /// `parent_name.child_name`) to build the fully qualified name. The use of
    /// the dot character in the unqualified name is possible but discouraged.
    /// If an empty string is provided, it is replaced by the string
    /// `<unknown>`.
    pub fn add_submodel<S: ProtoModel>(
        &mut self,
        model: S,
        mailbox: Mailbox<S::Model>,
        name: impl Into<String>,
    ) {
        // Substitute the `<unknown>` placeholder for an empty name.
        let mut unqualified = name.into();
        if unqualified.is_empty() {
            unqualified = String::from("<unknown>");
        }
        // Prefix the submodel name with the parent's fully qualified name.
        let qualified = format!("{}.{}", self.name, unqualified);

        simulation::add_model(
            model,
            mailbox,
            qualified,
            self.scheduler.clone(),
            self.executor,
            self.abort_signal,
            self.model_names,
        );
    }
}
#[cfg(all(test, not(nexosim_loom)))]
impl<M: Model> Context<M> {
    /// Creates a dummy context for testing purposes.
    pub(crate) fn new_dummy() -> Self {
        // A throwaway single-slot channel provides the dummy address.
        let sender = Receiver::new(1).sender();

        Self::new(String::new(), GlobalScheduler::new_dummy(), Address(sender))
    }
}

281
nexosim/src/ports.rs Normal file
View File

@ -0,0 +1,281 @@
//! Ports for event and query broadcasting.
//!
//!
//! # Events and queries
//!
//! Models can exchange data via *events* and *queries*.
//!
//! Events are send-and-forget messages that can be broadcast from an [`Output`]
//! port or [`EventSource`] to an arbitrary number of *input ports* or
//! [`EventSink`]s with a matching event type.
//!
//! Queries actually involve two messages: a *request* that can be broadcast
//! from a [`Requestor`] port, a [`UniRequestor`] port or a [`QuerySource`] to
//! an arbitrary number of *replier ports* with a matching request type, and a
//! *reply* sent in response to such request. The response received by a
//! [`Requestor`] port is an iterator that yields as many items (replies) as
//! there are connected replier ports, while a [`UniRequestor`] receives exactly
//! one reply.
//!
//! # Model ports
//!
//! ## Input and replier ports
//!
//! Input ports and replier ports are methods that implement the [`InputFn`] or
//! [`ReplierFn`] traits.
//!
//! In practice, an input port method for an event of type `T` may have any of
//! the following signatures, where the futures returned by the `async` variants
//! must implement `Send`:
//!
//! ```ignore
//! fn(&mut self) // argument elided, implies `T=()`
//! fn(&mut self, T)
//! fn(&mut self, T, &mut Context<Self>)
//! async fn(&mut self) // argument elided, implies `T=()`
//! async fn(&mut self, T)
//! async fn(&mut self, T, &mut Context<Self>)
//! where
//! Self: Model,
//! T: Clone + Send + 'static,
//! R: Send + 'static,
//! ```
//!
//! The context argument is useful for methods that need access to the
//! simulation time or that need to schedule an action at a future date.
//!
//! A replier port for a request of type `T` with a reply of type `R` may in
//! turn have any of the following signatures, where the futures must implement
//! `Send`:
//!
//! ```ignore
//! async fn(&mut self) -> R // argument elided, implies `T=()`
//! async fn(&mut self, T) -> R
//! async fn(&mut self, T, &mut Context<Self>) -> R
//! where
//! Self: Model,
//! T: Clone + Send + 'static,
//! R: Send + 'static,
//! ```
//!
//! Note that, due to type resolution ambiguities, non-async methods are not
//! allowed for replier ports.
//!
//! Input and replier ports will normally be exposed as public methods by a
//! [`Model`](crate::model::Model) so they can be connected to output and
//! requestor ports when assembling the simulation bench. However, input ports
//! may (and should) be defined as private methods if they are only used
//! internally by the model, for instance to schedule future actions on itself.
//!
//! Changing the signature of a public input or replier port is not considered
//! to alter the public interface of a model provided that the event, request
//! and reply types remain the same. In particular, adding a context argument or
//! changing a regular method to an `async` method will not cause idiomatic user
//! code to miscompile.
//!
//! #### Basic example
//!
//! ```
//! use nexosim::model::{Context, Model};
//!
//! pub struct MyModel {
//! // ...
//! }
//! impl MyModel {
//! pub fn my_input(&mut self, input: String, cx: &mut Context<Self>) {
//! // ...
//! }
//! pub async fn my_replier(&mut self, request: u32) -> bool { // context argument elided
//! // ...
//! # unimplemented!()
//! }
//! }
//! impl Model for MyModel {}
//! ```
//!
//! ## Output and requestor ports
//!
//! Output and requestor ports can be added to a model using composition, adding
//! [`Output`], [`Requestor`] or [`UniRequestor`] objects as members. They are
//! parametrized by the event type, or by the request and reply types.
//!
//! Output ports broadcast events to all connected input ports, while requestor
//! ports broadcast queries to, and retrieve replies from, all connected replier
//! ports.
//!
//! On the surface, output and requestor ports only differ in that sending a
//! query from a requestor port also returns an iterator over the replies from
//! all connected ports (or a single reply in the case of [`UniRequestor`]).
//! Sending a query is more costly, however, because of the need to wait until
//! the connected model(s) have processed the query. In contrast, since events
//! are buffered in the mailbox of the target model, sending an event is a
//! fire-and-forget operation. For this reason, output ports should generally be
//! preferred over requestor ports when possible.
//!
//! Models (or model prototypes, as appropriate) are expected to expose their
//! output and requestor ports as public members so they can be connected to
//! input and replier ports when assembling the simulation bench. Internal ports
//! used by hierarchical models to communicate with submodels are an exception
//! to this rule and are typically private.
//!
//! #### Basic example
//!
//! ```
//! use nexosim::model::Model;
//! use nexosim::ports::{Output, Requestor};
//!
//! pub struct MyModel {
//! pub my_output: Output<String>,
//! pub my_requestor: Requestor<u32, bool>,
//! }
//! impl MyModel {
//! // ...
//! }
//! impl Model for MyModel {}
//! ```
//!
//! #### Example with cloned ports
//!
//! [`Output`] and [`Requestor`] ports are clonable. The clones are shallow
//! copies, meaning that any modification of the ports connected to one instance
//! is immediately reflected by its clones.
//!
//! Clones of output and requestor ports should be used with care: even though
//! they uphold the usual [ordering
//! guarantees](crate#message-ordering-guarantees), their use can lead to
//! somewhat surprising message orderings.
//!
//! For instance, in the below [hierarchical
//! model](crate::model#hierarchical-models), while message `M0` is guaranteed
//! to reach the cloned output first, the relative arrival order of messages
//! `M1` and `M2` forwarded by the submodels to the cloned output is not
//! guaranteed.
//!
//! ```
//! use nexosim::model::{BuildContext, Model, ProtoModel};
//! use nexosim::ports::Output;
//! use nexosim::simulation::Mailbox;
//!
//! pub struct ParentModel {
//! output: Output<String>,
//! to_child1: Output<String>,
//! to_child2: Output<String>,
//! }
//! impl ParentModel {
//! pub async fn trigger(&mut self) {
//! // M0 is guaranteed to reach the cloned output first.
//! self.output.send("M0".to_string()).await;
//!
//! // M1 and M2 are forwarded by `Child1` and `Child2` to the cloned output
//! // but may reach it in any relative order.
//! self.to_child1.send("M1".to_string()).await;
//! self.to_child2.send("M2".to_string()).await;
//! }
//! }
//! impl Model for ParentModel {}
//!
//! pub struct ProtoParentModel {
//! pub output: Output<String>,
//! }
//! impl ProtoParentModel {
//! pub fn new() -> Self {
//! Self {
//! output: Default::default(),
//! }
//! }
//! }
//! impl ProtoModel for ProtoParentModel {
//! type Model = ParentModel;
//! fn build(self, cx: &mut BuildContext<Self>) -> ParentModel {
//! let child1 = ChildModel { output: self.output.clone() };
//! let child2 = ChildModel { output: self.output.clone() };
//! let mut parent = ParentModel {
//! output: self.output,
//! to_child1: Output::default(),
//! to_child2: Output::default(),
//! };
//!
//! let child1_mailbox = Mailbox::new();
//! let child2_mailbox = Mailbox::new();
//! parent
//! .to_child1
//! .connect(ChildModel::input, child1_mailbox.address());
//! parent
//! .to_child2
//! .connect(ChildModel::input, child2_mailbox.address());
//!
//! cx.add_submodel(child1, child1_mailbox, "child1");
//! cx.add_submodel(child2, child2_mailbox, "child2");
//!
//! parent
//! }
//! }
//!
//! pub struct ChildModel {
//! pub output: Output<String>,
//! }
//! impl ChildModel {
//! async fn input(&mut self, msg: String) {
//! self.output.send(msg).await;
//! }
//! }
//! impl Model for ChildModel {}
//! ```
//!
//! # Simulation endpoints
//!
//! Simulation endpoints can be seen as entry and exit ports for a simulation
//! bench.
//!
//! [`EventSource`] and [`QuerySource`] objects are similar to [`Output`] and
//! [`Requestor`] ports, respectively. They can be connected to models and can
//! be used to send events or queries to such models via
//! [`Action`](crate::simulation::Action)s.
//!
//! Objects implementing the [`EventSink`] trait, such as [`EventSlot`] and
//! [`EventBuffer`], are in turn similar to input ports. They can be connected
//! to model outputs and collect events sent by such models.
//!
//!
//! # Connections
//!
//! Model ports can be connected to other model ports and to simulation
//! endpoints using the `*connect` family of methods exposed by [`Output`],
//! [`Requestor`], [`UniRequestor`], [`EventSource`] and [`QuerySource`].
//!
//! Regular connections between two ports are made with the appropriate
//! `connect` method (for instance [`Output::connect`]). Such connections
//! broadcast events or requests from a sender port to one or several receiver
//! ports, cloning the event or request if necessary.
//!
//! Sometimes, however, it may be necessary to also map the event or request to
//! another type so it can be understood by the receiving port. While this
//! translation could be done by a model placed between the two ports, the
//! `map_connect` methods (for instance [`Output::map_connect`]) provide a
//! lightweight and computationally efficient alternative. These methods take a
//! mapping closure as argument which maps outgoing messages, and in the case of
//! requestors, a mapping closure which maps replies.
//!
//! Finally, it is sometimes necessary to only forward messages or requests that
//! satisfy specific criteria. For instance, a model of a data bus may be
//! connected to many models (the "peripherals"), but its messages are usually
//! only addressed to selected models. The `filter_map_connect` methods (for
//! instance [`Output::filter_map_connect`]) enable this use-case by accepting a
//! closure that inspects the messages and determines whether they should be
//! forwarded, possibly after being mapped to another type.
//!
mod input;
mod output;
mod sink;
mod source;
pub use input::markers;
pub use input::{InputFn, ReplierFn};
pub use output::{Output, Requestor, UniRequestor};
pub use sink::{
blocking_event_queue::{BlockingEventQueue, BlockingEventQueueReader},
event_buffer::EventBuffer,
event_slot::EventSlot,
EventSink, EventSinkStream, EventSinkWriter,
};
pub use source::{EventSource, QuerySource, ReplyReceiver};

View File

@ -0,0 +1,4 @@
pub mod markers;
mod model_fn;
pub use model_fn::{InputFn, ReplierFn};

View File

@ -6,14 +6,14 @@
pub struct WithoutArguments {}
/// Marker type for regular simulation model methods that take a mutable
/// reference to the model and a message, without scheduler argument.
/// reference to the model and a message, without context argument.
#[derive(Debug)]
pub struct WithoutScheduler {}
pub struct WithoutContext {}
/// Marker type for regular simulation model methods that take a mutable
/// reference to the model, a message and an explicit scheduler argument.
/// reference to the model, a message and an explicit context argument.
#[derive(Debug)]
pub struct WithScheduler {}
pub struct WithContext {}
/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model, without any other argument.
@ -21,11 +21,11 @@ pub struct WithScheduler {}
pub struct AsyncWithoutArguments {}
/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model and a message, without scheduler argument.
/// reference to the model and a message, without context argument.
#[derive(Debug)]
pub struct AsyncWithoutScheduler {}
pub struct AsyncWithoutContext {}
/// Marker type for asynchronous simulation model methods that take a mutable
/// reference to the model, a message and an explicit scheduler argument.
/// reference to the model, a message and an explicit context argument.
#[derive(Debug)]
pub struct AsyncWithScheduler {}
pub struct AsyncWithContext {}

View File

@ -2,38 +2,34 @@
use std::future::{ready, Future, Ready};
use crate::model::{markers, Model};
use crate::time::Scheduler;
use crate::model::{Context, Model};
use super::markers;
/// A function, method or closures that can be used as an *input port*.
///
/// This trait is in particular implemented for any function or method with the
/// following signature, where it is implicitly assumed that the function
/// implements `Send + 'static`:
/// following signature, where the futures returned by the `async` variants must
/// implement `Send`:
///
/// ```ignore
/// FnOnce(&mut M, T)
/// FnOnce(&mut M, T, &Scheduler<M>)
/// fn(&mut M) // argument elided, implies `T=()`
/// fn(&mut M, T)
/// fn(&mut M, T, &mut Context<M>)
/// async fn(&mut M) // argument elided, implies `T=()`
/// async fn(&mut M, T)
/// async fn(&mut M, T, &Scheduler<M>)
/// async fn(&mut M, T, &mut Context<M>)
/// where
/// M: Model
/// ```
///
/// It is also implemented for the following signatures when `T=()`:
///
/// ```ignore
/// FnOnce(&mut M)
/// async fn(&mut M)
/// where
/// M: Model
/// M: Model,
/// T: Clone + Send + 'static,
/// R: Send + 'static,
/// ```
pub trait InputFn<'a, M: Model, T, S>: Send + 'static {
/// The `Future` returned by the asynchronous method.
type Future: Future<Output = ()> + Send + 'a;
/// Calls the method.
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
fn call(self, model: &'a mut M, arg: T, cx: &'a mut Context<M>) -> Self::Future;
}
impl<'a, M, F> InputFn<'a, M, (), markers::WithoutArguments> for F
@ -43,36 +39,36 @@ where
{
type Future = Ready<()>;
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, _arg: (), _cx: &'a mut Context<M>) -> Self::Future {
self(model);
ready(())
}
}
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutScheduler> for F
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithoutContext> for F
where
M: Model,
F: FnOnce(&'a mut M, T) + Send + 'static,
{
type Future = Ready<()>;
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, arg: T, _cx: &'a mut Context<M>) -> Self::Future {
self(model, arg);
ready(())
}
}
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithScheduler> for F
impl<'a, M, T, F> InputFn<'a, M, T, markers::WithContext> for F
where
M: Model,
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) + Send + 'static,
F: FnOnce(&'a mut M, T, &'a mut Context<M>) + Send + 'static,
{
type Future = Ready<()>;
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
self(model, arg, scheduler);
fn call(self, model: &'a mut M, arg: T, cx: &'a mut Context<M>) -> Self::Future {
self(model, arg, cx);
ready(())
}
@ -86,12 +82,12 @@ where
{
type Future = Fut;
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, _arg: (), _cx: &'a mut Context<M>) -> Self::Future {
self(model)
}
}
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutScheduler> for F
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithoutContext> for F
where
M: Model,
Fut: Future<Output = ()> + Send + 'a,
@ -99,50 +95,44 @@ where
{
type Future = Fut;
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, arg: T, _cx: &'a mut Context<M>) -> Self::Future {
self(model, arg)
}
}
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithScheduler> for F
impl<'a, M, T, Fut, F> InputFn<'a, M, T, markers::AsyncWithContext> for F
where
M: Model,
Fut: Future<Output = ()> + Send + 'a,
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
F: FnOnce(&'a mut M, T, &'a mut Context<M>) -> Fut + Send + 'static,
{
type Future = Fut;
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
self(model, arg, scheduler)
fn call(self, model: &'a mut M, arg: T, cx: &'a mut Context<M>) -> Self::Future {
self(model, arg, cx)
}
}
/// A function, method or closure that can be used as a *replier port*.
///
/// This trait is in particular implemented for any function or method with the
/// following signature, where it is implicitly assumed that the function
/// implements `Send + 'static`:
/// following signature, where the returned futures must implement `Send`:
///
/// ```ignore
/// async fn(&mut M) -> R // argument elided, implies `T=()`
/// async fn(&mut M, T) -> R
/// async fn(&mut M, T, &Scheduler<M>) -> R
/// async fn(&mut M, T, &mut Context<M>) -> R
/// where
/// M: Model
/// ```
///
/// It is also implemented for the following signatures when `T=()`:
///
/// ```ignore
/// async fn(&mut M) -> R
/// where
/// M: Model
/// M: Model,
/// T: Clone + Send + 'static,
/// R: Send + 'static,
/// ```
pub trait ReplierFn<'a, M: Model, T, R, S>: Send + 'static {
/// The `Future` returned by the asynchronous method.
type Future: Future<Output = R> + Send + 'a;
/// Calls the method.
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future;
fn call(self, model: &'a mut M, arg: T, cx: &'a mut Context<M>) -> Self::Future;
}
impl<'a, M, R, Fut, F> ReplierFn<'a, M, (), R, markers::AsyncWithoutArguments> for F
@ -153,12 +143,12 @@ where
{
type Future = Fut;
fn call(self, model: &'a mut M, _arg: (), _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, _arg: (), _cx: &'a mut Context<M>) -> Self::Future {
self(model)
}
}
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutScheduler> for F
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithoutContext> for F
where
M: Model,
Fut: Future<Output = R> + Send + 'a,
@ -166,20 +156,20 @@ where
{
type Future = Fut;
fn call(self, model: &'a mut M, arg: T, _scheduler: &'a Scheduler<M>) -> Self::Future {
fn call(self, model: &'a mut M, arg: T, _cx: &'a mut Context<M>) -> Self::Future {
self(model, arg)
}
}
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithScheduler> for F
impl<'a, M, T, R, Fut, F> ReplierFn<'a, M, T, R, markers::AsyncWithContext> for F
where
M: Model,
Fut: Future<Output = R> + Send + 'a,
F: FnOnce(&'a mut M, T, &'a Scheduler<M>) -> Fut + Send + 'static,
F: FnOnce(&'a mut M, T, &'a mut Context<M>) -> Fut + Send + 'static,
{
type Future = Fut;
fn call(self, model: &'a mut M, arg: T, scheduler: &'a Scheduler<M>) -> Self::Future {
self(model, arg, scheduler)
fn call(self, model: &'a mut M, arg: T, cx: &'a mut Context<M>) -> Self::Future {
self(model, arg, cx)
}
}

418
nexosim/src/ports/output.rs Normal file
View File

@ -0,0 +1,418 @@
mod broadcaster;
mod sender;
use std::fmt;
use crate::model::Model;
use crate::ports::EventSink;
use crate::ports::{InputFn, ReplierFn};
use crate::simulation::Address;
use crate::util::cached_rw_lock::CachedRwLock;
use crate::util::unwrap_or_throw::UnwrapOrThrow;
use broadcaster::{EventBroadcaster, QueryBroadcaster};
use sender::{FilterMapReplierSender, Sender};
use self::sender::{
EventSinkSender, FilterMapEventSinkSender, FilterMapInputSender, InputSender,
MapEventSinkSender, MapInputSender, MapReplierSender, ReplierSender,
};
/// An output port.
///
/// `Output` ports can be connected to input ports, i.e. to asynchronous model
/// methods that return no value. They broadcast events to all connected input
/// ports.
///
/// When an `Output` is cloned, the information on connected ports remains
/// shared and therefore all clones use and modify the same list of connected
/// ports.
#[derive(Clone)]
pub struct Output<T: Clone + Send + 'static> {
    /// List of senders to the connected ports; shared between clones of this
    /// port so that a connection made through any clone is visible to all.
    broadcaster: CachedRwLock<EventBroadcaster<T>>,
}
impl<T: Clone + Send + 'static> Output<T> {
    /// Creates a disconnected `Output` port.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds a connection to an input port of the model specified by the
    /// address.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of type `T` plus, optionally, a context
    /// reference.
    pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>)
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        S: Send + 'static,
    {
        // `address.into().0` extracts the model's channel sender from the
        // `Address` newtype.
        let sender = Box::new(InputSender::new(input, address.into().0));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds a connection to an event sink such as an
    /// [`EventSlot`](crate::ports::EventSlot) or
    /// [`EventBuffer`](crate::ports::EventBuffer).
    pub fn connect_sink<S: EventSink<T>>(&mut self, sink: &S) {
        let sender = Box::new(EventSinkSender::new(sink.writer()));
        self.broadcaster.write().unwrap().add(sender)
    }
    /// Adds an auto-converting connection to an input port of the model
    /// specified by the address.
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of the type returned by the mapping
    /// closure plus, optionally, a context reference.
    pub fn map_connect<M, C, F, U, S>(&mut self, map: C, input: F, address: impl Into<Address<M>>)
    where
        M: Model,
        C: Fn(&T) -> U + Send + Sync + 'static,
        F: for<'a> InputFn<'a, M, U, S> + Clone,
        U: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(MapInputSender::new(map, input, address.into().0));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds an auto-converting connection to an event sink such as an
    /// [`EventSlot`](crate::ports::EventSlot) or
    /// [`EventBuffer`](crate::ports::EventBuffer).
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument.
    pub fn map_connect_sink<C, U, S>(&mut self, map: C, sink: &S)
    where
        C: Fn(&T) -> U + Send + Sync + 'static,
        U: Send + 'static,
        S: EventSink<U>,
    {
        let sender = Box::new(MapEventSinkSender::new(map, sink.writer()));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds an auto-converting, filtered connection to an input port of the
    /// model specified by the address.
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument, or ignored if the closure returns `None`.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of the type returned by the mapping
    /// closure plus, optionally, a context reference.
    pub fn filter_map_connect<M, C, F, U, S>(
        &mut self,
        filter_map: C,
        input: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: Fn(&T) -> Option<U> + Send + Sync + 'static,
        F: for<'a> InputFn<'a, M, U, S> + Clone,
        U: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(FilterMapInputSender::new(
            filter_map,
            input,
            address.into().0,
        ));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds an auto-converting, filtered connection to an event sink such as
    /// an [`EventSlot`](crate::ports::EventSlot) or
    /// [`EventBuffer`](crate::ports::EventBuffer).
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument, or ignored if the closure returns `None`.
    pub fn filter_map_connect_sink<C, U, S>(&mut self, filter_map: C, sink: &S)
    where
        C: Fn(&T) -> Option<U> + Send + Sync + 'static,
        U: Send + 'static,
        S: EventSink<U>,
    {
        let sender = Box::new(FilterMapEventSinkSender::new(filter_map, sink.writer()));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Broadcasts an event to all connected input ports.
    pub async fn send(&mut self, arg: T) {
        // NOTE(review): `unwrap_or_throw` presumably converts a broadcast
        // error into a panic/throw — confirm against `UnwrapOrThrow`'s
        // definition in `util::unwrap_or_throw`.
        let broadcaster = self.broadcaster.write_scratchpad().unwrap();
        broadcaster.broadcast(arg).await.unwrap_or_throw();
    }
}
impl<T: Clone + Send + 'static> Default for Output<T> {
    /// Returns a disconnected `Output` port.
    fn default() -> Self {
        let broadcaster = CachedRwLock::new(EventBroadcaster::default());
        Self { broadcaster }
    }
}
impl<T: Clone + Send + 'static> fmt::Debug for Output<T> {
    /// Formats the port as `Output (N connected ports)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let connected = self.broadcaster.read_unsync().len();
        write!(f, "Output ({} connected ports)", connected)
    }
}
/// A requestor port.
///
/// `Requestor` ports can be connected to replier ports, i.e. to asynchronous
/// model methods that return a value. They broadcast queries to all connected
/// replier ports.
///
/// When a `Requestor` is cloned, the information on connected ports remains
/// shared and therefore all clones use and modify the same list of connected
/// ports.
#[derive(Clone)]
pub struct Requestor<T: Clone + Send + 'static, R: Send + 'static> {
    /// List of senders to the connected replier ports; shared between clones
    /// of this port so that a connection made through any clone is visible to
    /// all.
    broadcaster: CachedRwLock<QueryBroadcaster<T, R>>,
}
impl<T: Clone + Send + 'static, R: Send + 'static> Requestor<T, R> {
    /// Creates a disconnected `Requestor` port.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds a connection to a replier port of the model specified by the
    /// address.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of type `R` and taking as argument a value of type `T`
    /// plus, optionally, a context reference.
    pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>)
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
        S: Send + 'static,
    {
        // `address.into().0` extracts the model's channel sender from the
        // `Address` newtype.
        let sender = Box::new(ReplierSender::new(replier, address.into().0));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds an auto-converting connection to a replier port of the model
    /// specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn map_connect<M, C, D, F, U, Q, S>(
        &mut self,
        query_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: Fn(&T) -> U + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(MapReplierSender::new(
            query_map,
            reply_map,
            replier,
            address.into().0,
        ));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Adds an auto-converting, filtered connection to a replier port of the
    /// model specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument, or ignored if the query closure returns `None`.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn filter_map_connect<M, C, D, F, U, Q, S>(
        &mut self,
        query_filter_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: Fn(&T) -> Option<U> + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(FilterMapReplierSender::new(
            query_filter_map,
            reply_map,
            replier,
            address.into().0,
        ));
        self.broadcaster.write().unwrap().add(sender);
    }
    /// Broadcasts a query to all connected replier ports.
    ///
    /// Returns an iterator over the replies received from the connected
    /// ports.
    pub async fn send(&mut self, arg: T) -> impl Iterator<Item = R> + '_ {
        // NOTE(review): `unwrap_or_throw` presumably converts a broadcast
        // error into a panic/throw — confirm against `UnwrapOrThrow`'s
        // definition in `util::unwrap_or_throw`.
        self.broadcaster
            .write_scratchpad()
            .unwrap()
            .broadcast(arg)
            .await
            .unwrap_or_throw()
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> Default for Requestor<T, R> {
    /// Returns a disconnected `Requestor` port.
    fn default() -> Self {
        let broadcaster = CachedRwLock::new(QueryBroadcaster::default());
        Self { broadcaster }
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for Requestor<T, R> {
    /// Formats the port as `Requestor (N connected ports)`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let connected = self.broadcaster.read_unsync().len();
        write!(f, "Requestor ({} connected ports)", connected)
    }
}
/// A requestor port with exactly one connection.
///
/// A `UniRequestor` port is connected to a replier port, i.e. to an
/// asynchronous model method that returns a value.
#[derive(Clone)]
pub struct UniRequestor<T: Clone + Send + 'static, R: Send + 'static> {
    /// The single sender to the connected replier port. Cloning the boxed
    /// trait object is made possible by `dyn_clone::clone_trait_object!`.
    sender: Box<dyn Sender<T, R>>,
}
impl<T: Clone + Send + 'static, R: Send + 'static> UniRequestor<T, R> {
    /// Creates a `UniRequestor` port connected to a replier port of the model
    /// specified by the address.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of type `R` and taking as argument a value of type `T`
    /// plus, optionally, a context reference.
    pub fn new<M, F, S>(replier: F, address: impl Into<Address<M>>) -> Self
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
        S: Send + 'static,
    {
        // `address.into().0` extracts the model's channel sender from the
        // `Address` newtype.
        let sender = Box::new(ReplierSender::new(replier, address.into().0));
        Self { sender }
    }
    /// Creates an auto-converting `UniRequestor` port connected to a replier
    /// port of the model specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn with_map<M, C, D, F, U, Q, S>(
        query_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) -> Self
    where
        M: Model,
        C: Fn(&T) -> U + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(MapReplierSender::new(
            query_map,
            reply_map,
            replier,
            address.into().0,
        ));
        Self { sender }
    }
    /// Creates an auto-converting, filtered `UniRequestor` port connected to a
    /// replier port of the model specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument, or ignored if the query closure returns `None`.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn with_filter_map<M, C, D, F, U, Q, S>(
        query_filter_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) -> Self
    where
        M: Model,
        C: Fn(&T) -> Option<U> + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + 'static,
    {
        let sender = Box::new(FilterMapReplierSender::new(
            query_filter_map,
            reply_map,
            replier,
            address.into().0,
        ));
        Self { sender }
    }
    /// Sends a query to the connected replier port.
    ///
    /// Returns `None` when the sender discards the query, which happens for a
    /// port created with [`Self::with_filter_map`] whose query closure
    /// returned `None`.
    pub async fn send(&mut self, arg: T) -> Option<R> {
        if let Some(fut) = self.sender.send_owned(arg) {
            // NOTE(review): `unwrap_or_throw` presumably converts a send
            // error into a panic/throw — confirm against `UnwrapOrThrow`.
            let output = fut.await.unwrap_or_throw();
            Some(output)
        } else {
            None
        }
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for UniRequestor<T, R> {
    /// Formats the port as the fixed string `UniRequestor`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str("UniRequestor")
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,756 @@
use std::future::Future;
use std::marker::PhantomData;
use std::mem::ManuallyDrop;
use std::pin::Pin;
use std::sync::Arc;
use std::task::{Context, Poll};
use dyn_clone::DynClone;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel;
use crate::channel::SendError;
use crate::model::Model;
use crate::ports::{EventSinkWriter, InputFn, ReplierFn};
/// An event or query sender abstracting over the target model and input or
/// replier method.
pub(super) trait Sender<T, R>: DynClone + Send {
    /// Asynchronously sends a message using a reference to the message.
    ///
    /// Returns `None` when the sender discards the message (e.g. a filtering
    /// sender whose closure returned `None`).
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<R, SendError>>>;
    /// Asynchronously sends an owned message.
    ///
    /// By default this simply forwards to [`Self::send`] by reference;
    /// implementations that can avoid a clone override it.
    fn send_owned(&mut self, arg: T) -> Option<RecycledFuture<'_, Result<R, SendError>>> {
        self.send(&arg)
    }
}
// Makes `Box<dyn Sender<T, R>>` cloneable via the `DynClone` supertrait.
dyn_clone::clone_trait_object!(<T, R> Sender<T, R>);
/// An object that can send events to an input port.
pub(super) struct InputSender<M, F, T, S>
where
    M: 'static,
{
    /// Input method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Recycled allocation reused by the futures returned from `send_owned`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, F, T, S> InputSender<M, F, T, S>
where
    M: 'static,
{
    /// Creates a sender for the given input method and model channel.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            fut_storage: None,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone,
    T: Clone + Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        // The input method consumes the argument, so a by-reference send
        // must clone it.
        self.send_owned(arg.clone())
    }
    fn send_owned(&mut self, arg: T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        let func = self.func.clone();
        let fut = self.sender.send(move |model, scheduler, recycle_box| {
            // Store the input method's future in the channel's recycled box.
            let fut = func.call(model, arg, scheduler);
            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });
        // Always `Some`: plain input senders never filter messages out.
        Some(RecycledFuture::new(&mut self.fut_storage, fut))
    }
}
impl<M, F, T, S> Clone for InputSender<M, F, T, S>
where
    M: 'static,
    F: Clone,
{
    // Hand-written so the clone starts with an empty `fut_storage` and no
    // `Clone` bound is imposed on the phantom type parameters.
    fn clone(&self) -> Self {
        Self {
            func: self.func.clone(),
            sender: self.sender.clone(),
            fut_storage: None,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// An object that can send mapped events to an input port.
pub(super) struct MapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Closure mapping `&T` events to the `U` values the input expects.
    map: Arc<C>,
    /// Input method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_map: PhantomData<fn(T) -> U>,
    _phantom_closure: PhantomData<fn(&mut M, U)>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, C, F, T, U, S> MapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Creates a mapping sender for the given closure, input method and model
    /// channel.
    pub(super) fn new(map: C, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            map: Arc::new(map),
            func,
            sender,
            fut_storage: None,
            _phantom_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, C, F, T, U, S> Sender<T, ()> for MapInputSender<M, C, F, T, U, S>
where
    M: Model,
    C: Fn(&T) -> U + Send + Sync,
    F: for<'a> InputFn<'a, M, U, S> + Clone,
    T: Send + 'static,
    U: Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        let func = self.func.clone();
        // Map the event before shipping it to the model.
        let arg = (self.map)(arg);
        let fut = self.sender.send(move |model, scheduler, recycle_box| {
            let fut = func.call(model, arg, scheduler);
            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });
        // Always `Some`: mapping senders never filter messages out.
        Some(RecycledFuture::new(&mut self.fut_storage, fut))
    }
}
impl<M, C, F, T, U, S> Clone for MapInputSender<M, C, F, T, U, S>
where
    M: 'static,
    F: Clone,
{
    // Hand-written so the clone starts with an empty `fut_storage`; the
    // mapping closure is shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            map: self.map.clone(),
            func: self.func.clone(),
            sender: self.sender.clone(),
            fut_storage: None,
            _phantom_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// An object that can filter and send mapped events to an input port.
pub(super) struct FilterMapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Closure mapping `&T` events to `Option<U>`; `None` drops the event.
    filter_map: Arc<C>,
    /// Input method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_filter_map: PhantomData<fn(T) -> Option<U>>,
    _phantom_closure: PhantomData<fn(&mut M, U)>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, C, F, T, U, S> FilterMapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Creates a filtering sender for the given closure, input method and
    /// model channel.
    pub(super) fn new(filter_map: C, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            filter_map: Arc::new(filter_map),
            func,
            sender,
            fut_storage: None,
            _phantom_filter_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, C, F, T, U, S> Sender<T, ()> for FilterMapInputSender<M, C, F, T, U, S>
where
    M: Model,
    C: Fn(&T) -> Option<U> + Send + Sync,
    F: for<'a> InputFn<'a, M, U, S> + Clone,
    T: Send + 'static,
    U: Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        // When the closure yields `None` nothing is sent and `None` is
        // returned, signalling to the caller that the event was dropped.
        (self.filter_map)(arg).map(|arg| {
            let func = self.func.clone();
            let fut = self.sender.send(move |model, scheduler, recycle_box| {
                let fut = func.call(model, arg, scheduler);
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            });
            RecycledFuture::new(&mut self.fut_storage, fut)
        })
    }
}
impl<M, C, F, T, U, S> Clone for FilterMapInputSender<M, C, F, T, U, S>
where
    M: 'static,
    F: Clone,
{
    // Hand-written so the clone starts with an empty `fut_storage`; the
    // filtering closure is shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            filter_map: self.filter_map.clone(),
            func: self.func.clone(),
            sender: self.sender.clone(),
            fut_storage: None,
            _phantom_filter_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// An object that can send an event to an event sink.
pub(super) struct EventSinkSender<T, W> {
    /// Writer side of the target event sink.
    writer: W,
    /// Recycled allocation reused by the futures returned from `send_owned`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_event: PhantomData<T>,
}
impl<T, W> EventSinkSender<T, W> {
    /// Creates a sender wrapping the given sink writer.
    pub(super) fn new(writer: W) -> Self {
        Self {
            writer,
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
impl<T, W> Sender<T, ()> for EventSinkSender<T, W>
where
    T: Clone + Send + 'static,
    W: EventSinkWriter<T>,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        // The sink takes ownership of the event, so a by-reference send must
        // clone it.
        self.send_owned(arg.clone())
    }
    fn send_owned(&mut self, arg: T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        let writer = &mut self.writer;
        // Writing to a sink cannot fail, hence the unconditional `Ok(())`.
        Some(RecycledFuture::new(&mut self.fut_storage, async move {
            writer.write(arg);
            Ok(())
        }))
    }
}
impl<T, W: Clone> Clone for EventSinkSender<T, W> {
    // Hand-written so the clone starts with an empty `fut_storage`.
    fn clone(&self) -> Self {
        Self {
            writer: self.writer.clone(),
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
/// An object that can send mapped events to an event sink.
pub(super) struct MapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> U,
{
    /// Writer side of the target event sink.
    writer: W,
    /// Closure mapping `&T` events to the `U` values the sink accepts.
    map: Arc<C>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_event: PhantomData<T>,
}
impl<T, U, W, C> MapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> U,
{
    /// Creates a mapping sender wrapping the given closure and sink writer.
    pub(super) fn new(map: C, writer: W) -> Self {
        Self {
            writer,
            map: Arc::new(map),
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
impl<T, U, W, C> Sender<T, ()> for MapEventSinkSender<T, U, W, C>
where
    T: Send + 'static,
    U: Send + 'static,
    C: Fn(&T) -> U + Send + Sync,
    W: EventSinkWriter<U>,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        let writer = &mut self.writer;
        // Map the event eagerly; only the mapped value is captured by the
        // future.
        let arg = (self.map)(arg);
        Some(RecycledFuture::new(&mut self.fut_storage, async move {
            writer.write(arg);
            Ok(())
        }))
    }
}
impl<T, U, W, C> Clone for MapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> U,
    W: Clone,
{
    // Hand-written so the clone starts with an empty `fut_storage`; the
    // mapping closure is shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            writer: self.writer.clone(),
            map: self.map.clone(),
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
/// An object that can filter and send mapped events to an event sink.
pub(super) struct FilterMapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> Option<U>,
{
    /// Writer side of the target event sink.
    writer: W,
    /// Closure mapping `&T` events to `Option<U>`; `None` drops the event.
    filter_map: Arc<C>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_event: PhantomData<T>,
}
impl<T, U, W, C> FilterMapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> Option<U>,
{
    /// Creates a filtering sender wrapping the given closure and sink writer.
    pub(super) fn new(filter_map: C, writer: W) -> Self {
        Self {
            writer,
            filter_map: Arc::new(filter_map),
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
impl<T, U, W, C> Sender<T, ()> for FilterMapEventSinkSender<T, U, W, C>
where
    T: Send + 'static,
    U: Send + 'static,
    C: Fn(&T) -> Option<U> + Send + Sync,
    W: EventSinkWriter<U>,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<(), SendError>>> {
        let writer = &mut self.writer;
        // When the closure yields `None` nothing is written and `None` is
        // returned, signalling to the caller that the event was dropped.
        (self.filter_map)(arg).map(|arg| {
            RecycledFuture::new(&mut self.fut_storage, async move {
                writer.write(arg);
                Ok(())
            })
        })
    }
}
impl<T, U, W, C> Clone for FilterMapEventSinkSender<T, U, W, C>
where
    C: Fn(&T) -> Option<U>,
    W: Clone,
{
    // Hand-written so the clone starts with an empty `fut_storage`; the
    // filtering closure is shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            writer: self.writer.clone(),
            filter_map: self.filter_map.clone(),
            fut_storage: None,
            _phantom_event: PhantomData,
        }
    }
}
/// An object that can send requests to a replier port and retrieve responses.
pub(super) struct ReplierSender<M, F, T, R, S>
where
    M: Model,
{
    /// Replier method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Multi-shot receiver from which replies are awaited; a fresh one-shot
    /// sender is taken from it for each request.
    receiver: multishot::Receiver<R>,
    /// Recycled allocation reused by the futures returned from `send_owned`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
where
    M: Model,
{
    /// Creates a sender for the given replier method and model channel.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Clone,
    T: Clone + Send + 'static,
    R: Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<R, SendError>>> {
        // The replier method consumes the argument, so a by-reference send
        // must clone it.
        self.send_owned(arg.clone())
    }
    fn send_owned(&mut self, arg: T) -> Option<RecycledFuture<'_, Result<R, SendError>>> {
        let func = self.func.clone();
        let sender = &mut self.sender;
        let reply_receiver = &mut self.receiver;
        let fut_storage = &mut self.fut_storage;
        // The previous future generated by this method should have been polled
        // to completion so a new sender should be readily available.
        let reply_sender = reply_receiver.sender().unwrap();
        let send_fut = sender.send(move |model, scheduler, recycle_box| {
            // On the model side: call the replier method and route its reply
            // back through the one-shot sender.
            let fut = async move {
                let reply = func.call(model, arg, scheduler).await;
                reply_sender.send(reply);
            };
            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });
        Some(RecycledFuture::new(fut_storage, async move {
            // Send the message.
            send_fut.await?;
            // Wait until the message is processed and the reply is sent back.
            // If an error is received, it most likely means the mailbox was
            // dropped before the message was processed.
            reply_receiver.recv().await.map_err(|_| SendError)
        }))
    }
}
impl<M, F, T, R, S> Clone for ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: Clone,
{
    // Hand-written so the clone gets its own reply receiver and an empty
    // `fut_storage` — in-flight state is never shared between clones.
    fn clone(&self) -> Self {
        Self {
            func: self.func.clone(),
            sender: self.sender.clone(),
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// An object that can send mapped requests to a replier port and retrieve
/// mapped responses.
pub(super) struct MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
{
    /// Closure mapping `&T` queries to the `U` values the replier expects.
    query_map: Arc<C>,
    /// Closure mapping the replier's `Q` replies back to `R`.
    reply_map: Arc<D>,
    /// Replier method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Multi-shot receiver from which (unmapped) replies are awaited.
    receiver: multishot::Receiver<Q>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_query_map: PhantomData<fn(T) -> U>,
    _phantom_reply_map: PhantomData<fn(Q) -> R>,
    _phantom_closure: PhantomData<fn(&mut M, U) -> Q>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, C, D, F, T, R, U, Q, S> MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
{
    /// Creates a mapping sender for the given closures, replier method and
    /// model channel.
    pub(super) fn new(query_map: C, reply_map: D, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            query_map: Arc::new(query_map),
            reply_map: Arc::new(reply_map),
            func,
            sender,
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, C, D, F, T, R, U, Q, S> Sender<T, R> for MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    C: Fn(&T) -> U + Send + Sync,
    D: Fn(Q) -> R + Send + Sync,
    F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
    T: Send + 'static,
    R: Send + 'static,
    U: Send + 'static,
    Q: Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<R, SendError>>> {
        let func = self.func.clone();
        // Map the query eagerly; only the mapped value is shipped to the
        // model.
        let arg = (self.query_map)(arg);
        let sender = &mut self.sender;
        let reply_receiver = &mut self.receiver;
        let fut_storage = &mut self.fut_storage;
        let reply_map = &*self.reply_map;
        // The previous future generated by this method should have been polled
        // to completion so a new sender should be readily available.
        let reply_sender = reply_receiver.sender().unwrap();
        let send_fut = sender.send(move |model, scheduler, recycle_box| {
            // On the model side: call the replier method and route its reply
            // back through the one-shot sender.
            let fut = async move {
                let reply = func.call(model, arg, scheduler).await;
                reply_sender.send(reply);
            };
            coerce_box!(RecycleBox::recycle(recycle_box, fut))
        });
        Some(RecycledFuture::new(fut_storage, async move {
            // Send the message.
            send_fut.await?;
            // Wait until the message is processed and the reply is sent back.
            // If an error is received, it most likely means the mailbox was
            // dropped before the message was processed.
            reply_receiver
                .recv()
                .await
                .map_err(|_| SendError)
                .map(reply_map)
        }))
    }
}
impl<M, C, D, F, T, R, U, Q, S> Clone for MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    F: Clone,
{
    // Hand-written so the clone gets its own reply receiver and an empty
    // `fut_storage`; the mapping closures are shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            query_map: self.query_map.clone(),
            reply_map: self.reply_map.clone(),
            func: self.func.clone(),
            sender: self.sender.clone(),
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// An object that can filter and send mapped requests to a replier port and
/// retrieve mapped responses.
pub(super) struct FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
{
    /// Closure mapping `&T` queries to `Option<U>`; `None` drops the query.
    query_filter_map: Arc<C>,
    /// Closure mapping the replier's `Q` replies back to `R`.
    reply_map: Arc<D>,
    /// Replier method to be called on the target model.
    func: F,
    /// Channel to the target model's mailbox.
    sender: channel::Sender<M>,
    /// Multi-shot receiver from which (unmapped) replies are awaited.
    receiver: multishot::Receiver<Q>,
    /// Recycled allocation reused by the futures returned from `send`.
    fut_storage: Option<RecycleBox<()>>,
    _phantom_query_map: PhantomData<fn(T) -> U>,
    _phantom_reply_map: PhantomData<fn(Q) -> R>,
    _phantom_closure: PhantomData<fn(&mut M, U) -> Q>,
    _phantom_closure_marker: PhantomData<S>,
}
impl<M, C, D, F, T, R, U, Q, S> FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
{
    /// Creates a filtering sender for the given closures, replier method and
    /// model channel.
    pub(super) fn new(
        query_filter_map: C,
        reply_map: D,
        func: F,
        sender: channel::Sender<M>,
    ) -> Self {
        Self {
            query_filter_map: Arc::new(query_filter_map),
            reply_map: Arc::new(reply_map),
            func,
            sender,
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
impl<M, C, D, F, T, R, U, Q, S> Sender<T, R> for FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    C: Fn(&T) -> Option<U> + Send + Sync,
    D: Fn(Q) -> R + Send + Sync,
    F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone,
    T: Send + 'static,
    R: Send + 'static,
    U: Send + 'static,
    Q: Send + 'static,
    S: Send,
{
    fn send(&mut self, arg: &T) -> Option<RecycledFuture<'_, Result<R, SendError>>> {
        // When the closure yields `None` no request is sent and `None` is
        // returned, signalling to the caller that the query was dropped.
        (self.query_filter_map)(arg).map(|arg| {
            let func = self.func.clone();
            let sender = &mut self.sender;
            let reply_receiver = &mut self.receiver;
            let fut_storage = &mut self.fut_storage;
            let reply_map = &*self.reply_map;
            // The previous future generated by this method should have been polled
            // to completion so a new sender should be readily available.
            let reply_sender = reply_receiver.sender().unwrap();
            let send_fut = sender.send(move |model, scheduler, recycle_box| {
                // On the model side: call the replier method and route its
                // reply back through the one-shot sender.
                let fut = async move {
                    let reply = func.call(model, arg, scheduler).await;
                    reply_sender.send(reply);
                };
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            });
            RecycledFuture::new(fut_storage, async move {
                // Send the message.
                send_fut.await?;
                // Wait until the message is processed and the reply is sent back.
                // If an error is received, it most likely means the mailbox was
                // dropped before the message was processed.
                reply_receiver
                    .recv()
                    .await
                    .map_err(|_| SendError)
                    .map(reply_map)
            })
        })
    }
}
impl<M, C, D, F, T, R, U, Q, S> Clone for FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    F: Clone,
{
    // Hand-written so the clone gets its own reply receiver and an empty
    // `fut_storage`; the mapping closures are shared via `Arc`.
    fn clone(&self) -> Self {
        Self {
            query_filter_map: self.query_filter_map.clone(),
            reply_map: self.reply_map.clone(),
            func: self.func.clone(),
            sender: self.sender.clone(),
            receiver: multishot::Receiver::new(),
            fut_storage: None,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}
/// A boxed future whose heap allocation is returned to a lender on drop.
///
/// The future is stored in a `RecycleBox` taken from `lender_box`; when this
/// wrapper is dropped the (vacated) box is handed back so the allocation can
/// be reused for the next future.
pub(super) struct RecycledFuture<'a, T> {
    // `ManuallyDrop` lets `Drop::drop` move the pinned box out and return it
    // to the lender instead of destroying it.
    fut: ManuallyDrop<Pin<RecycleBox<dyn Future<Output = T> + Send + 'a>>>,
    // Slot to which the vacated box is returned on drop.
    lender_box: &'a mut Option<RecycleBox<()>>,
}
impl<'a, T> RecycledFuture<'a, T> {
    /// Wraps `fut` in a box taken from `lender_box`, allocating a fresh box
    /// only when the lender slot is empty.
    pub(super) fn new<F: Future<Output = T> + Send + 'a>(
        lender_box: &'a mut Option<RecycleBox<()>>,
        fut: F,
    ) -> Self {
        let vacated_box = lender_box.take().unwrap_or_else(|| RecycleBox::new(()));
        let fut: RecycleBox<dyn Future<Output = T> + Send + 'a> =
            coerce_box!(RecycleBox::recycle(vacated_box, fut));
        Self {
            fut: ManuallyDrop::new(RecycleBox::into_pin(fut)),
            lender_box,
        }
    }
}
impl<T> Drop for RecycledFuture<'_, T> {
    fn drop(&mut self) {
        // Return the box to the lender so its allocation can be reused.
        //
        // SAFETY: taking the `fut` member is safe since it is never used
        // again: `drop` is the last call on `self` and the field is not
        // otherwise dropped because it is wrapped in `ManuallyDrop`.
        *self.lender_box = Some(RecycleBox::vacate_pinned(unsafe {
            ManuallyDrop::take(&mut self.fut)
        }));
    }
}
impl<T> Future for RecycledFuture<'_, T> {
    type Output = T;
    // Delegates polling to the inner pinned, boxed future.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.fut.as_mut().poll(cx)
    }
}

58
nexosim/src/ports/sink.rs Normal file
View File

@ -0,0 +1,58 @@
pub(crate) mod blocking_event_queue;
pub(crate) mod event_buffer;
pub(crate) mod event_slot;
/// A simulation endpoint that can receive events sent by model outputs.
///
/// An `EventSink` can be thought of as a self-standing input meant to
/// externally monitor the simulated system.
pub trait EventSink<T> {
    /// Writer handle to an event sink.
    type Writer: EventSinkWriter<T>;
    /// Returns the writer handle associated to this sink.
    fn writer(&self) -> Self::Writer;
}
/// A writer handle to an event sink.
///
/// Writers are cheap to clone and can be shared across threads.
pub trait EventSinkWriter<T>: Clone + Send + Sync + 'static {
    /// Writes a value to the associated sink.
    fn write(&self, event: T);
}
/// An iterator over collected events with the ability to pause and resume event
/// collection.
///
/// An `EventSinkStream` will typically be implemented on an [`EventSink`] for
/// which it will constitute a draining iterator.
pub trait EventSinkStream: Iterator {
    /// Starts or resumes the collection of new events.
    fn open(&mut self);
    /// Pauses the collection of new events.
    ///
    /// Events that were previously in the stream remain available.
    fn close(&mut self);
    /// This is a stop-gap method that serves the exact same purpose as
    /// `Iterator::try_fold` but is specialized for `Result` rather than the
    /// `Try` trait so it can be implemented on stable Rust.
    ///
    /// It makes it possible to provide a faster implementation when the event
    /// sink stream can be iterated over more rapidly than by repeatably calling
    /// `Iterator::next`, for instance if the implementation of the stream
    /// relies on a mutex that must be locked on each call.
    ///
    /// It is not publicly implementable because it may be removed at any time
    /// once the `Try` trait is stabilized, without regard for backward
    /// compatibility.
    #[doc(hidden)]
    #[allow(private_interfaces)]
    fn __try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> Result<B, E>,
    {
        // Default: plain per-item iteration via the standard `try_fold`.
        Iterator::try_fold(self, init, f)
    }
}

View File

@ -0,0 +1,145 @@
use std::fmt;
use std::iter::FusedIterator;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::mpsc::{channel, Receiver, Sender};
use std::sync::Arc;
use super::{EventSink, EventSinkStream, EventSinkWriter};
/// A blocking event queue with an unbounded size.
///
/// Implements [`EventSink`].
///
/// Note that [`EventSinkStream`] is implemented by
/// [`BlockingEventQueueReader`], created with the
/// [`BlockingEventQueue::into_reader`] method.
pub struct BlockingEventQueue<T> {
    // Shared open/closed flag; writers drop events while `false`.
    is_open: Arc<AtomicBool>,
    // Producer end, cloned into each writer handle.
    sender: Sender<T>,
    // Consumer end, moved into the reader by `into_reader`.
    receiver: Receiver<T>,
}
impl<T> BlockingEventQueue<T> {
    /// Creates an open `BlockingEventQueue`.
    pub fn new() -> Self {
        Self::new_with_state(true)
    }

    /// Creates a closed `BlockingEventQueue`.
    pub fn new_closed() -> Self {
        Self::new_with_state(false)
    }

    /// Returns a consumer handle.
    pub fn into_reader(self) -> BlockingEventQueueReader<T> {
        // Dropping `self` also drops the queue's own copy of the sender, so
        // only writer handles keep the channel alive from here on.
        let Self {
            is_open, receiver, ..
        } = self;
        BlockingEventQueueReader { is_open, receiver }
    }

    /// Creates a new `BlockingEventQueue` in the specified state.
    fn new_with_state(is_open: bool) -> Self {
        let (tx, rx) = channel();
        Self {
            is_open: Arc::new(AtomicBool::new(is_open)),
            sender: tx,
            receiver: rx,
        }
    }
}
impl<T: Send + 'static> EventSink<T> for BlockingEventQueue<T> {
    type Writer = BlockingEventQueueWriter<T>;
    // Returns a producer handle sharing this queue's channel and open flag.
    fn writer(&self) -> Self::Writer {
        BlockingEventQueueWriter {
            is_open: self.is_open.clone(),
            sender: self.sender.clone(),
        }
    }
}
impl<T> Default for BlockingEventQueue<T> {
    /// Creates an open `BlockingEventQueue`, same as [`BlockingEventQueue::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<T> fmt::Debug for BlockingEventQueue<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("BlockingEventQueue").finish_non_exhaustive()
    }
}
/// A consumer handle of a `BlockingEventQueue`.
///
/// Implements [`EventSinkStream`]. Calls to the iterator's `next` method are
/// blocking. `None` is returned when all writer handles have been dropped.
pub struct BlockingEventQueueReader<T> {
    // Shared open/closed flag, toggled via `open`/`close`.
    is_open: Arc<AtomicBool>,
    // Consumer end of the queue's channel.
    receiver: Receiver<T>,
}
impl<T> Iterator for BlockingEventQueueReader<T> {
    type Item = T;

    /// Blocks until an event is available.
    ///
    /// Returns `None` once all writer handles have been dropped and the
    /// channel has been drained.
    fn next(&mut self) -> Option<Self::Item> {
        // `recv` fails only on disconnection, so the manual `match` over
        // `Ok`/`Err` reduces to the idiomatic `Result::ok` conversion.
        self.receiver.recv().ok()
    }
}
// Once `recv` reports disconnection it does so forever, so the iterator is
// trivially fused.
impl<T> FusedIterator for BlockingEventQueueReader<T> {}
impl<T: Send + 'static> EventSinkStream for BlockingEventQueueReader<T> {
    // Relaxed ordering suffices: the flag is an independent on/off switch
    // with no data published alongside it.
    fn open(&mut self) {
        self.is_open.store(true, Ordering::Relaxed);
    }
    fn close(&mut self) {
        self.is_open.store(false, Ordering::Relaxed);
    }
}
impl<T> fmt::Debug for BlockingEventQueueReader<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("BlockingEventQueueReader")
            .finish_non_exhaustive()
    }
}
/// A producer handle of a `BlockingEventQueue`.
pub struct BlockingEventQueueWriter<T> {
    // Shared open/closed flag; events are dropped while `false`.
    is_open: Arc<AtomicBool>,
    // Producer end of the queue's channel.
    sender: Sender<T>,
}
impl<T: Send + 'static> EventSinkWriter<T> for BlockingEventQueueWriter<T> {
    /// Pushes an event onto the queue.
    ///
    /// The event is silently discarded if the sink is closed.
    fn write(&self, event: T) {
        if !self.is_open.load(Ordering::Relaxed) {
            return;
        }
        // Ignore sending failure: it only occurs when the reader was dropped,
        // in which case nobody is interested in the event anyway.
        let _ = self.sender.send(event);
    }
}
impl<T> Clone for BlockingEventQueueWriter<T> {
fn clone(&self) -> Self {
Self {
is_open: self.is_open.clone(),
sender: self.sender.clone(),
}
}
}
impl<T> fmt::Debug for BlockingEventQueueWriter<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("BlockingEventQueueWriter")
.finish_non_exhaustive()
}
}

View File

@ -0,0 +1,149 @@
use std::collections::VecDeque;
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use super::{EventSink, EventSinkStream, EventSinkWriter};
/// The shared data of an `EventBuffer`.
struct Inner<T> {
    // Maximum number of buffered events; the oldest event is evicted when
    // the buffer is full.
    capacity: usize,
    // Open/closed flag; events are dropped while `false`.
    is_open: AtomicBool,
    // FIFO storage for collected events.
    buffer: Mutex<VecDeque<T>>,
}
/// An iterator implementing [`EventSink`] and [`EventSinkStream`], backed by a
/// fixed-capacity buffer.
///
/// If the maximum capacity is exceeded, older events are overwritten. Events
/// are returned in first-in-first-out order. Note that even if the iterator
/// returns `None`, it may still produce more items in the future (in other
/// words, it is not a [`FusedIterator`](std::iter::FusedIterator)).
pub struct EventBuffer<T> {
    // State shared with all writer handles.
    inner: Arc<Inner<T>>,
}
impl<T> EventBuffer<T> {
    /// Default capacity when constructed with `new`.
    pub const DEFAULT_CAPACITY: usize = 16;

    /// Creates an open `EventBuffer` with the default capacity.
    pub fn new() -> Self {
        Self::with_capacity(Self::DEFAULT_CAPACITY)
    }

    /// Creates a closed `EventBuffer` with the default capacity.
    pub fn new_closed() -> Self {
        Self::with_capacity_closed(Self::DEFAULT_CAPACITY)
    }

    /// Creates an open `EventBuffer` with the specified capacity.
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_and_state(capacity, true)
    }

    /// Creates a closed `EventBuffer` with the specified capacity.
    pub fn with_capacity_closed(capacity: usize) -> Self {
        Self::with_capacity_and_state(capacity, false)
    }

    /// Creates an `EventBuffer` with the specified capacity and open/closed
    /// state (single constructor backing all public ones, mirroring
    /// `BlockingEventQueue::new_with_state`).
    fn with_capacity_and_state(capacity: usize, is_open: bool) -> Self {
        Self {
            inner: Arc::new(Inner {
                capacity,
                is_open: AtomicBool::new(is_open),
                buffer: Mutex::new(VecDeque::new()),
            }),
        }
    }
}
impl<T: Send + 'static> EventSink<T> for EventBuffer<T> {
    type Writer = EventBufferWriter<T>;
    // Returns a producer handle sharing this buffer's state.
    fn writer(&self) -> Self::Writer {
        EventBufferWriter {
            inner: self.inner.clone(),
        }
    }
}
impl<T> Iterator for EventBuffer<T> {
    type Item = T;
    /// Pops the oldest buffered event, or `None` if the buffer is currently
    /// empty.
    ///
    /// Panics if the buffer mutex was poisoned by a panicking writer.
    fn next(&mut self) -> Option<Self::Item> {
        self.inner.buffer.lock().unwrap().pop_front()
    }
}
impl<T: Send + 'static> EventSinkStream for EventBuffer<T> {
    fn open(&mut self) {
        self.inner.is_open.store(true, Ordering::Relaxed);
    }
    fn close(&mut self) {
        self.inner.is_open.store(false, Ordering::Relaxed);
    }
    // Fast path: locks the mutex once and drains all buffered events in a
    // single fold, rather than re-locking on every `next` call.
    #[doc(hidden)]
    #[allow(private_interfaces)]
    fn __try_fold<B, F, E>(&mut self, init: B, f: F) -> Result<B, E>
    where
        Self: Sized,
        F: FnMut(B, Self::Item) -> Result<B, E>,
    {
        let mut inner = self.inner.buffer.lock().unwrap();
        let mut drain = inner.drain(..);
        drain.try_fold(init, f)
    }
}
impl<T> Default for EventBuffer<T> {
    /// Creates an open `EventBuffer` with the default capacity, same as
    /// [`EventBuffer::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<T> fmt::Debug for EventBuffer<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventBuffer").finish_non_exhaustive()
    }
}
/// A producer handle of an `EventBuffer`.
pub struct EventBufferWriter<T> {
    // State shared with the owning `EventBuffer` and all other writers.
    inner: Arc<Inner<T>>,
}
impl<T: Send + 'static> EventSinkWriter<T> for EventBufferWriter<T> {
    /// Pushes an event onto the queue, evicting the oldest event if the
    /// buffer is full.
    ///
    /// The event is silently discarded if the sink is closed or if the
    /// capacity is zero.
    fn write(&self, event: T) {
        if !self.inner.is_open.load(Ordering::Relaxed) {
            return;
        }
        // A zero-capacity buffer can never hold an event. Without this guard
        // the `len == capacity` check below would only match on the very
        // first write, letting the buffer grow without bound afterwards.
        if self.inner.capacity == 0 {
            return;
        }
        let mut buffer = self.inner.buffer.lock().unwrap();
        // `>=` rather than `==` so the buffer stays bounded even if it were
        // ever observed above capacity; at most one eviction per insertion
        // is needed in the steady state.
        if buffer.len() >= self.inner.capacity {
            buffer.pop_front();
        }
        buffer.push_back(event);
    }
}
impl<T> Clone for EventBufferWriter<T> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T> fmt::Debug for EventBufferWriter<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("EventBufferWriter").finish_non_exhaustive()
}
}

View File

@ -0,0 +1,129 @@
use std::fmt;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex, TryLockError, TryLockResult};
use super::{EventSink, EventSinkStream, EventSinkWriter};
/// The shared data of an `EventSlot`.
struct Inner<T> {
    // Open/closed flag; events are dropped while `false`.
    is_open: AtomicBool,
    // Holds the most recent event, if any.
    slot: Mutex<Option<T>>,
}
/// An iterator implementing [`EventSink`] and [`EventSinkStream`] that only
/// keeps the last event.
///
/// Once the value is read, the iterator will return `None` until a new value is
/// received. If the slot contains a value when a new value is received, the
/// previous value is overwritten.
pub struct EventSlot<T> {
    // State shared with all writer handles.
    inner: Arc<Inner<T>>,
}
impl<T> EventSlot<T> {
    /// Creates an open `EventSlot`.
    pub fn new() -> Self {
        Self::with_state(true)
    }

    /// Creates a closed `EventSlot`.
    pub fn new_closed() -> Self {
        Self::with_state(false)
    }

    /// Creates an `EventSlot` in the specified open/closed state (shared
    /// constructor, mirroring `BlockingEventQueue::new_with_state`).
    fn with_state(is_open: bool) -> Self {
        Self {
            inner: Arc::new(Inner {
                is_open: AtomicBool::new(is_open),
                slot: Mutex::new(None),
            }),
        }
    }
}
impl<T: Send + 'static> EventSink<T> for EventSlot<T> {
    type Writer = EventSlotWriter<T>;
    /// Returns a writer handle sharing this slot's state.
    fn writer(&self) -> EventSlotWriter<T> {
        EventSlotWriter {
            inner: self.inner.clone(),
        }
    }
}
impl<T> Iterator for EventSlot<T> {
    type Item = T;
    /// Takes the current event out of the slot, if any.
    ///
    /// Uses `try_lock` so the reader never blocks: if a writer holds the
    /// lock, the slot is simply reported as empty for this call. Panics if
    /// the mutex was poisoned by a panicking writer.
    fn next(&mut self) -> Option<Self::Item> {
        match self.inner.slot.try_lock() {
            TryLockResult::Ok(mut v) => v.take(),
            TryLockResult::Err(TryLockError::WouldBlock) => None,
            TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}
impl<T: Send + 'static> EventSinkStream for EventSlot<T> {
    // Relaxed ordering suffices: the flag is an independent on/off switch.
    fn open(&mut self) {
        self.inner.is_open.store(true, Ordering::Relaxed);
    }
    fn close(&mut self) {
        self.inner.is_open.store(false, Ordering::Relaxed);
    }
}
impl<T> Default for EventSlot<T> {
    /// Creates an open `EventSlot`, same as [`EventSlot::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl<T> fmt::Debug for EventSlot<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("EventSlot").finish_non_exhaustive()
    }
}
/// A writer handle of an `EventSlot`.
pub struct EventSlotWriter<T> {
    // State shared with the owning `EventSlot` and all other writers.
    inner: Arc<Inner<T>>,
}
impl<T: Send + 'static> EventSinkWriter<T> for EventSlotWriter<T> {
    /// Writes an event into the slot, overwriting any previous value.
    fn write(&self, event: T) {
        // Ignore if the sink is closed.
        if !self.inner.is_open.load(Ordering::Relaxed) {
            return;
        }
        // Why do we just use `try_lock` and abandon if the lock is taken? The
        // reason is that (i) the reader is never supposed to access the slot
        // when the simulation runs and (ii) as a rule the simulator does not
        // warrant fairness when concurrently writing to an input. Therefore, if
        // the mutex is already locked when this writer attempts to lock it, it
        // means another writer is concurrently writing an event, and that event
        // is just as legitimate as ours so there is no need to overwrite it.
        match self.inner.slot.try_lock() {
            TryLockResult::Ok(mut v) => *v = Some(event),
            TryLockResult::Err(TryLockError::WouldBlock) => {}
            TryLockResult::Err(TryLockError::Poisoned(_)) => panic!(),
        }
    }
}
impl<T> Clone for EventSlotWriter<T> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T> fmt::Debug for EventSlotWriter<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Report the actual type name (previously printed the stale name
        // "EventStreamWriter").
        f.debug_struct("EventSlotWriter").finish_non_exhaustive()
    }
}

343
nexosim/src/ports/source.rs Normal file
View File

@ -0,0 +1,343 @@
mod broadcaster;
mod sender;
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
use crate::model::Model;
use crate::ports::InputFn;
use crate::simulation::{
Action, ActionKey, Address, KeyedOnceAction, KeyedPeriodicAction, OnceAction, PeriodicAction,
};
use crate::util::slot;
use crate::util::unwrap_or_throw::UnwrapOrThrow;
use broadcaster::{EventBroadcaster, QueryBroadcaster, ReplyIterator};
use sender::{
FilterMapInputSender, FilterMapReplierSender, InputSender, MapInputSender, MapReplierSender,
ReplierSender,
};
use super::ReplierFn;
/// An event source port.
///
/// The `EventSource` port is similar to an [`Output`](crate::ports::Output)
/// port in that it can send events to connected input ports. It is not meant,
/// however, to be instantiated as a member of a model, but rather as a
/// simulation control endpoint instantiated during bench assembly.
pub struct EventSource<T: Clone + Send + 'static> {
    // Fans events out to all connected input ports.
    broadcaster: EventBroadcaster<T>,
}
impl<T: Clone + Send + 'static> EventSource<T> {
    /// Creates a disconnected `EventSource` port.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds a connection to an input port of the model specified by the
    /// address.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of type `T` plus, optionally, a scheduler
    /// reference.
    pub fn connect<M, F, S>(&mut self, input: F, address: impl Into<Address<M>>)
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone + Sync,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(InputSender::new(input, address.into().0));
        self.broadcaster.add(sender);
    }
    /// Adds an auto-converting connection to an input port of the model
    /// specified by the address.
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of the type returned by the mapping closure
    /// plus, optionally, a context reference.
    pub fn map_connect<M, C, F, U, S>(&mut self, map: C, input: F, address: impl Into<Address<M>>)
    where
        M: Model,
        C: for<'a> Fn(&'a T) -> U + Send + Sync + 'static,
        F: for<'a> InputFn<'a, M, U, S> + Sync + Clone,
        U: Send + 'static,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(MapInputSender::new(map, input, address.into().0));
        self.broadcaster.add(sender);
    }
    /// Adds an auto-converting, filtered connection to an input port of the
    /// model specified by the address.
    ///
    /// Events are mapped to another type using the closure provided in
    /// argument, or ignored if the closure returns `None`.
    ///
    /// The input port must be an asynchronous method of a model of type `M`
    /// taking as argument a value of the type returned by the mapping closure
    /// plus, optionally, a context reference.
    pub fn filter_map_connect<M, C, F, U, S>(
        &mut self,
        map: C,
        input: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: for<'a> Fn(&'a T) -> Option<U> + Send + Sync + 'static,
        F: for<'a> InputFn<'a, M, U, S> + Clone + Sync,
        U: Send + 'static,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(FilterMapInputSender::new(map, input, address.into().0));
        self.broadcaster.add(sender);
    }
    /// Returns an action which, when processed, broadcasts an event to all
    /// connected input ports.
    pub fn event(&self, arg: T) -> Action {
        let fut = self.broadcaster.broadcast(arg);
        // A send error (dropped mailbox) is surfaced by throwing from the
        // action's future.
        let fut = async {
            fut.await.unwrap_or_throw();
        };
        Action::new(OnceAction::new(fut))
    }
    /// Returns a cancellable action and a cancellation key; when processed, the
    /// action broadcasts an event to all connected input ports.
    pub fn keyed_event(&self, arg: T) -> (Action, ActionKey) {
        let action_key = ActionKey::new();
        let fut = self.broadcaster.broadcast(arg);
        let action = Action::new(KeyedOnceAction::new(
            // Cancellation is ignored once the action is already spawned on the
            // executor. This means the action cannot be cancelled once the
            // simulation step targeted by the action is running, but since an
            // event source is meant to be used outside the simulator, this
            // shouldn't be an issue in practice.
            |_| async {
                fut.await.unwrap_or_throw();
            },
            action_key.clone(),
        ));
        (action, action_key)
    }
    /// Returns a periodically recurring action which, when processed,
    /// broadcasts an event to all connected input ports.
    pub fn periodic_event(self: &Arc<Self>, period: Duration, arg: T) -> Action {
        // The source is cloned into the closure so the action can re-broadcast
        // on every period.
        let source = self.clone();
        Action::new(PeriodicAction::new(
            || async move {
                let fut = source.broadcaster.broadcast(arg);
                fut.await.unwrap_or_throw();
            },
            period,
        ))
    }
    /// Returns a cancellable, periodically recurring action and a cancellation
    /// key; when processed, the action broadcasts an event to all connected
    /// input ports.
    pub fn keyed_periodic_event(self: &Arc<Self>, period: Duration, arg: T) -> (Action, ActionKey) {
        let action_key = ActionKey::new();
        let source = self.clone();
        let action = Action::new(KeyedPeriodicAction::new(
            // Cancellation is ignored once the action is already spawned on the
            // executor. This means the action cannot be cancelled while the
            // simulation is running, but since an event source is meant to be
            // used outside the simulator, this shouldn't be an issue in
            // practice.
            |_| async move {
                let fut = source.broadcaster.broadcast(arg);
                fut.await.unwrap_or_throw();
            },
            period,
            action_key.clone(),
        ));
        (action, action_key)
    }
}
impl<T: Clone + Send + 'static> Default for EventSource<T> {
    /// Creates a disconnected `EventSource` port.
    fn default() -> Self {
        Self {
            broadcaster: EventBroadcaster::default(),
        }
    }
}
impl<T: Clone + Send + 'static> fmt::Debug for EventSource<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Event source ({} connected ports)",
            self.broadcaster.len()
        )
    }
}
/// A query source port.
///
/// The `QuerySource` port is similar to an
/// [`Requestor`](crate::ports::Requestor) port in that it can send requests to
/// connected replier ports and receive replies. It is not meant, however, to be
/// instantiated as a member of a model, but rather as a simulation monitoring
/// endpoint instantiated during bench assembly.
pub struct QuerySource<T: Clone + Send + 'static, R: Send + 'static> {
    // Fans queries out to all connected replier ports and collects replies.
    broadcaster: QueryBroadcaster<T, R>,
}
impl<T: Clone + Send + 'static, R: Send + 'static> QuerySource<T, R> {
    /// Creates a disconnected `QuerySource` port.
    pub fn new() -> Self {
        Self::default()
    }
    /// Adds a connection to a replier port of the model specified by the
    /// address.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of type `R` and taking as argument a value of type `T`
    /// plus, optionally, a context reference.
    pub fn connect<M, F, S>(&mut self, replier: F, address: impl Into<Address<M>>)
    where
        M: Model,
        F: for<'a> ReplierFn<'a, M, T, R, S> + Clone + Sync,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(ReplierSender::new(replier, address.into().0));
        self.broadcaster.add(sender);
    }
    /// Adds an auto-converting connection to a replier port of the model
    /// specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn map_connect<M, C, D, F, U, Q, S>(
        &mut self,
        query_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: for<'a> Fn(&'a T) -> U + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone + Sync,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(MapReplierSender::new(
            query_map,
            reply_map,
            replier,
            address.into().0,
        ));
        self.broadcaster.add(sender);
    }
    /// Adds an auto-converting, filtered connection to a replier port of the
    /// model specified by the address.
    ///
    /// Queries and replies are mapped to other types using the closures
    /// provided in argument, or ignored if the query closure returns `None`.
    ///
    /// The replier port must be an asynchronous method of a model of type `M`
    /// returning a value of the type returned by the reply mapping closure and
    /// taking as argument a value of the type returned by the query mapping
    /// closure plus, optionally, a context reference.
    pub fn filter_map_connect<M, C, D, F, U, Q, S>(
        &mut self,
        query_filter_map: C,
        reply_map: D,
        replier: F,
        address: impl Into<Address<M>>,
    ) where
        M: Model,
        C: for<'a> Fn(&'a T) -> Option<U> + Send + Sync + 'static,
        D: Fn(Q) -> R + Send + Sync + 'static,
        F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone + Sync,
        U: Send + 'static,
        Q: Send + 'static,
        S: Send + Sync + 'static,
    {
        let sender = Box::new(FilterMapReplierSender::new(
            query_filter_map,
            reply_map,
            replier,
            address.into().0,
        ));
        self.broadcaster.add(sender);
    }
    /// Returns an action which, when processed, broadcasts a query to all
    /// connected replier ports.
    pub fn query(&self, arg: T) -> (Action, ReplyReceiver<R>) {
        // The replies are delivered through a one-shot slot once the
        // broadcast future completes.
        let (writer, reader) = slot::slot();
        let fut = self.broadcaster.broadcast(arg);
        let fut = async move {
            let replies = fut.await.unwrap_or_throw();
            let _ = writer.write(replies);
        };
        let action = Action::new(OnceAction::new(fut));
        (action, ReplyReceiver::<R>(reader))
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> Default for QuerySource<T, R> {
    /// Creates a disconnected `QuerySource` port.
    fn default() -> Self {
        Self {
            broadcaster: QueryBroadcaster::default(),
        }
    }
}
impl<T: Clone + Send + 'static, R: Send + 'static> fmt::Debug for QuerySource<T, R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(
            f,
            "Query source ({} connected ports)",
            self.broadcaster.len()
        )
    }
}
/// A receiver for all replies collected from a single query broadcast.
pub struct ReplyReceiver<R>(slot::SlotReader<ReplyIterator<R>>);
impl<R> ReplyReceiver<R> {
    /// Returns all replies to a query.
    ///
    /// Returns `None` if the replies are not yet available or if they were
    /// already taken in a previous call to `take`.
    pub fn take(&mut self) -> Option<impl Iterator<Item = R>> {
        self.0.try_read().ok()
    }
}
impl<R> fmt::Debug for ReplyReceiver<R> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Replies")
    }
}

View File

@ -0,0 +1,903 @@
use std::future::Future;
use std::mem;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::vec;
use pin_project::pin_project;
use diatomic_waker::WakeSink;
use super::sender::{Sender, SenderFuture};
use crate::channel::SendError;
use crate::util::task_set::TaskSet;
/// An object that can efficiently broadcast messages to several addresses.
///
/// This is very similar to `output::broadcaster::BroadcasterInner`, but
/// generates owned futures instead.
///
/// This object maintains a list of senders associated to each target address.
/// When a message is broadcast, the sender futures are awaited in parallel.
/// This is somewhat similar to what `FuturesOrdered` in the `futures` crate
/// does, but the outputs of all sender futures are returned all at once rather
/// than with an asynchronous iterator (a.k.a. async stream).
pub(super) struct BroadcasterInner<T: Clone, R> {
    /// The list of senders with their associated line identifier.
    senders: Vec<Box<dyn Sender<T, R>>>,
}
impl<T: Clone, R> BroadcasterInner<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1` due to limitations inherent to the task set
    /// implementation.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) {
        assert!(self.senders.len() < (u32::MAX as usize - 2));
        self.senders.push(sender);
    }
    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.senders.len()
    }
    /// Returns a list of futures broadcasting an event or query to multiple
    /// addresses.
    ///
    /// Senders whose `send`/`send_owned` returns `None` (filtered-out
    /// messages) contribute no future.
    fn futures(&self, arg: T) -> Vec<SenderFutureState<R>> {
        let mut future_states = Vec::new();
        // Broadcast the message and collect all futures.
        let mut iter = self.senders.iter();
        while let Some(sender) = iter.next() {
            // Move the argument for the last future to avoid undue cloning.
            if iter.len() == 0 {
                if let Some(fut) = sender.send_owned(arg) {
                    future_states.push(SenderFutureState::Pending(fut));
                }
                break;
            }
            if let Some(fut) = sender.send(&arg) {
                future_states.push(SenderFutureState::Pending(fut));
            }
        }
        future_states
    }
}
impl<T: Clone, R> Default for BroadcasterInner<T, R> {
    /// Creates a broadcaster with no connected senders.
    fn default() -> Self {
        Self {
            senders: Vec::new(),
        }
    }
}
/// An object that can efficiently broadcast events to several input ports.
///
/// This is very similar to `output::broadcaster::EventBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct EventBroadcaster<T: Clone> {
    /// The broadcaster core object.
    inner: BroadcasterInner<T, ()>,
}
impl<T: Clone + Send> EventBroadcaster<T> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1` due to limitations inherent to the task set
    /// implementation.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, ()>>) {
        self.inner.add(sender);
    }
    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }
    /// Broadcasts an event to all addresses.
    ///
    /// The returned future is specialized on the sender count so the common
    /// 0- and 1-sender cases avoid the allocation and bookkeeping of
    /// `BroadcastFuture`.
    pub(super) fn broadcast(&self, arg: T) -> impl Future<Output = Result<(), SendError>> + Send {
        enum Fut<F1, F2> {
            Empty,
            Single(F1),
            Multiple(F2),
        }
        let fut = match self.inner.senders.as_slice() {
            // No sender.
            [] => Fut::Empty,
            // One sender at most.
            [sender] => Fut::Single(sender.send_owned(arg)),
            // Possibly multiple senders.
            _ => Fut::Multiple(self.inner.futures(arg)),
        };
        async {
            match fut {
                // No sender.
                Fut::Empty | Fut::Single(None) => Ok(()),
                Fut::Single(Some(fut)) => fut.await,
                Fut::Multiple(mut futures) => match futures.as_mut_slice() {
                    // No sender.
                    [] => Ok(()),
                    // One sender.
                    [SenderFutureState::Pending(fut)] => fut.await,
                    // Multiple senders.
                    _ => BroadcastFuture::new(futures).await.map(|_| ()),
                },
            }
        }
    }
}
impl<T: Clone> Default for EventBroadcaster<T> {
    /// Creates an event broadcaster with no connected senders.
    fn default() -> Self {
        Self {
            inner: BroadcasterInner::default(),
        }
    }
}
/// An object that can efficiently broadcast queries to several replier ports.
///
/// This is very similar to `output::broadcaster::QueryBroadcaster`, but
/// generates owned futures instead.
///
/// See `BroadcasterInner` for implementation details.
pub(super) struct QueryBroadcaster<T: Clone, R> {
    /// The broadcaster core object.
    inner: BroadcasterInner<T, R>,
}
impl<T: Clone + Send, R: Send> QueryBroadcaster<T, R> {
    /// Adds a new sender associated to the specified identifier.
    ///
    /// # Panics
    ///
    /// This method will panic if the total count of senders would reach
    /// `u32::MAX - 1` due to limitations inherent to the task set
    /// implementation.
    pub(super) fn add(&mut self, sender: Box<dyn Sender<T, R>>) {
        self.inner.add(sender);
    }
    /// Returns the number of connected senders.
    pub(super) fn len(&self) -> usize {
        self.inner.len()
    }
    /// Broadcasts a query to all addresses and collects all replies.
    ///
    /// The returned future is specialized on the sender count so the common
    /// 0- and 1-sender cases avoid the allocation and bookkeeping of
    /// `BroadcastFuture`.
    pub(super) fn broadcast(
        &self,
        arg: T,
    ) -> impl Future<Output = Result<ReplyIterator<R>, SendError>> + Send {
        enum Fut<F1, F2> {
            Empty,
            Single(F1),
            Multiple(F2),
        }
        let fut = match self.inner.senders.as_slice() {
            // No sender.
            [] => Fut::Empty,
            // One sender at most.
            [sender] => Fut::Single(sender.send_owned(arg)),
            // Possibly multiple senders.
            _ => Fut::Multiple(self.inner.futures(arg)),
        };
        async {
            match fut {
                // No sender.
                Fut::Empty | Fut::Single(None) => Ok(ReplyIterator(Vec::new().into_iter())),
                Fut::Single(Some(fut)) => fut
                    .await
                    .map(|reply| ReplyIterator(vec![SenderFutureState::Ready(reply)].into_iter())),
                Fut::Multiple(mut futures) => match futures.as_mut_slice() {
                    // No sender.
                    [] => Ok(ReplyIterator(Vec::new().into_iter())),
                    // One sender.
                    [SenderFutureState::Pending(fut)] => fut.await.map(|reply| {
                        ReplyIterator(vec![SenderFutureState::Ready(reply)].into_iter())
                    }),
                    // Multiple senders.
                    _ => BroadcastFuture::new(futures).await,
                },
            }
        }
    }
}
impl<T: Clone, R> Default for QueryBroadcaster<T, R> {
    /// Creates a query broadcaster with no connected senders.
    fn default() -> Self {
        Self {
            inner: BroadcasterInner::default(),
        }
    }
}
#[pin_project]
/// A future aggregating the outputs of a collection of sender futures.
///
/// The idea is to join all sender futures as efficiently as possible, meaning:
///
/// - the sender futures are polled simultaneously rather than waiting for their
///   completion in a sequential manner,
/// - the happy path (all futures immediately ready) is very fast.
pub(super) struct BroadcastFuture<R> {
    // Thread-safe waker handle.
    wake_sink: WakeSink,
    // Tasks associated to the sender futures.
    task_set: TaskSet,
    // List of all sender futures or their outputs.
    future_states: Vec<SenderFutureState<R>>,
    // The total count of futures that have not yet been polled to completion.
    pending_futures_count: usize,
    // State of completion of the future.
    state: FutureState,
}
impl<R> BroadcastFuture<R> {
    /// Creates a new `BroadcastFuture`.
    ///
    /// One task-set slot is allocated per sender future so each sub-future
    /// gets its own waker.
    fn new(future_states: Vec<SenderFutureState<R>>) -> Self {
        let wake_sink = WakeSink::new();
        let wake_src = wake_sink.source();
        let pending_futures_count = future_states.len();
        BroadcastFuture {
            wake_sink,
            task_set: TaskSet::with_len(wake_src, pending_futures_count),
            future_states,
            pending_futures_count,
            state: FutureState::Uninit,
        }
    }
}
impl<R> Future for BroadcastFuture<R> {
    type Output = Result<ReplyIterator<R>, SendError>;

    /// Drives all sender futures to completion, returning an iterator over
    /// their replies, or the first `SendError` encountered.
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Plain mutable access is fine: the sender futures are boxed, so
        // `BroadcastFuture` contains no field that requires structural
        // pinning.
        let this = &mut *self;

        // Polling after completion is a contract violation.
        assert_ne!(this.state, FutureState::Completed);

        // Poll all sender futures once if this is the first time the broadcast
        // future is polled.
        if this.state == FutureState::Uninit {
            for task_idx in 0..this.future_states.len() {
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    // Each sender future gets a dedicated waker so the task
                    // set can record which sub-future made progress.
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(SendError)) => {
                            // Any sender failure fails the whole broadcast.
                            this.state = FutureState::Completed;

                            return Poll::Ready(Err(SendError));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            // Happy path: every sender future was immediately ready.
            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }

            this.state = FutureState::Pending;
        }

        // Repeatedly poll the futures of all scheduled tasks until there are no
        // more scheduled tasks.
        loop {
            // No need to register the waker if some tasks have been scheduled.
            if !this.task_set.has_scheduled() {
                this.wake_sink.register(cx.waker());
            }

            // Retrieve the indices of the scheduled tasks if any. If there are
            // no scheduled tasks, `Poll::Pending` is returned and this future
            // will be awaken again when enough tasks have been scheduled.
            //
            // NOTE: the current implementation requires a notification to be
            // sent each time a sub-future has made progress. We may try at some
            // point to benchmark an alternative strategy where a notification
            // is requested only when all pending sub-futures have made progress,
            // using `take_scheduled(this.pending_futures_count)`. This would
            // reduce the cost of context switch but could hurt latency.
            let scheduled_tasks = match this.task_set.take_scheduled(1) {
                Some(st) => st,
                None => return Poll::Pending,
            };

            for task_idx in scheduled_tasks {
                // A scheduled task may already have completed (e.g. after a
                // spurious wake-up), in which case there is nothing to poll.
                if let SenderFutureState::Pending(future) = &mut this.future_states[task_idx] {
                    let task_waker_ref = this.task_set.waker_of(task_idx);
                    let task_cx_ref = &mut Context::from_waker(&task_waker_ref);

                    match future.as_mut().poll(task_cx_ref) {
                        Poll::Ready(Ok(output)) => {
                            this.future_states[task_idx] = SenderFutureState::Ready(output);
                            this.pending_futures_count -= 1;
                        }
                        Poll::Ready(Err(SendError)) => {
                            // Any sender failure fails the whole broadcast.
                            this.state = FutureState::Completed;

                            return Poll::Ready(Err(SendError));
                        }
                        Poll::Pending => {}
                    }
                }
            }

            // All replies collected: hand them out as an iterator.
            if this.pending_futures_count == 0 {
                this.state = FutureState::Completed;
                let outputs = mem::take(&mut this.future_states).into_iter();

                return Poll::Ready(Ok(ReplyIterator(outputs)));
            }
        }
    }
}
/// State of completion of a `BroadcastFuture`.
//
// `Eq` accompanies `PartialEq` (clippy: `derive_partial_eq_without_eq`) and
// `Clone`/`Copy` are free for a fieldless enum.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum FutureState {
    /// The sender futures have not been polled yet.
    Uninit,
    /// At least one sender future is still pending.
    Pending,
    /// The broadcast future has resolved or failed; it must not be polled
    /// again.
    Completed,
}
/// The state of a `SenderFuture`.
enum SenderFutureState<R> {
    // The sender future has not completed yet; it is stored in-line so it can
    // be polled in place.
    Pending(SenderFuture<R>),
    // The sender future has completed with this reply.
    Ready(R),
}
/// An iterator over the replies to a broadcasted request.
///
/// Every entry is expected to be `Ready`: the broadcast future only builds
/// this iterator once all sender futures have completed.
pub(crate) struct ReplyIterator<R>(vec::IntoIter<SenderFutureState<R>>);
impl<R> Iterator for ReplyIterator<R> {
    type Item = R;

    /// Yields the next reply; panics if an entry is still pending, which
    /// would indicate a logic error in the broadcast future.
    fn next(&mut self) -> Option<Self::Item> {
        match self.0.next()? {
            SenderFutureState::Ready(reply) => Some(reply),
            SenderFutureState::Pending(_) => panic!("reply missing in replies iterator"),
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
#[cfg(all(test, not(nexosim_loom)))]
mod tests {
    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    use futures_executor::block_on;

    use crate::channel::Receiver;

    use super::super::sender::{
        FilterMapInputSender, FilterMapReplierSender, InputSender, ReplierSender,
    };
    use super::*;
    use crate::model::{Context, Model};

    /// A test model that adds each received value to a shared atomic counter.
    struct SumModel {
        inner: Arc<AtomicUsize>,
    }
    impl SumModel {
        fn new(counter: Arc<AtomicUsize>) -> Self {
            Self { inner: counter }
        }
        // Input port: accumulate `by` into the shared counter.
        async fn increment(&mut self, by: usize) {
            self.inner.fetch_add(by, Ordering::Relaxed);
        }
    }
    impl Model for SumModel {}

    /// A test model whose replier port returns twice the queried value.
    struct DoubleModel {}
    impl DoubleModel {
        fn new() -> Self {
            Self {}
        }
        // Replier port: double the query value.
        async fn double(&mut self, value: usize) -> usize {
            2 * value
        }
    }
    impl Model for DoubleModel {}

    // An event broadcast to N receivers should be processed by all of them.
    #[test]
    fn broadcast_event_smoke() {
        const N_RECV: usize = 4;
        const MESSAGE: usize = 42;

        let mut mailboxes = Vec::new();
        let mut broadcaster = EventBroadcaster::default();
        for _ in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(InputSender::new(SumModel::increment, address));

            broadcaster.add(sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(broadcaster.broadcast(MESSAGE)).unwrap();
        });

        let sum = Arc::new(AtomicUsize::new(0));

        // Each receiver processes exactly one message on its own thread.
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut sum_model = SumModel::new(sum.clone());
                    move || {
                        let mut dummy_cx = Context::new_dummy();
                        block_on(mailbox.recv(&mut sum_model, &mut dummy_cx)).unwrap();
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(sum.load(Ordering::Relaxed), N_RECV * MESSAGE);
    }

    // Filter-mapped event broadcast: an ID-specific message must reach only
    // the matching receiver, while the special `BROADCAST_ALL` value must
    // reach all of them.
    #[test]
    fn broadcast_event_filter_map() {
        const N_RECV: usize = 4;
        const BROADCAST_ALL: usize = 42; // special ID signaling that the message must reach all receivers.

        let mut mailboxes = Vec::new();
        let mut broadcaster = EventBroadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let id_filter_sender = Box::new(FilterMapInputSender::new(
                move |x: &usize| (*x == id || *x == BROADCAST_ALL).then_some(*x),
                SumModel::increment,
                address,
            ));

            broadcaster.add(id_filter_sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(async {
                // Send messages reaching only one receiver each.
                for id in 0..N_RECV {
                    broadcaster.broadcast(id).await.unwrap();
                }

                // Broadcast the special value to all receivers.
                broadcaster.broadcast(BROADCAST_ALL).await.unwrap();

                // Send again messages reaching only one receiver each.
                for id in 0..N_RECV {
                    broadcaster.broadcast(id).await.unwrap();
                }
            })
        });

        let sum = Arc::new(AtomicUsize::new(0));

        // Spawn all models.
        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut sum_model = SumModel::new(sum.clone());
                    move || {
                        let mut dummy_cx = Context::new_dummy();
                        // Each receiver sees 3 messages: its own ID (twice)
                        // and the broadcast-all value (once).
                        block_on(async {
                            mailbox.recv(&mut sum_model, &mut dummy_cx).await.unwrap();
                            mailbox.recv(&mut sum_model, &mut dummy_cx).await.unwrap();
                            mailbox.recv(&mut sum_model, &mut dummy_cx).await.unwrap();
                        });
                    }
                })
            })
            .collect();

        th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(
            sum.load(Ordering::Relaxed),
            N_RECV * ((N_RECV - 1) + BROADCAST_ALL) // Twice the sum of all IDs + N_RECV times the special value
        );
    }

    // A query broadcast to N replier models should collect all N replies.
    #[test]
    fn broadcast_query_smoke() {
        const N_RECV: usize = 4;
        const MESSAGE: usize = 42;

        let mut mailboxes = Vec::new();
        let mut broadcaster = QueryBroadcaster::default();
        for _ in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(ReplierSender::new(DoubleModel::double, address));

            broadcaster.add(sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            let iter = block_on(broadcaster.broadcast(MESSAGE)).unwrap();
            iter.sum::<usize>()
        });

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut double_model = DoubleModel::new();
                    move || {
                        let mut dummy_cx = Context::new_dummy();
                        block_on(mailbox.recv(&mut double_model, &mut dummy_cx)).unwrap();
                        // NOTE(review): presumably keeps the mailbox alive
                        // until the reply has been consumed — confirm.
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        let sum = th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(sum, N_RECV * MESSAGE * 2);
    }

    // Filter-mapped query broadcast: queries are filtered by ID, the model
    // doubles each accepted query and the reply is tripled on the way back.
    #[test]
    fn broadcast_query_filter_map() {
        const N_RECV: usize = 4;
        const BROADCAST_ALL: usize = 42; // special ID signaling that the message must reach all receivers.

        let mut mailboxes = Vec::new();
        let mut broadcaster = QueryBroadcaster::default();
        for id in 0..N_RECV {
            let mailbox = Receiver::new(10);
            let address = mailbox.sender();
            let sender = Box::new(FilterMapReplierSender::new(
                move |x: &usize| (*x == id || *x == BROADCAST_ALL).then_some(*x),
                |x| 3 * x,
                DoubleModel::double,
                address,
            ));

            broadcaster.add(sender);
            mailboxes.push(mailbox);
        }

        let th_broadcast = thread::spawn(move || {
            block_on(async {
                let mut sum = 0;

                // Send messages reaching only one receiver each.
                for id in 0..N_RECV {
                    sum += broadcaster.broadcast(id).await.unwrap().sum::<usize>();
                }

                // Broadcast the special value to all receivers.
                sum += broadcaster
                    .broadcast(BROADCAST_ALL)
                    .await
                    .unwrap()
                    .sum::<usize>();

                // Send again messages reaching only one receiver each.
                for id in 0..N_RECV {
                    sum += broadcaster.broadcast(id).await.unwrap().sum::<usize>();
                }

                sum
            })
        });

        let th_recv: Vec<_> = mailboxes
            .into_iter()
            .map(|mut mailbox| {
                thread::spawn({
                    let mut double_model = DoubleModel::new();
                    move || {
                        let mut dummy_cx = Context::new_dummy();
                        // Each receiver sees 3 queries: its own ID (twice)
                        // and the broadcast-all value (once).
                        block_on(async {
                            mailbox
                                .recv(&mut double_model, &mut dummy_cx)
                                .await
                                .unwrap();
                            mailbox
                                .recv(&mut double_model, &mut dummy_cx)
                                .await
                                .unwrap();
                            mailbox
                                .recv(&mut double_model, &mut dummy_cx)
                                .await
                                .unwrap();
                        });
                        // NOTE(review): presumably keeps the mailbox alive
                        // until the replies have been consumed — confirm.
                        thread::sleep(std::time::Duration::from_millis(100));
                    }
                })
            })
            .collect();

        let sum = th_broadcast.join().unwrap();
        for th in th_recv {
            th.join().unwrap();
        }

        assert_eq!(
            sum,
            N_RECV * ((N_RECV - 1) + BROADCAST_ALL) * 2 * 3, // Twice the sum of all IDs + N_RECV times the special value, then doubled and tripled
        );
    }
}
#[cfg(all(test, nexosim_loom))]
mod tests {
    use std::sync::Mutex;

    use futures_channel::mpsc;
    use futures_util::StreamExt;
    use loom::model::Builder;
    use loom::sync::atomic::{AtomicBool, Ordering};
    use loom::thread;
    use waker_fn::waker_fn;

    use super::*;

    // An event that may be waken spuriously.
    struct TestEvent<R> {
        // The receiver is actually used only once in tests, so it is moved out
        // of the `Option` on first use.
        receiver: Mutex<Option<mpsc::UnboundedReceiver<Option<R>>>>,
    }
    impl<R: Send + 'static> Sender<(), R> for TestEvent<R> {
        fn send(
            &self,
            _arg: &(),
        ) -> Option<Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>>> {
            let receiver = self.receiver.lock().unwrap().take().unwrap();

            Some(Box::pin(async move {
                // `None` items model spurious wake-ups and are skipped; the
                // first `Some` item is the final value.
                let mut stream = Box::pin(receiver.filter_map(|item| async { item }));

                Ok(stream.next().await.unwrap())
            }))
        }
    }

    // An object that can wake a `TestEvent`.
    #[derive(Clone)]
    struct TestEventWaker<R> {
        sender: mpsc::UnboundedSender<Option<R>>,
    }
    impl<R> TestEventWaker<R> {
        // Wakes the event without completing it.
        fn wake_spurious(&self) {
            let _ = self.sender.unbounded_send(None);
        }
        // Wakes the event and completes it with `value`.
        fn wake_final(&self, value: R) {
            let _ = self.sender.unbounded_send(Some(value));
        }
    }

    /// Creates a connected event/waker pair.
    fn test_event<R>() -> (TestEvent<R>, TestEventWaker<R>) {
        let (sender, receiver) = mpsc::unbounded();

        (
            TestEvent {
                receiver: Mutex::new(Some(receiver)),
            },
            TestEventWaker { sender },
        )
    }

    // This tests fails with "Concurrent load and mut accesses" even though the
    // `task_list` implementation which triggers it does not use any unsafe.
    // This is most certainly related to this Loom bug:
    //
    // https://github.com/tokio-rs/loom/issues/260
    //
    // Disabling until the bug is fixed.
    #[ignore]
    #[test]
    fn loom_broadcast_query_basic() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();
            let (test_event3, waker3) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1));
            broadcaster.add(Box::new(test_event2));
            broadcaster.add(Box::new(test_event3));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            // Each sender future is completed from its own thread.
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th3 = thread::spawn(move || waker3.wake_final(42));

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), Some(42));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th3.join().unwrap();

            // All wakers fired, so the broadcast future must have been
            // rescheduled and must now complete on the next poll.
            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), Some(42));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }

    // This tests fails with "Concurrent load and mut accesses" even though the
    // `task_list` implementation which triggers it does not use any unsafe.
    // This is most certainly related to this Loom bug:
    //
    // https://github.com/tokio-rs/loom/issues/260
    //
    // Disabling until the bug is fixed.
    #[ignore]
    #[test]
    fn loom_broadcast_query_spurious() {
        const DEFAULT_PREEMPTION_BOUND: usize = 3;

        let mut builder = Builder::new();
        if builder.preemption_bound.is_none() {
            builder.preemption_bound = Some(DEFAULT_PREEMPTION_BOUND);
        }

        builder.check(move || {
            let (test_event1, waker1) = test_event::<usize>();
            let (test_event2, waker2) = test_event::<usize>();

            let mut broadcaster = QueryBroadcaster::default();
            broadcaster.add(Box::new(test_event1));
            broadcaster.add(Box::new(test_event2));

            let mut fut = Box::pin(broadcaster.broadcast(()));
            let is_scheduled = loom::sync::Arc::new(AtomicBool::new(false));
            let is_scheduled_waker = is_scheduled.clone();

            let waker = waker_fn(move || {
                // We use swap rather than a plain store to work around this
                // bug: <https://github.com/tokio-rs/loom/issues/254>
                is_scheduled_waker.swap(true, Ordering::Release);
            });
            let mut cx = Context::from_waker(&waker);

            // The first event is additionally woken spuriously (without a
            // value) from a separate thread.
            let spurious_waker = waker1.clone();
            let th1 = thread::spawn(move || waker1.wake_final(3));
            let th2 = thread::spawn(move || waker2.wake_final(7));
            let th_spurious = thread::spawn(move || spurious_waker.wake_spurious());

            loop {
                match fut.as_mut().poll(&mut cx) {
                    Poll::Ready(Ok(mut res)) => {
                        assert_eq!(res.next(), Some(3));
                        assert_eq!(res.next(), Some(7));
                        assert_eq!(res.next(), None);

                        return;
                    }
                    Poll::Ready(Err(_)) => panic!("sender error"),
                    Poll::Pending => {}
                }

                // If the task has not been scheduled, exit the polling loop.
                if !is_scheduled.swap(false, Ordering::Acquire) {
                    break;
                }
            }

            th1.join().unwrap();
            th2.join().unwrap();
            th_spurious.join().unwrap();

            // All wakers fired, so the broadcast future must have been
            // rescheduled and must now complete on the next poll.
            assert!(is_scheduled.load(Ordering::Acquire));

            match fut.as_mut().poll(&mut cx) {
                Poll::Ready(Ok(mut res)) => {
                    assert_eq!(res.next(), Some(3));
                    assert_eq!(res.next(), Some(7));
                    assert_eq!(res.next(), None);
                }
                Poll::Ready(Err(_)) => panic!("sender error"),
                Poll::Pending => panic!("the future has not completed"),
            };
        });
    }
}

View File

@ -0,0 +1,393 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::sync::Arc;
use futures_channel::oneshot;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel;
use crate::channel::SendError;
use crate::model::Model;
use crate::ports::{InputFn, ReplierFn};
/// Boxed future returned by a sender, resolving to the reply or a `SendError`.
pub(super) type SenderFuture<R> = Pin<Box<dyn Future<Output = Result<R, SendError>> + Send>>;
/// An event or query sender abstracting over the target model and input method.
pub(super) trait Sender<T, R>: Send + Sync {
    /// Asynchronously sends a message using a reference to the message.
    ///
    /// Returns `None` if the message was filtered out by the sender.
    fn send(&self, arg: &T) -> Option<SenderFuture<R>>;

    /// Asynchronously sends an owned message.
    ///
    /// By default this borrows the owned message and delegates to `send`;
    /// implementations may override it to avoid cloning.
    fn send_owned(&self, arg: T) -> Option<SenderFuture<R>> {
        self.send(&arg)
    }
}
/// An object that can send events to an input port.
///
/// Holds the target model's input method `func` and a channel `sender` to the
/// model's mailbox; the phantom markers only pin down the closure signature
/// and its marker type `S`.
pub(super) struct InputSender<M, F, T, S>
where
    M: 'static,
{
    func: F,
    sender: channel::Sender<M>,
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, F, T, S> InputSender<M, F, T, S>
where
    M: 'static,
{
    /// Creates a sender for the input method `func` of the model reachable
    /// through `sender`.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, F, T, S> Sender<T, ()> for InputSender<M, F, T, S>
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S> + Clone + Sync,
    T: Clone + Send + 'static,
    S: Send + Sync,
{
    // Sending by reference clones the event and delegates to `send_owned`.
    fn send(&self, arg: &T) -> Option<SenderFuture<()>> {
        self.send_owned(arg.clone())
    }

    // Moves the event into a future that delivers it to the model's mailbox.
    fn send_owned(&self, arg: T) -> Option<SenderFuture<()>> {
        let func = self.func.clone();
        let sender = self.sender.clone();

        Some(Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    // Build the input-method future in place, reusing the
                    // channel's recyclable allocation.
                    let fut = func.call(model, arg, scheduler);

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await
        }))
    }
}
/// An object that can send mapped events to an input port.
///
/// Before an event is forwarded to the input method `func`, it is transformed
/// by the `map` closure from `&T` into an owned `U`.
pub(super) struct MapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    map: C,
    func: F,
    sender: channel::Sender<M>,
    _phantom_map: PhantomData<fn(T) -> U>,
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, C, F, T, U, S> MapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Creates a mapping sender for the input method `func` of the model
    /// reachable through `sender`.
    pub(super) fn new(map: C, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            map,
            func,
            sender,
            _phantom_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, C, F, T, U, S> Sender<T, ()> for MapInputSender<M, C, F, T, U, S>
where
    M: Model,
    C: Fn(&T) -> U + Send + Sync,
    F: for<'a> InputFn<'a, M, U, S> + Clone + Sync,
    T: Send + 'static,
    U: Send + 'static,
    S: Send + Sync,
{
    // `send_owned` is not overridden: the map works from a reference, so the
    // default implementation (which borrows the owned event) is sufficient.
    fn send(&self, arg: &T) -> Option<SenderFuture<()>> {
        let func = self.func.clone();
        let arg = (self.map)(arg);
        let sender = self.sender.clone();

        Some(Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    // Build the input-method future in place, reusing the
                    // channel's recyclable allocation.
                    let fut = func.call(model, arg, scheduler);

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await
        }))
    }
}
/// An object that can filter and send mapped events to an input port.
///
/// The `filter_map` closure both selects which events are delivered and maps
/// them from `&T` to an owned `U`; events mapped to `None` are dropped.
pub(super) struct FilterMapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    filter_map: C,
    func: F,
    sender: channel::Sender<M>,
    _phantom_map: PhantomData<fn(T) -> U>,
    _phantom_closure: PhantomData<fn(&mut M, T)>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, C, F, T, U, S> FilterMapInputSender<M, C, F, T, U, S>
where
    M: 'static,
{
    /// Creates a filtering/mapping sender for the input method `func` of the
    /// model reachable through `sender`.
    pub(super) fn new(filter_map: C, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            filter_map,
            func,
            sender,
            _phantom_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, C, F, T, U, S> Sender<T, ()> for FilterMapInputSender<M, C, F, T, U, S>
where
    M: Model,
    C: Fn(&T) -> Option<U> + Send + Sync,
    F: for<'a> InputFn<'a, M, U, S> + Clone + Sync,
    T: Send + 'static,
    U: Send + 'static,
    S: Send + Sync,
{
    // Returns `None` (no future, nothing sent) when the filter rejects the
    // event.
    fn send(&self, arg: &T) -> Option<SenderFuture<()>> {
        (self.filter_map)(arg).map(|arg| {
            let func = self.func.clone();
            let sender = self.sender.clone();

            Box::pin(async move {
                sender
                    .send(move |model, scheduler, recycle_box| {
                        // Build the input-method future in place, reusing the
                        // channel's recyclable allocation.
                        let fut = func.call(model, arg, scheduler);

                        coerce_box!(RecycleBox::recycle(recycle_box, fut))
                    })
                    .await
            }) as SenderFuture<()>
        })
    }
}
/// An object that can send a request to a replier port and retrieve a response.
pub(super) struct ReplierSender<M, F, T, R, S>
where
    M: 'static,
{
    func: F,
    sender: channel::Sender<M>,
    _phantom_closure: PhantomData<fn(&mut M, T) -> R>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, F, T, R, S> ReplierSender<M, F, T, R, S>
where
    M: 'static,
{
    /// Creates a sender for the replier method `func` of the model reachable
    /// through `sender`.
    pub(super) fn new(func: F, sender: channel::Sender<M>) -> Self {
        Self {
            func,
            sender,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, F, T, R, S> Sender<T, R> for ReplierSender<M, F, T, R, S>
where
    M: Model,
    F: for<'a> ReplierFn<'a, M, T, R, S> + Clone + Sync,
    T: Clone + Send + 'static,
    R: Send + 'static,
    S: Send + Sync,
{
    // Sending by reference clones the query and delegates to `send_owned`.
    fn send(&self, arg: &T) -> Option<SenderFuture<R>> {
        self.send_owned(arg.clone())
    }

    fn send_owned(&self, arg: T) -> Option<SenderFuture<R>> {
        let func = self.func.clone();
        let sender = self.sender.clone();
        // One-shot channel over which the model task hands back the reply.
        let (reply_sender, reply_receiver) = oneshot::channel();

        Some(Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    let fut = async move {
                        let reply = func.call(model, arg, scheduler).await;
                        let _ = reply_sender.send(reply);
                    };

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await?;

            // A dropped `reply_sender` (the model never replied) surfaces as
            // a `SendError`.
            reply_receiver.await.map_err(|_| SendError)
        }))
    }
}
/// An object that can send a mapped request to a replier port and retrieve a
/// mapped response.
///
/// The query is transformed by `query_map` from `&T` into `U` before delivery,
/// and the model's reply `Q` is transformed by `reply_map` into `R` on the way
/// back. `reply_map` is stored in an `Arc` so it can be moved into the reply
/// future.
pub(super) struct MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: 'static,
{
    query_map: C,
    reply_map: Arc<D>,
    func: F,
    sender: channel::Sender<M>,
    _phantom_query_map: PhantomData<fn(T) -> U>,
    _phantom_reply_map: PhantomData<fn(Q) -> R>,
    _phantom_closure: PhantomData<fn(&mut M, U) -> Q>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, C, D, F, T, R, U, Q, S> MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: 'static,
{
    /// Creates a mapping sender for the replier method `func` of the model
    /// reachable through `sender`.
    pub(super) fn new(query_map: C, reply_map: D, func: F, sender: channel::Sender<M>) -> Self {
        Self {
            query_map,
            reply_map: Arc::new(reply_map),
            func,
            sender,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, C, D, F, T, R, U, Q, S> Sender<T, R> for MapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    C: Fn(&T) -> U + Send + Sync,
    D: Fn(Q) -> R + Send + Sync + 'static,
    F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone + Sync,
    T: Send + 'static,
    R: Send + 'static,
    U: Send + 'static,
    Q: Send + 'static,
    S: Send + Sync,
{
    fn send(&self, arg: &T) -> Option<SenderFuture<R>> {
        let func = self.func.clone();
        let arg = (self.query_map)(arg);
        let sender = self.sender.clone();
        let reply_map = self.reply_map.clone();
        // One-shot channel over which the model task hands back the raw reply.
        let (reply_sender, reply_receiver) = oneshot::channel();

        Some(Box::pin(async move {
            sender
                .send(move |model, scheduler, recycle_box| {
                    let fut = async move {
                        let reply = func.call(model, arg, scheduler).await;
                        let _ = reply_sender.send(reply);
                    };

                    coerce_box!(RecycleBox::recycle(recycle_box, fut))
                })
                .await?;

            // A dropped `reply_sender` surfaces as a `SendError`; otherwise
            // the raw reply is transformed by `reply_map`.
            reply_receiver.await.map_err(|_| SendError).map(&*reply_map)
        }))
    }
}
/// An object that can filter and send a mapped request to a replier port and
/// retrieve a mapped response.
///
/// `query_filter_map` both selects which queries are delivered and maps them
/// from `&T` to `U` (queries mapped to `None` are dropped); the model's reply
/// `Q` is transformed by `reply_map` into `R` on the way back.
pub(super) struct FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: 'static,
{
    query_filter_map: C,
    reply_map: Arc<D>,
    func: F,
    sender: channel::Sender<M>,
    _phantom_query_map: PhantomData<fn(T) -> Option<U>>,
    _phantom_reply_map: PhantomData<fn(Q) -> R>,
    _phantom_closure: PhantomData<fn(&mut M, U) -> Q>,
    _phantom_closure_marker: PhantomData<S>,
}

impl<M, C, D, F, T, R, U, Q, S> FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: 'static,
{
    /// Creates a filtering/mapping sender for the replier method `func` of the
    /// model reachable through `sender`.
    pub(super) fn new(
        query_filter_map: C,
        reply_map: D,
        func: F,
        sender: channel::Sender<M>,
    ) -> Self {
        Self {
            query_filter_map,
            reply_map: Arc::new(reply_map),
            func,
            sender,
            _phantom_query_map: PhantomData,
            _phantom_reply_map: PhantomData,
            _phantom_closure: PhantomData,
            _phantom_closure_marker: PhantomData,
        }
    }
}

impl<M, C, D, F, T, R, U, Q, S> Sender<T, R> for FilterMapReplierSender<M, C, D, F, T, R, U, Q, S>
where
    M: Model,
    C: Fn(&T) -> Option<U> + Send + Sync,
    D: Fn(Q) -> R + Send + Sync + 'static,
    F: for<'a> ReplierFn<'a, M, U, Q, S> + Clone + Sync,
    T: Send + 'static,
    R: Send + 'static,
    U: Send + 'static,
    Q: Send + 'static,
    S: Send + Sync,
{
    // Returns `None` (no future, nothing sent) when the filter rejects the
    // query.
    fn send(&self, arg: &T) -> Option<SenderFuture<R>> {
        (self.query_filter_map)(arg).map(|arg| {
            let func = self.func.clone();
            let sender = self.sender.clone();
            let reply_map = self.reply_map.clone();
            // One-shot channel over which the model task hands back the raw
            // reply.
            let (reply_sender, reply_receiver) = oneshot::channel();

            Box::pin(async move {
                sender
                    .send(move |model, scheduler, recycle_box| {
                        let fut = async move {
                            let reply = func.call(model, arg, scheduler).await;
                            let _ = reply_sender.send(reply);
                        };

                        coerce_box!(RecycleBox::recycle(recycle_box, fut))
                    })
                    .await?;

                // A dropped `reply_sender` surfaces as a `SendError`;
                // otherwise the raw reply is transformed by `reply_map`.
                reply_receiver.await.map_err(|_| SendError).map(&*reply_map)
            }) as SenderFuture<R>
        })
    }
}

75
nexosim/src/registry.rs Normal file
View File

@ -0,0 +1,75 @@
//! Registry for sinks and sources.
//!
//! This module provides the `EndpointRegistry` object which associates each
//! event sink, event source and query source in a simulation bench to a unique
//! name.
mod event_sink_registry;
mod event_source_registry;
mod query_source_registry;
use serde::{de::DeserializeOwned, ser::Serialize};
use crate::ports::{EventSinkStream, EventSource, QuerySource};
pub(crate) use event_sink_registry::EventSinkRegistry;
pub(crate) use event_source_registry::EventSourceRegistry;
pub(crate) use query_source_registry::QuerySourceRegistry;
/// A registry that holds the sources and sinks of a simulation bench.
#[derive(Default, Debug)]
pub struct EndpointRegistry {
    // Event sinks, indexed by name.
    pub(crate) event_sink_registry: EventSinkRegistry,
    // Event sources, indexed by name.
    pub(crate) event_source_registry: EventSourceRegistry,
    // Query sources, indexed by name.
    pub(crate) query_source_registry: QuerySourceRegistry,
}
impl EndpointRegistry {
    /// Creates a new, empty registry.
    pub fn new() -> Self {
        Self::default()
    }

    /// Adds an event source to the registry.
    ///
    /// If the specified name is already in use for another event source, the source
    /// provided as argument is returned in the error.
    pub fn add_event_source<T>(
        &mut self,
        source: EventSource<T>,
        name: impl Into<String>,
    ) -> Result<(), EventSource<T>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
    {
        // Delegates to the per-kind registry.
        self.event_source_registry.add(source, name)
    }

    /// Adds a query source to the registry.
    ///
    /// If the specified name is already in use for another query source, the
    /// source provided as argument is returned in the error.
    pub fn add_query_source<T, R>(
        &mut self,
        source: QuerySource<T, R>,
        name: impl Into<String>,
    ) -> Result<(), QuerySource<T, R>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
        R: Serialize + Send + 'static,
    {
        // Delegates to the per-kind registry.
        self.query_source_registry.add(source, name)
    }

    /// Adds an event sink to the registry.
    ///
    /// If the specified name is already in use for another event sink, the
    /// event sink provided as argument is returned in the error.
    pub fn add_event_sink<S>(&mut self, sink: S, name: impl Into<String>) -> Result<(), S>
    where
        S: EventSinkStream + Send + 'static,
        S::Item: Serialize,
    {
        // Delegates to the per-kind registry.
        self.event_sink_registry.add(sink, name)
    }
}

View File

@ -0,0 +1,93 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;
use ciborium;
use serde::Serialize;
use crate::ports::EventSinkStream;
/// Shorthand for the CBOR serialization error type.
type SerializationError = ciborium::ser::Error<std::io::Error>;
/// A registry that holds all sinks meant to be accessed through remote
/// procedure calls.
#[derive(Default)]
pub(crate) struct EventSinkRegistry(HashMap<String, Box<dyn EventSinkStreamAny>>);

impl EventSinkRegistry {
    /// Adds a sink to the registry.
    ///
    /// If the specified name is already in use for another sink, the sink
    /// provided as argument is returned in the error.
    pub(crate) fn add<S>(&mut self, sink: S, name: impl Into<String>) -> Result<(), S>
    where
        S: EventSinkStream + Send + 'static,
        S::Item: Serialize,
    {
        match self.0.entry(name.into()) {
            Entry::Vacant(s) => {
                // The sink is stored type-erased behind `EventSinkStreamAny`.
                s.insert(Box::new(sink));
                Ok(())
            }
            Entry::Occupied(_) => Err(sink),
        }
    }

    /// Returns a mutable reference to the specified sink if it is in the
    /// registry.
    pub(crate) fn get_mut(&mut self, name: &str) -> Option<&mut dyn EventSinkStreamAny> {
        self.0.get_mut(name).map(|s| s.as_mut())
    }
}
impl fmt::Debug for EventSinkRegistry {
    /// Formats a terse summary rather than listing every registered sink.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let sink_count = self.0.len();
        write!(f, "EventSinkRegistry ({} sinks)", sink_count)
    }
}
/// A type-erased `EventSinkStream`.
pub(crate) trait EventSinkStreamAny: Send + 'static {
    /// Human-readable name of the event type, as returned by
    /// `any::type_name`.
    fn event_type_name(&self) -> &'static str;

    /// Starts or resumes the collection of new events.
    fn open(&mut self);

    /// Pauses the collection of new events.
    fn close(&mut self);

    /// Encodes and collects all events into a vector of CBOR-encoded byte
    /// buffers, one per event.
    fn collect(&mut self) -> Result<Vec<Vec<u8>>, SerializationError>;
}
impl<E> EventSinkStreamAny for E
where
    E: EventSinkStream + Send + 'static,
    E::Item: Serialize,
{
    fn event_type_name(&self) -> &'static str {
        std::any::type_name::<E::Item>()
    }

    // NOTE(review): presumably resolves to `EventSinkStream::open` via the
    // `E: EventSinkStream` bound rather than recursing into this trait
    // method — confirm.
    fn open(&mut self) {
        self.open();
    }

    fn close(&mut self) {
        self.close();
    }

    // Drains the stream, CBOR-encoding each event into its own byte buffer;
    // stops at the first serialization error.
    fn collect(&mut self) -> Result<Vec<Vec<u8>>, SerializationError> {
        self.__try_fold(Vec::new(), |mut encoded_events, event| {
            let mut buffer = Vec::new();
            ciborium::into_writer(&event, &mut buffer).map(|_| {
                encoded_events.push(buffer);
                encoded_events
            })
        })
    }
}

View File

@ -0,0 +1,130 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;
use std::sync::Arc;
use std::time::Duration;
use ciborium;
use serde::de::DeserializeOwned;
use crate::ports::EventSource;
use crate::simulation::{Action, ActionKey};
/// Shorthand for the CBOR deserialization error type.
type DeserializationError = ciborium::de::Error<std::io::Error>;
/// A registry that holds all sources and sinks meant to be accessed through
/// remote procedure calls.
#[derive(Default)]
pub(crate) struct EventSourceRegistry(HashMap<String, Box<dyn EventSourceAny>>);
impl EventSourceRegistry {
    /// Adds an event source to the registry.
    ///
    /// If the specified name is already in use for another event source, the source
    /// provided as argument is returned in the error.
    pub(crate) fn add<T>(
        &mut self,
        source: EventSource<T>,
        name: impl Into<String>,
    ) -> Result<(), EventSource<T>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
    {
        match self.0.entry(name.into()) {
            Entry::Vacant(s) => {
                // Wrapped in `Arc` because the type-erased `EventSourceAny`
                // impl is on `Arc<EventSource<T>>`.
                s.insert(Box::new(Arc::new(source)));
                Ok(())
            }
            Entry::Occupied(_) => Err(source),
        }
    }

    /// Returns a reference to the specified event source if it is in
    /// the registry.
    pub(crate) fn get(&self, name: &str) -> Option<&dyn EventSourceAny> {
        self.0.get(name).map(|s| s.as_ref())
    }
}
impl fmt::Debug for EventSourceRegistry {
    /// Formats a terse summary rather than listing every registered source.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let source_count = self.0.len();
        write!(f, "EventSourceRegistry ({} sources)", source_count)
    }
}
/// A type-erased `EventSource` that operates on CBOR-encoded serialized events.
pub(crate) trait EventSourceAny: Send + Sync + 'static {
    /// Returns an action which, when processed, broadcasts an event to all
    /// connected input ports.
    ///
    /// The argument is expected to conform to the serde CBOR encoding.
    fn event(&self, serialized_arg: &[u8]) -> Result<Action, DeserializationError>;

    /// Returns a cancellable action and a cancellation key; when processed, the
    /// action broadcasts an event to all connected input ports.
    ///
    /// The argument is expected to conform to the serde CBOR encoding.
    fn keyed_event(
        &self,
        serialized_arg: &[u8],
    ) -> Result<(Action, ActionKey), DeserializationError>;

    /// Returns a periodically recurring action which, when processed,
    /// broadcasts an event to all connected input ports.
    ///
    /// The argument is expected to conform to the serde CBOR encoding.
    fn periodic_event(
        &self,
        period: Duration,
        serialized_arg: &[u8],
    ) -> Result<Action, DeserializationError>;

    /// Returns a cancellable, periodically recurring action and a cancellation
    /// key; when processed, the action broadcasts an event to all connected
    /// input ports.
    ///
    /// The argument is expected to conform to the serde CBOR encoding.
    fn keyed_periodic_event(
        &self,
        period: Duration,
        serialized_arg: &[u8],
    ) -> Result<(Action, ActionKey), DeserializationError>;

    /// Human-readable name of the event type, as returned by
    /// `any::type_name`.
    fn event_type_name(&self) -> &'static str;
}
impl<T> EventSourceAny for Arc<EventSource<T>>
where
    T: DeserializeOwned + Clone + Send + 'static,
{
    // Each method deserializes the CBOR argument and delegates to the
    // corresponding inherent `EventSource` method, using the fully qualified
    // form throughout: a bare `self.method(...)` on `Arc<EventSource<T>>`
    // resolves to this very trait impl (trait candidates are found before the
    // deref step that reaches the inherent method).
    fn event(&self, serialized_arg: &[u8]) -> Result<Action, DeserializationError> {
        ciborium::from_reader(serialized_arg).map(|arg| EventSource::event(self, arg))
    }
    fn keyed_event(
        &self,
        serialized_arg: &[u8],
    ) -> Result<(Action, ActionKey), DeserializationError> {
        ciborium::from_reader(serialized_arg).map(|arg| EventSource::keyed_event(self, arg))
    }
    fn periodic_event(
        &self,
        period: Duration,
        serialized_arg: &[u8],
    ) -> Result<Action, DeserializationError> {
        ciborium::from_reader(serialized_arg)
            .map(|arg| EventSource::periodic_event(self, period, arg))
    }
    fn keyed_periodic_event(
        &self,
        period: Duration,
        serialized_arg: &[u8],
    ) -> Result<(Action, ActionKey), DeserializationError> {
        // Fix: the previous `self.keyed_periodic_event(period, arg)` selected
        // this trait method itself, causing unconditional infinite recursion.
        ciborium::from_reader(serialized_arg)
            .map(|arg| EventSource::keyed_periodic_event(self, period, arg))
    }
    fn event_type_name(&self) -> &'static str {
        std::any::type_name::<T>()
    }
}

View File

@ -0,0 +1,128 @@
use std::collections::hash_map::Entry;
use std::collections::HashMap;
use std::fmt;
use ciborium;
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::ports::{QuerySource, ReplyReceiver};
use crate::simulation::Action;
type DeserializationError = ciborium::de::Error<std::io::Error>;
type SerializationError = ciborium::ser::Error<std::io::Error>;
/// A registry that holds all query sources meant to be accessed through
/// remote procedure calls.
///
/// Sources are keyed by their registration name and stored type-erased so
/// that heterogeneous `QuerySource<T, R>` instances can share one map.
#[derive(Default)]
pub(crate) struct QuerySourceRegistry(HashMap<String, Box<dyn QuerySourceAny>>);
impl QuerySourceRegistry {
    /// Registers a query source under the provided name.
    ///
    /// If the name is already taken by another query source, the source
    /// passed as argument is handed back in the `Err` variant.
    pub(crate) fn add<T, R>(
        &mut self,
        source: QuerySource<T, R>,
        name: impl Into<String>,
    ) -> Result<(), QuerySource<T, R>>
    where
        T: DeserializeOwned + Clone + Send + 'static,
        R: Serialize + Send + 'static,
    {
        match self.0.entry(name.into()) {
            Entry::Occupied(_) => Err(source),
            Entry::Vacant(entry) => {
                entry.insert(Box::new(source));

                Ok(())
            }
        }
    }
    /// Looks up a query source by name, returning a reference to its
    /// type-erased interface if present.
    pub(crate) fn get(&self, name: &str) -> Option<&dyn QuerySourceAny> {
        self.0.get(name).map(|source| &**source)
    }
}
impl fmt::Debug for QuerySourceRegistry {
    /// Summarizes the registry by the number of registered query sources.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let count = self.0.len();

        write!(f, "QuerySourceRegistry ({} query sources)", count)
    }
}
/// A type-erased `QuerySource` that operates on CBOR-encoded serialized queries
/// and returns CBOR-encoded replies.
pub(crate) trait QuerySourceAny: Send + Sync + 'static {
    /// Returns an action which, when processed, broadcasts a query to all
    /// connected replier ports, together with a type-erased receiver for the
    /// replies.
    ///
    /// The argument is expected to conform to the serde CBOR encoding.
    fn query(
        &self,
        arg: &[u8],
    ) -> Result<(Action, Box<dyn ReplyReceiverAny>), DeserializationError>;
    /// Human-readable name of the request type, as returned by
    /// `any::type_name`.
    fn request_type_name(&self) -> &'static str;
    /// Human-readable name of the reply type, as returned by
    /// `any::type_name`.
    fn reply_type_name(&self) -> &'static str;
}
impl<T, R> QuerySourceAny for QuerySource<T, R>
where
T: DeserializeOwned + Clone + Send + 'static,
R: Serialize + Send + 'static,
{
fn query(
&self,
arg: &[u8],
) -> Result<(Action, Box<dyn ReplyReceiverAny>), DeserializationError> {
ciborium::from_reader(arg).map(|arg| {
let (action, reply_recv) = self.query(arg);
let reply_recv: Box<dyn ReplyReceiverAny> = Box::new(reply_recv);
(action, reply_recv)
})
}
fn request_type_name(&self) -> &'static str {
std::any::type_name::<T>()
}
fn reply_type_name(&self) -> &'static str {
std::any::type_name::<R>()
}
}
/// A type-erased `ReplyReceiver` that returns CBOR-encoded replies.
pub(crate) trait ReplyReceiverAny {
    /// Take the replies, if any, encode them and collect them in a vector.
    ///
    /// Returns `None` when no replies are available; otherwise, returns the
    /// outcome of CBOR-encoding each reply.
    fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, SerializationError>>;
}
impl<R: Serialize + 'static> ReplyReceiverAny for ReplyReceiver<R> {
    /// Drains the pending replies and CBOR-encodes each of them in order,
    /// short-circuiting on the first serialization failure.
    fn take_collect(&mut self) -> Option<Result<Vec<Vec<u8>>, SerializationError>> {
        let replies = self.take()?;

        let encoded: Result<Vec<Vec<u8>>, SerializationError> = replies
            .into_iter()
            .map(|reply| {
                let mut buffer = Vec::new();
                ciborium::into_writer(&reply, &mut buffer).map(|()| buffer)
            })
            .collect();

        Some(encoded)
    }
}

11
nexosim/src/server.rs Normal file
View File

@ -0,0 +1,11 @@
//! Simulation management through remote procedure calls.
mod codegen;
mod key_registry;
mod run;
mod services;
pub use run::run;
#[cfg(unix)]
pub use run::run_local;

View File

@ -0,0 +1,198 @@
// The main simulation protocol.
syntax = "proto3";
package simulation.v1;
import "google/protobuf/duration.proto";
import "google/protobuf/timestamp.proto";
import "google/protobuf/empty.proto";
// Machine-readable error classification carried by `Error.code`.
enum ErrorCode {
  // 0-6: generic and request-validation errors.
  INTERNAL_ERROR = 0;
  MISSING_ARGUMENT = 1;
  INVALID_TIME = 2;
  INVALID_PERIOD = 3;
  INVALID_DEADLINE = 4;
  INVALID_MESSAGE = 5;
  INVALID_KEY = 6;
  // 10-21: simulation lifecycle and execution errors.
  INITIALIZER_PANIC = 10;
  SIMULATION_NOT_STARTED = 11;
  SIMULATION_HALTED = 12;
  SIMULATION_TERMINATED = 13;
  SIMULATION_DEADLOCK = 14;
  SIMULATION_MESSAGE_LOSS = 15;
  SIMULATION_NO_RECIPIENT = 16;
  SIMULATION_PANIC = 17;
  SIMULATION_TIMEOUT = 18;
  SIMULATION_OUT_OF_SYNC = 19;
  SIMULATION_BAD_QUERY = 20;
  SIMULATION_TIME_OUT_OF_RANGE = 21;
  // 30-31: endpoint-lookup errors.
  SOURCE_NOT_FOUND = 30;
  SINK_NOT_FOUND = 31;
}
// A structured error: a machine-readable code plus a human-readable message.
message Error {
  ErrorCode code = 1;
  string message = 2;
}
// Opaque identifier of a cancellable scheduled event, as returned by
// `ScheduleEvent` and accepted by `CancelEvent`.
message EventKey {
  uint64 subkey1 = 1;
  uint64 subkey2 = 2;
}
// Creates a new simulation bench from a CBOR-encoded configuration.
message InitRequest { bytes cfg = 2; }
message InitReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}
// Halts the currently running simulation.
message HaltRequest {}
message HaltReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}
// Queries the current simulation time.
message TimeRequest {}
message TimeReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}
// Advances the simulation to the time of the next scheduled event.
message StepRequest {}
message StepReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}
// Advances the simulation up to an absolute time or by a relative duration.
message StepUntilRequest {
  oneof deadline { // Expects exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    google.protobuf.Duration duration = 2;
  }
}
message StepUntilReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    Error error = 100;
  }
}
// Schedules an event for a future simulation time.
message ScheduleEventRequest {
  oneof deadline { // Expects exactly 1 variant.
    google.protobuf.Timestamp time = 1;
    google.protobuf.Duration duration = 2;
  }
  // Name under which the event source was registered.
  string source_name = 3;
  // CBOR-encoded event argument.
  bytes event = 4;
  // If set, the event recurs with this period.
  google.protobuf.Duration period = 5;
  // If true, a cancellation key is returned in the reply.
  bool with_key = 6;
}
message ScheduleEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    EventKey key = 2;
    Error error = 100;
  }
}
// Cancels a previously scheduled keyed event.
message CancelEventRequest { EventKey key = 1; }
message CancelEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}
// Broadcasts an event immediately, without advancing simulation time.
message ProcessEventRequest {
  string source_name = 1;
  // CBOR-encoded event argument.
  bytes event = 2;
}
message ProcessEventReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 1;
    Error error = 100;
  }
}
// Broadcasts a query immediately, without advancing simulation time.
message ProcessQueryRequest {
  string source_name = 1;
  // CBOR-encoded request argument.
  bytes request = 2;
}
message ProcessQueryReply {
  // This field is hoisted because protobuf3 does not support `repeated` within
  // a `oneof`. It is always empty if an error is returned.
  repeated bytes replies = 1;
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}
// Reads all pending events from the named event sink.
message ReadEventsRequest { string sink_name = 1; }
message ReadEventsReply {
  // This field is hoisted because protobuf3 does not support `repeated` within
  // a `oneof`. It is always empty if an error is returned.
  repeated bytes events = 1;
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}
// Opens the named event sink.
message OpenSinkRequest { string sink_name = 1; }
message OpenSinkReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}
// Closes the named event sink.
message CloseSinkRequest { string sink_name = 1; }
message CloseSinkReply {
  oneof result { // Always returns exactly 1 variant.
    google.protobuf.Empty empty = 10;
    Error error = 100;
  }
}
// A convenience message type for custom transport implementation: wraps every
// request type so that a single message kind can be exchanged.
message AnyRequest {
  oneof request { // Expects exactly 1 variant.
    InitRequest init_request = 1;
    HaltRequest halt_request = 2;
    TimeRequest time_request = 3;
    StepRequest step_request = 4;
    StepUntilRequest step_until_request = 5;
    ScheduleEventRequest schedule_event_request = 6;
    CancelEventRequest cancel_event_request = 7;
    ProcessEventRequest process_event_request = 8;
    ProcessQueryRequest process_query_request = 9;
    ReadEventsRequest read_events_request = 10;
    OpenSinkRequest open_sink_request = 11;
    CloseSinkRequest close_sink_request = 12;
  }
}
// The simulation service: one RPC per remotely accessible operation, each
// returning a reply holding either a payload or an `Error`.
service Simulation {
  rpc Init(InitRequest) returns (InitReply);
  rpc Halt(HaltRequest) returns (HaltReply);
  rpc Time(TimeRequest) returns (TimeReply);
  rpc Step(StepRequest) returns (StepReply);
  rpc StepUntil(StepUntilRequest) returns (StepUntilReply);
  rpc ScheduleEvent(ScheduleEventRequest) returns (ScheduleEventReply);
  rpc CancelEvent(CancelEventRequest) returns (CancelEventReply);
  rpc ProcessEvent(ProcessEventRequest) returns (ProcessEventReply);
  rpc ProcessQuery(ProcessQueryRequest) returns (ProcessQueryReply);
  rpc ReadEvents(ReadEventsRequest) returns (ReadEventsReply);
  rpc OpenSink(OpenSinkRequest) returns (OpenSinkReply);
  rpc CloseSink(CloseSinkRequest) returns (CloseSinkReply);
}

View File

@ -0,0 +1,7 @@
#![allow(unreachable_pub)]
#![allow(clippy::enum_variant_names)]
#![allow(missing_docs)]
#[rustfmt::skip]
#[path = "codegen/simulation.v1.rs"]
pub(crate) mod simulation;

View File

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,48 @@
use crate::simulation::ActionKey;
use crate::time::MonotonicTime;
use crate::util::indexed_priority_queue::{IndexedPriorityQueue, InsertKey};
pub(crate) type KeyRegistryId = InsertKey;
/// A collection of `ActionKey`s indexed by a unique identifier.
///
/// Keys are stored in a priority queue ordered by expiration deadline so that
/// expired entries can be pruned in deadline order.
#[derive(Default)]
pub(crate) struct KeyRegistry {
    keys: IndexedPriorityQueue<MonotonicTime, ActionKey>,
}
impl KeyRegistry {
    /// Adds an `ActionKey` that is guaranteed to remain extractable until the
    /// given expiration deadline.
    pub(crate) fn insert_key(
        &mut self,
        action_key: ActionKey,
        expiration: MonotonicTime,
    ) -> KeyRegistryId {
        self.keys.insert(expiration, action_key)
    }
    /// Adds an `ActionKey` that never expires.
    pub(crate) fn insert_eternal_key(&mut self, action_key: ActionKey) -> KeyRegistryId {
        self.keys.insert(MonotonicTime::MAX, action_key)
    }
    /// Takes the `ActionKey` with the given identifier out of the registry.
    ///
    /// Returns `None` if the key was not found in the registry.
    pub(crate) fn extract_key(&mut self, key_id: KeyRegistryId) -> Option<ActionKey> {
        let (_, key) = self.keys.extract(key_id)?;

        Some(key)
    }
    /// Drops every key whose expiration deadline lies strictly before `now`.
    pub(crate) fn remove_expired_keys(&mut self, now: MonotonicTime) {
        loop {
            match self.keys.peek_key() {
                Some(expiration) if *expiration < now => {
                    self.keys.pull();
                }
                _ => break,
            }
        }
    }
}

293
nexosim/src/server/run.rs Normal file
View File

@ -0,0 +1,293 @@
//! Simulation server.
use std::net::SocketAddr;
#[cfg(unix)]
use std::path::Path;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;
use serde::de::DeserializeOwned;
use tonic::{transport::Server, Request, Response, Status};
use crate::registry::EndpointRegistry;
use crate::simulation::{Simulation, SimulationError};
use super::codegen::simulation::*;
use super::key_registry::KeyRegistry;
use super::services::InitService;
use super::services::{ControllerService, MonitorService, SchedulerService};
/// Runs a simulation from a network server.
///
/// The first argument is a closure that takes an initialization configuration
/// and is called every time the simulation is (re)started by the remote client.
/// It must create a new simulation, complemented by a registry that exposes the
/// public event and query interface.
pub fn run<F, I>(sim_gen: F, addr: SocketAddr) -> Result<(), Box<dyn std::error::Error>>
where
    F: FnMut(I) -> Result<(Simulation, EndpointRegistry), SimulationError> + Send + 'static,
    I: DeserializeOwned,
{
    // Erase the generic parameters up front so the bulk of the server code is
    // compiled only once (see `run_service`).
    let service = GrpcSimulationService::new(sim_gen);

    run_service(service, addr)
}
/// Monomorphization of the network server.
///
/// Keeping this as a separate monomorphized fragment can even triple
/// compilation speed for incremental release builds.
fn run_service(
    service: GrpcSimulationService,
    addr: SocketAddr,
) -> Result<(), Box<dyn std::error::Error>> {
    // Use 2 threads so that even if the controller service is blocked due to
    // ongoing simulation execution, other services can still be used
    // concurrently.
    let runtime = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(2)
        .enable_io()
        .build()?;

    runtime.block_on(async move {
        let router = Server::builder().add_service(simulation_server::SimulationServer::new(service));
        router.serve(addr).await?;

        Ok(())
    })
}
/// Runs a simulation locally from a Unix Domain Sockets server.
///
/// The first argument is a closure that takes an initialization configuration
/// and is called every time the simulation is (re)started by the remote client.
/// It must create a new simulation, complemented by a registry that exposes the
/// public event and query interface.
#[cfg(unix)]
pub fn run_local<F, I, P>(sim_gen: F, path: P) -> Result<(), Box<dyn std::error::Error>>
where
    F: FnMut(I) -> Result<(Simulation, EndpointRegistry), SimulationError> + Send + 'static,
    I: DeserializeOwned,
    P: AsRef<Path>,
{
    // Erase the generic parameters up front so the bulk of the server code is
    // compiled only once (see `run_local_service`).
    let service = GrpcSimulationService::new(sim_gen);

    run_local_service(service, path.as_ref())
}
/// Monomorphization of the Unix Domain Sockets server.
///
/// Keeping this as a separate monomorphized fragment can even triple
/// compilation speed for incremental release builds.
#[cfg(unix)]
fn run_local_service(
    service: GrpcSimulationService,
    path: &Path,
) -> Result<(), Box<dyn std::error::Error>> {
    use std::fs;
    use std::io;
    use std::os::unix::fs::FileTypeExt;

    use tokio::net::UnixListener;
    use tokio_stream::wrappers::UnixListenerStream;

    // Unlink the socket if it already exists to prevent an `AddrInUse` error.
    match fs::metadata(path) {
        // The path is valid: make sure it actually points to a socket.
        Ok(socket_meta) => {
            if !socket_meta.file_type().is_socket() {
                return Err(Box::new(io::Error::new(
                    io::ErrorKind::AlreadyExists,
                    "the specified path points to an existing non-socket file",
                )));
            }

            fs::remove_file(path)?;
        }
        // Nothing to do: the socket does not exist yet.
        Err(e) if e.kind() == io::ErrorKind::NotFound => {}
        // We don't have permission to use the socket.
        Err(e) => return Err(Box::new(e)),
    }

    // Create the socket's parent directory if it does not exist yet. Note that
    // `parent()` may return `None` (e.g. for a root path) or an empty path
    // (for a bare relative file name, where `create_dir_all("")` would fail),
    // in which case there is nothing to create.
    if let Some(parent) = path.parent() {
        if !parent.as_os_str().is_empty() {
            fs::create_dir_all(parent)?;
        }
    }

    // Use 2 threads so that even if the controller service is blocked due to
    // ongoing simulation execution, other services can still be used
    // concurrently.
    let rt = tokio::runtime::Builder::new_multi_thread()
        .worker_threads(2)
        .enable_io()
        .build()?;

    rt.block_on(async move {
        let uds = UnixListener::bind(path)?;
        let uds_stream = UnixListenerStream::new(uds);

        Server::builder()
            .add_service(simulation_server::SimulationServer::new(service))
            .serve_with_incoming(uds_stream)
            .await?;

        Ok(())
    })
}
/// gRPC service façade over the simulation.
///
/// Each sub-service is guarded by its own mutex so that one service being
/// locked (e.g. the controller, while a simulation step executes) does not
/// prevent concurrent use of the others.
struct GrpcSimulationService {
    init_service: Mutex<InitService>,
    controller_service: Mutex<ControllerService>,
    monitor_service: Mutex<MonitorService>,
    scheduler_service: Mutex<SchedulerService>,
}
impl GrpcSimulationService {
    /// Creates a new `GrpcSimulationService` without any active simulation.
    ///
    /// The argument is a closure that takes an initialization configuration and
    /// is called every time the simulation is (re)started by the remote client.
    /// It must create a new simulation, complemented by a registry that exposes
    /// the public event and query interface.
    pub(crate) fn new<F, I>(sim_gen: F) -> Self
    where
        F: FnMut(I) -> Result<(Simulation, EndpointRegistry), SimulationError> + Send + 'static,
        I: DeserializeOwned,
    {
        let init_service = Mutex::new(InitService::new(sim_gen));
        let controller_service = Mutex::new(ControllerService::NotStarted);
        let monitor_service = Mutex::new(MonitorService::NotStarted);
        let scheduler_service = Mutex::new(SchedulerService::NotStarted);

        Self {
            init_service,
            controller_service,
            monitor_service,
            scheduler_service,
        }
    }
    /// Locks the initializer and returns the mutex guard.
    fn initializer(&self) -> MutexGuard<'_, InitService> {
        self.init_service.lock().unwrap()
    }
    /// Locks the controller and returns the mutex guard.
    fn controller(&self) -> MutexGuard<'_, ControllerService> {
        self.controller_service.lock().unwrap()
    }
    /// Locks the monitor and returns the mutex guard.
    fn monitor(&self) -> MutexGuard<'_, MonitorService> {
        self.monitor_service.lock().unwrap()
    }
    /// Locks the scheduler and returns the mutex guard.
    fn scheduler(&self) -> MutexGuard<'_, SchedulerService> {
        self.scheduler_service.lock().unwrap()
    }
}
#[tonic::async_trait]
impl simulation_server::Simulation for GrpcSimulationService {
    /// (Re)starts the simulation, then hands the newly created bench out to
    /// the controller, monitor and scheduler services.
    async fn init(&self, request: Request<InitRequest>) -> Result<Response<InitReply>, Status> {
        let (reply, bench) = self.initializer().init(request.into_inner());

        if let Some((simulation, scheduler, endpoint_registry)) = bench {
            // The event source registry is shared between the controller and
            // the scheduler.
            let event_source_registry = Arc::new(endpoint_registry.event_source_registry);

            *self.controller() = ControllerService::Started {
                simulation,
                event_source_registry: event_source_registry.clone(),
                query_source_registry: endpoint_registry.query_source_registry,
            };
            *self.monitor() = MonitorService::Started {
                event_sink_registry: endpoint_registry.event_sink_registry,
            };
            *self.scheduler() = SchedulerService::Started {
                scheduler,
                event_source_registry,
                key_registry: KeyRegistry::default(),
            };
        }

        Ok(Response::new(reply))
    }
    /// Delegates to the scheduler service.
    async fn halt(&self, request: Request<HaltRequest>) -> Result<Response<HaltReply>, Status> {
        let reply = self.scheduler().halt(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the scheduler service.
    async fn time(&self, request: Request<TimeRequest>) -> Result<Response<TimeReply>, Status> {
        let reply = self.scheduler().time(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the controller service.
    async fn step(&self, request: Request<StepRequest>) -> Result<Response<StepReply>, Status> {
        let reply = self.controller().step(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the controller service.
    async fn step_until(
        &self,
        request: Request<StepUntilRequest>,
    ) -> Result<Response<StepUntilReply>, Status> {
        let reply = self.controller().step_until(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the scheduler service.
    async fn schedule_event(
        &self,
        request: Request<ScheduleEventRequest>,
    ) -> Result<Response<ScheduleEventReply>, Status> {
        let reply = self.scheduler().schedule_event(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the scheduler service.
    async fn cancel_event(
        &self,
        request: Request<CancelEventRequest>,
    ) -> Result<Response<CancelEventReply>, Status> {
        let reply = self.scheduler().cancel_event(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the controller service.
    async fn process_event(
        &self,
        request: Request<ProcessEventRequest>,
    ) -> Result<Response<ProcessEventReply>, Status> {
        let reply = self.controller().process_event(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the controller service.
    async fn process_query(
        &self,
        request: Request<ProcessQueryRequest>,
    ) -> Result<Response<ProcessQueryReply>, Status> {
        let reply = self.controller().process_query(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the monitor service.
    async fn read_events(
        &self,
        request: Request<ReadEventsRequest>,
    ) -> Result<Response<ReadEventsReply>, Status> {
        let reply = self.monitor().read_events(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the monitor service.
    async fn open_sink(
        &self,
        request: Request<OpenSinkRequest>,
    ) -> Result<Response<OpenSinkReply>, Status> {
        let reply = self.monitor().open_sink(request.into_inner());

        Ok(Response::new(reply))
    }
    /// Delegates to the monitor service.
    async fn close_sink(
        &self,
        request: Request<CloseSinkRequest>,
    ) -> Result<Response<CloseSinkReply>, Status> {
        let reply = self.monitor().close_sink(request.into_inner());

        Ok(Response::new(reply))
    }
}

View File

@ -0,0 +1,139 @@
mod controller_service;
mod init_service;
mod monitor_service;
mod scheduler_service;
use std::time::Duration;
use prost_types::Timestamp;
use tai_time::MonotonicTime;
use super::codegen::simulation::{Error, ErrorCode};
use crate::simulation::{ExecutionError, SchedulingError, SimulationError};
pub(crate) use controller_service::ControllerService;
pub(crate) use init_service::InitService;
pub(crate) use monitor_service::MonitorService;
pub(crate) use scheduler_service::SchedulerService;
/// Transforms an error code and a message into a Protobuf error.
fn to_error(code: ErrorCode, message: impl Into<String>) -> Error {
    let message = message.into();

    Error {
        code: code as i32,
        message,
    }
}
/// An error returned when a simulation was not started.
fn simulation_not_started_error() -> Error {
    let code = ErrorCode::SimulationNotStarted;

    to_error(code, "the simulation was not started")
}
/// Maps an `ExecutionError` to a Protobuf error.
fn map_execution_error(error: ExecutionError) -> Error {
    // Stringify first: the message borrows the error while the code match
    // below consumes it.
    let message = error.to_string();

    let code = match error {
        ExecutionError::Deadlock(_) => ErrorCode::SimulationDeadlock,
        ExecutionError::MessageLoss(_) => ErrorCode::SimulationMessageLoss,
        ExecutionError::NoRecipient { .. } => ErrorCode::SimulationNoRecipient,
        ExecutionError::Panic { .. } => ErrorCode::SimulationPanic,
        ExecutionError::Timeout => ErrorCode::SimulationTimeout,
        ExecutionError::OutOfSync(_) => ErrorCode::SimulationOutOfSync,
        ExecutionError::BadQuery => ErrorCode::SimulationBadQuery,
        ExecutionError::Halted => ErrorCode::SimulationHalted,
        ExecutionError::Terminated => ErrorCode::SimulationTerminated,
        ExecutionError::InvalidDeadline(_) => ErrorCode::InvalidDeadline,
    };

    to_error(code, message)
}
/// Maps a `SchedulingError` to a Protobuf error.
fn map_scheduling_error(error: SchedulingError) -> Error {
    // Stringify first: the message borrows the error while the code match
    // below consumes it.
    let message = error.to_string();

    let code = match error {
        SchedulingError::InvalidScheduledTime => ErrorCode::InvalidDeadline,
        SchedulingError::NullRepetitionPeriod => ErrorCode::InvalidPeriod,
    };

    to_error(code, message)
}
/// Map a `SimulationError` to a Protobuf error.
///
/// Delegates to the dedicated mapping function for each variant.
fn map_simulation_error(error: SimulationError) -> Error {
    match error {
        SimulationError::ExecutionError(e) => map_execution_error(e),
        SimulationError::SchedulingError(e) => map_scheduling_error(e),
    }
}
/// Attempts a cast from a `MonotonicTime` to a protobuf `Timestamp`.
///
/// This will fail if the time is outside the protobuf-specified range for
/// timestamps (0001-01-01 00:00:00 to 9999-12-31 23:59:59).
pub(crate) fn monotonic_to_timestamp(monotonic_time: MonotonicTime) -> Option<Timestamp> {
    // Unix timestamp for 0001-01-01 00:00:00, the minimum accepted by
    // protobuf's specification for the `Timestamp` type.
    const MIN_SECS: i64 = -62135596800;
    // Unix timestamp for 9999-12-31 23:59:59, the maximum accepted by
    // protobuf's specification for the `Timestamp` type.
    const MAX_SECS: i64 = 253402300799;

    let seconds = monotonic_time.as_secs();
    if seconds < MIN_SECS || seconds > MAX_SECS {
        return None;
    }

    Some(Timestamp {
        seconds,
        nanos: monotonic_time.subsec_nanos() as i32,
    })
}
/// Attempts a cast from a protobuf `Timestamp` to a `MonotonicTime`.
///
/// This should never fail provided that the `Timestamp` complies with the
/// protobuf specification. It can only fail if the nanosecond part is negative
/// or greater than 999'999'999.
pub(crate) fn timestamp_to_monotonic(timestamp: Timestamp) -> Option<MonotonicTime> {
    match u32::try_from(timestamp.nanos) {
        Ok(nanos) => MonotonicTime::new(timestamp.seconds, nanos),
        Err(_) => None,
    }
}
/// Attempts a cast from a protobuf `Duration` to a `std::time::Duration`.
///
/// If the `Duration` complies with the protobuf specification, this can only
/// fail if the duration is negative.
pub(crate) fn to_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
    let (seconds, nanos) = (duration.seconds, duration.nanos);

    if seconds >= 0 && nanos >= 0 {
        Some(Duration::new(seconds as u64, nanos as u32))
    } else {
        None
    }
}
/// Attempts a cast from a protobuf `Duration` to a strictly positive
/// `std::time::Duration`.
///
/// If the `Duration` complies with the protobuf specification, this can only
/// fail if the duration is negative or null.
pub(crate) fn to_strictly_positive_duration(duration: prost_types::Duration) -> Option<Duration> {
    let (seconds, nanos) = (duration.seconds, duration.nanos);

    // Reject negative components as well as the null duration.
    let is_valid = seconds >= 0 && nanos >= 0 && (seconds > 0 || nanos > 0);
    if !is_valid {
        return None;
    }

    Some(Duration::new(seconds as u64, nanos as u32))
}

View File

@ -0,0 +1,231 @@
use std::fmt;
use std::sync::Arc;
use prost_types::Timestamp;
use crate::registry::{EventSourceRegistry, QuerySourceRegistry};
use crate::simulation::Simulation;
use super::super::codegen::simulation::*;
use super::{
map_execution_error, monotonic_to_timestamp, simulation_not_started_error,
timestamp_to_monotonic, to_error, to_positive_duration,
};
/// Protobuf-based simulation controller.
///
/// A `ControllerService` controls the execution of the simulation. Note that
/// all its methods block until execution completes.
#[allow(clippy::large_enum_variant)]
pub(crate) enum ControllerService {
    /// No simulation bench has been initialized yet.
    NotStarted,
    /// An active simulation, together with the registries used to resolve
    /// event and query sources by name.
    Started {
        simulation: Simulation,
        event_source_registry: Arc<EventSourceRegistry>,
        query_source_registry: QuerySourceRegistry,
    },
}
impl ControllerService {
    /// Advances simulation time to that of the next scheduled event, processing
    /// that event as well as all other events scheduled for the same time.
    ///
    /// Processing is gated by a (possibly blocking) call to
    /// [`Clock::synchronize`](crate::time::Clock::synchronize) on the
    /// configured simulation clock. This method blocks until all newly
    /// processed events have completed.
    pub(crate) fn step(&mut self, _request: StepRequest) -> StepReply {
        let reply = match self {
            Self::Started { simulation, .. } => match simulation.step() {
                Ok(()) => {
                    if let Some(timestamp) = monotonic_to_timestamp(simulation.time()) {
                        step_reply::Result::Time(timestamp)
                    } else {
                        step_reply::Result::Error(to_error(
                            ErrorCode::SimulationTimeOutOfRange,
                            "the final simulation time is out of range",
                        ))
                    }
                }
                Err(e) => step_reply::Result::Error(map_execution_error(e)),
            },
            Self::NotStarted => step_reply::Result::Error(simulation_not_started_error()),
        };

        StepReply {
            result: Some(reply),
        }
    }

    /// Iteratively advances the simulation time until the specified deadline,
    /// as if by calling
    /// [`Simulation::step`](crate::simulation::Simulation::step) repeatedly.
    ///
    /// This method blocks until all events scheduled up to the specified target
    /// time have completed. The simulation time upon completion is equal to the
    /// specified target time, whether or not an event was scheduled for that
    /// time.
    pub(crate) fn step_until(&mut self, request: StepUntilRequest) -> StepUntilReply {
        let reply = match self {
            Self::Started { simulation, .. } => move || -> Result<Timestamp, Error> {
                let deadline = request.deadline.ok_or_else(|| {
                    to_error(ErrorCode::MissingArgument, "missing deadline argument")
                })?;

                match deadline {
                    step_until_request::Deadline::Time(time) => {
                        let time = timestamp_to_monotonic(time).ok_or_else(|| {
                            to_error(ErrorCode::InvalidTime, "out-of-range nanosecond field")
                        })?;

                        // NOTE(review): any execution error is reported here as
                        // an invalid deadline, whereas the `Duration` branch
                        // below forwards the original error via
                        // `map_execution_error` — confirm this asymmetry is
                        // intended.
                        simulation.step_until(time).map_err(|_| {
                            to_error(
                                ErrorCode::InvalidDeadline,
                                "the specified deadline lies in the past",
                            )
                        })?;
                    }
                    step_until_request::Deadline::Duration(duration) => {
                        let duration = to_positive_duration(duration).ok_or_else(|| {
                            to_error(
                                ErrorCode::InvalidDeadline,
                                "the specified deadline lies in the past",
                            )
                        })?;

                        simulation
                            .step_until(duration)
                            .map_err(map_execution_error)?;
                    }
                };

                let timestamp = monotonic_to_timestamp(simulation.time()).ok_or_else(|| {
                    to_error(
                        ErrorCode::SimulationTimeOutOfRange,
                        "the final simulation time is out of range",
                    )
                })?;

                Ok(timestamp)
            }(),
            Self::NotStarted => Err(simulation_not_started_error()),
        };

        StepUntilReply {
            result: Some(match reply {
                Ok(timestamp) => step_until_reply::Result::Time(timestamp),
                Err(error) => step_until_reply::Result::Error(error),
            }),
        }
    }

    /// Broadcasts an event from an event source immediately, blocking until
    /// completion.
    ///
    /// Simulation time remains unchanged.
    pub(crate) fn process_event(&mut self, request: ProcessEventRequest) -> ProcessEventReply {
        let reply = match self {
            Self::Started {
                simulation,
                event_source_registry,
                ..
            } => move || -> Result<(), Error> {
                let source_name = &request.source_name;
                let event = &request.event;

                // Fixed: the source name is now interpolated into the message;
                // it was previously passed as the literal string `'{}'`.
                let source = event_source_registry.get(source_name).ok_or_else(|| {
                    to_error(
                        ErrorCode::SourceNotFound,
                        format!("no source is registered with the name '{}'", source_name),
                    )
                })?;

                let event = source.event(event).map_err(|e| {
                    to_error(
                        ErrorCode::InvalidMessage,
                        format!(
                            "the event could not be deserialized as type '{}': {}",
                            source.event_type_name(),
                            e
                        ),
                    )
                })?;

                simulation.process(event).map_err(map_execution_error)
            }(),
            Self::NotStarted => Err(simulation_not_started_error()),
        };

        ProcessEventReply {
            result: Some(match reply {
                Ok(()) => process_event_reply::Result::Empty(()),
                Err(error) => process_event_reply::Result::Error(error),
            }),
        }
    }

    /// Broadcasts a query from a query source immediately, blocking until
    /// completion.
    ///
    /// Simulation time remains unchanged.
    pub(crate) fn process_query(&mut self, request: ProcessQueryRequest) -> ProcessQueryReply {
        let reply = match self {
            Self::Started {
                simulation,
                query_source_registry,
                ..
            } => move || -> Result<Vec<Vec<u8>>, Error> {
                let source_name = &request.source_name;
                let request = &request.request;

                // Fixed: the source name is now interpolated into the message;
                // it was previously passed as the literal string `'{}'`.
                let source = query_source_registry.get(source_name).ok_or_else(|| {
                    to_error(
                        ErrorCode::SourceNotFound,
                        format!("no source is registered with the name '{}'", source_name),
                    )
                })?;

                let (query, mut promise) = source.query(request).map_err(|e| {
                    to_error(
                        ErrorCode::InvalidMessage,
                        format!(
                            "the request could not be deserialized as type '{}': {}",
                            source.request_type_name(),
                            e
                        ),
                    )
                })?;

                simulation.process(query).map_err(map_execution_error)?;

                let replies = promise.take_collect().ok_or_else(|| to_error(
                    ErrorCode::SimulationBadQuery,
                    "a reply to the query was expected but none was available; maybe the target model was not added to the simulation?",
                ))?;

                replies.map_err(|e| {
                    to_error(
                        ErrorCode::InvalidMessage,
                        format!(
                            "the reply could not be serialized as type '{}': {}",
                            source.reply_type_name(),
                            e
                        ),
                    )
                })
            }(),
            Self::NotStarted => Err(simulation_not_started_error()),
        };

        match reply {
            Ok(replies) => ProcessQueryReply {
                replies,
                result: Some(process_query_reply::Result::Empty(())),
            },
            Err(error) => ProcessQueryReply {
                replies: Vec::new(),
                result: Some(process_query_reply::Result::Error(error)),
            },
        }
    }
}
impl fmt::Debug for ControllerService {
    /// Formats the service as an opaque `ControllerService { .. }`, without
    /// exposing the simulation or registries it may carry.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ControllerService").finish_non_exhaustive()
    }
}

View File

@ -0,0 +1,106 @@
use std::panic::{self, AssertUnwindSafe};
use ciborium;
use serde::de::DeserializeOwned;
use crate::registry::EndpointRegistry;
use crate::simulation::{Scheduler, Simulation, SimulationError};
use super::{map_simulation_error, to_error};
use super::super::codegen::simulation::*;
type InitResult = Result<(Simulation, EndpointRegistry), SimulationError>;
type DeserializationError = ciborium::de::Error<std::io::Error>;
type SimGen = Box<dyn FnMut(&[u8]) -> Result<InitResult, DeserializationError> + Send + 'static>;
/// Protobuf-based simulation initializer.
///
/// An `InitService` creates a new simulation bench based on a serialized
/// initialization configuration.
pub(crate) struct InitService {
    // Factory closure mapping a CBOR-encoded configuration to a new simulation
    // bench (or a deserialization/simulation error).
    sim_gen: SimGen,
}
impl InitService {
    /// Creates a new `InitService`.
    ///
    /// The argument is a closure that takes a CBOR-serialized initialization
    /// configuration and is called every time the simulation is (re)started by
    /// the remote client. It must create a new simulation complemented by a
    /// registry that exposes the public event and query interface.
    pub(crate) fn new<F, I>(mut sim_gen: F) -> Self
    where
        F: FnMut(I) -> Result<(Simulation, EndpointRegistry), SimulationError> + Send + 'static,
        I: DeserializeOwned,
    {
        // Adapt `sim_gen` so that it accepts a serialized init configuration.
        let sim_gen = move |serialized_cfg: &[u8]| -> Result<InitResult, DeserializationError> {
            ciborium::from_reader(serialized_cfg).map(|cfg| sim_gen(cfg))
        };

        Self {
            sim_gen: Box::new(sim_gen),
        }
    }
    /// Initializes the simulation based on the specified configuration.
    ///
    /// Returns the Protobuf reply together with, on success, the newly created
    /// simulation bench.
    pub(crate) fn init(
        &mut self,
        request: InitRequest,
    ) -> (InitReply, Option<(Simulation, Scheduler, EndpointRegistry)>) {
        // Run the generator under `catch_unwind` so a panicking initializer is
        // reported to the client rather than unwinding through the server.
        let outcome = panic::catch_unwind(AssertUnwindSafe(|| (self.sim_gen)(&request.cfg)))
            .map_err(|payload| {
                // Recover the panic message when the payload is a plain string.
                let panic_msg = payload
                    .downcast_ref::<&str>()
                    .copied()
                    .or_else(|| payload.downcast_ref::<String>().map(String::as_str));

                let error_msg = match panic_msg {
                    Some(panic_msg) => format!(
                        "the simulation initializer has panicked with the message `{}`",
                        panic_msg
                    ),
                    None => String::from("the simulation initializer has panicked"),
                };

                to_error(ErrorCode::InitializerPanic, error_msg)
            })
            .and_then(|res| {
                res.map_err(|e| {
                    to_error(
                        ErrorCode::InvalidMessage,
                        format!(
                            "the initializer configuration could not be deserialized: {}",
                            e
                        ),
                    )
                })
                .and_then(|init_result| init_result.map_err(map_simulation_error))
            });

        let (reply, bench) = match outcome {
            Ok((simulation, registry)) => {
                let scheduler = simulation.scheduler();

                (
                    init_reply::Result::Empty(()),
                    Some((simulation, scheduler, registry)),
                )
            }
            Err(e) => (init_reply::Result::Error(e), None),
        };

        (
            InitReply {
                result: Some(reply),
            },
            bench,
        )
    }
}

View File

@ -0,0 +1,118 @@
use std::fmt;
use crate::registry::EventSinkRegistry;
use super::super::codegen::simulation::*;
use super::{simulation_not_started_error, to_error};
/// Protobuf-based simulation monitor.
///
/// A `MonitorService` enables the monitoring of the event sinks of a
/// [`Simulation`](crate::simulation::Simulation).
pub(crate) enum MonitorService {
    /// A simulation bench was initialized; its sinks can be queried.
    Started {
        event_sink_registry: EventSinkRegistry,
    },
    /// No simulation bench was initialized yet; every request is answered with
    /// a "simulation not started" error.
    NotStarted,
}
impl MonitorService {
/// Read all events from an event sink.
pub(crate) fn read_events(&mut self, request: ReadEventsRequest) -> ReadEventsReply {
let reply = match self {
Self::Started {
event_sink_registry,
} => move || -> Result<Vec<Vec<u8>>, Error> {
let sink_name = &request.sink_name;
let sink = event_sink_registry.get_mut(sink_name).ok_or(to_error(
ErrorCode::SinkNotFound,
format!("no sink is registered with the name '{}'", sink_name),
))?;
sink.collect().map_err(|e| {
to_error(
ErrorCode::InvalidMessage,
format!(
"the event could not be serialized from type '{}': {}",
sink.event_type_name(),
e
),
)
})
}(),
Self::NotStarted => Err(simulation_not_started_error()),
};
match reply {
Ok(events) => ReadEventsReply {
events,
result: Some(read_events_reply::Result::Empty(())),
},
Err(error) => ReadEventsReply {
events: Vec::new(),
result: Some(read_events_reply::Result::Error(error)),
},
}
}
/// Opens an event sink.
pub(crate) fn open_sink(&mut self, request: OpenSinkRequest) -> OpenSinkReply {
let reply = match self {
Self::Started {
event_sink_registry,
} => {
let sink_name = &request.sink_name;
if let Some(sink) = event_sink_registry.get_mut(sink_name) {
sink.open();
open_sink_reply::Result::Empty(())
} else {
open_sink_reply::Result::Error(to_error(
ErrorCode::SinkNotFound,
format!("no sink is registered with the name '{}'", sink_name),
))
}
}
Self::NotStarted => open_sink_reply::Result::Error(simulation_not_started_error()),
};
OpenSinkReply {
result: Some(reply),
}
}
/// Closes an event sink.
pub(crate) fn close_sink(&mut self, request: CloseSinkRequest) -> CloseSinkReply {
let reply = match self {
Self::Started {
event_sink_registry,
} => {
let sink_name = &request.sink_name;
if let Some(sink) = event_sink_registry.get_mut(sink_name) {
sink.close();
close_sink_reply::Result::Empty(())
} else {
close_sink_reply::Result::Error(to_error(
ErrorCode::SinkNotFound,
format!("no sink is registered with the name '{}'", sink_name),
))
}
}
Self::NotStarted => close_sink_reply::Result::Error(simulation_not_started_error()),
};
CloseSinkReply {
result: Some(reply),
}
}
}
impl fmt::Debug for MonitorService {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Report the actual type name; the previous label ("SimulationService")
        // was a copy-paste leftover from another service.
        f.debug_struct("MonitorService").finish_non_exhaustive()
    }
}

View File

@ -0,0 +1,216 @@
use std::fmt;
use std::sync::Arc;
use crate::registry::EventSourceRegistry;
use crate::server::key_registry::{KeyRegistry, KeyRegistryId};
use crate::simulation::Scheduler;
use super::super::codegen::simulation::*;
use super::{
map_scheduling_error, monotonic_to_timestamp, simulation_not_started_error,
timestamp_to_monotonic, to_error, to_strictly_positive_duration,
};
/// Protobuf-based simulation scheduler.
///
/// A `SchedulerService` enables the scheduling of simulation events.
#[allow(clippy::large_enum_variant)]
pub(crate) enum SchedulerService {
    /// No simulation bench was initialized yet; every request is answered with
    /// a "simulation not started" error.
    NotStarted,
    /// A simulation bench was initialized; events can be scheduled.
    Started {
        scheduler: Scheduler,
        event_source_registry: Arc<EventSourceRegistry>,
        // Stores cancellation keys of scheduled events so they can later be
        // looked up from the raw (subkey1, subkey2) pair sent over the wire.
        key_registry: KeyRegistry,
    },
}
impl SchedulerService {
    /// Returns the current simulation time.
    pub(crate) fn time(&mut self, _request: TimeRequest) -> TimeReply {
        let reply = match self {
            Self::Started { scheduler, .. } => {
                if let Some(timestamp) = monotonic_to_timestamp(scheduler.time()) {
                    time_reply::Result::Time(timestamp)
                } else {
                    // NOTE(review): the message mentions the "final" time but
                    // this reports the current time -- confirm intended wording.
                    time_reply::Result::Error(to_error(
                        ErrorCode::SimulationTimeOutOfRange,
                        "the final simulation time is out of range",
                    ))
                }
            }
            Self::NotStarted => time_reply::Result::Error(simulation_not_started_error()),
        };

        TimeReply {
            result: Some(reply),
        }
    }

    /// Schedules an event at a future time.
    ///
    /// On success, the reply carries the raw parts of the cancellation key if
    /// a keyed event was requested, or is empty otherwise.
    pub(crate) fn schedule_event(&mut self, request: ScheduleEventRequest) -> ScheduleEventReply {
        let reply = match self {
            Self::Started {
                scheduler,
                event_source_registry,
                key_registry,
            } => move || -> Result<Option<KeyRegistryId>, Error> {
                let source_name = &request.source_name;
                let event = &request.event;
                let with_key = request.with_key;
                let period = request
                    .period
                    .map(|period| {
                        to_strictly_positive_duration(period).ok_or(to_error(
                            ErrorCode::InvalidPeriod,
                            "the specified event period is not strictly positive",
                        ))
                    })
                    .transpose()?;

                // Fix: the source name was previously never interpolated into
                // the error message (the raw "'{}'" placeholder was sent
                // verbatim because `.to_string()` was used instead of
                // `format!`).
                let source = event_source_registry.get(source_name).ok_or(to_error(
                    ErrorCode::SourceNotFound,
                    format!(
                        "no event source is registered with the name '{}'",
                        source_name
                    ),
                ))?;

                // Build the action matching the keyed/periodic combination.
                let (action, action_key) = match (with_key, period) {
                    (false, None) => source.event(event).map(|action| (action, None)),
                    (false, Some(period)) => source
                        .periodic_event(period, event)
                        .map(|action| (action, None)),
                    (true, None) => source
                        .keyed_event(event)
                        .map(|(action, key)| (action, Some(key))),
                    (true, Some(period)) => source
                        .keyed_periodic_event(period, event)
                        .map(|(action, key)| (action, Some(key))),
                }
                .map_err(|e| {
                    to_error(
                        ErrorCode::InvalidMessage,
                        format!(
                            "the event could not be deserialized as type '{}': {}",
                            source.event_type_name(),
                            e
                        ),
                    )
                })?;

                let deadline = request.deadline.ok_or(to_error(
                    ErrorCode::MissingArgument,
                    "missing deadline argument",
                ))?;

                // An absolute timestamp is used as-is; a duration is resolved
                // relative to the current simulation time.
                let deadline = match deadline {
                    schedule_event_request::Deadline::Time(time) => timestamp_to_monotonic(time)
                        .ok_or(to_error(
                            ErrorCode::InvalidTime,
                            "out-of-range nanosecond field",
                        ))?,
                    schedule_event_request::Deadline::Duration(duration) => {
                        let duration = to_strictly_positive_duration(duration).ok_or(to_error(
                            ErrorCode::InvalidDeadline,
                            "the specified scheduling deadline is not in the future",
                        ))?;

                        scheduler.time() + duration
                    }
                };

                let key_id = action_key.map(|action_key| {
                    // Periodic events have no expiration so their key is kept
                    // forever; one-shot keys expire at the deadline.
                    key_registry.remove_expired_keys(scheduler.time());

                    if period.is_some() {
                        key_registry.insert_eternal_key(action_key)
                    } else {
                        key_registry.insert_key(action_key, deadline)
                    }
                });

                scheduler
                    .schedule(deadline, action)
                    .map_err(map_scheduling_error)?;

                Ok(key_id)
            }(),
            Self::NotStarted => Err(simulation_not_started_error()),
        };

        ScheduleEventReply {
            result: Some(match reply {
                Ok(Some(key_id)) => {
                    let (subkey1, subkey2) = key_id.into_raw_parts();

                    schedule_event_reply::Result::Key(EventKey {
                        subkey1: subkey1
                            .try_into()
                            .expect("action key index is too large to be serialized"),
                        subkey2,
                    })
                }
                Ok(None) => schedule_event_reply::Result::Empty(()),
                Err(error) => schedule_event_reply::Result::Error(error),
            }),
        }
    }

    /// Cancels a keyed event.
    pub(crate) fn cancel_event(&mut self, request: CancelEventRequest) -> CancelEventReply {
        let reply = match self {
            Self::Started {
                scheduler,
                key_registry,
                ..
            } => move || -> Result<(), Error> {
                let key = request
                    .key
                    .ok_or(to_error(ErrorCode::MissingArgument, "missing key argument"))?;
                let subkey1: usize = key
                    .subkey1
                    .try_into()
                    .map_err(|_| to_error(ErrorCode::InvalidKey, "invalid event key"))?;
                let subkey2 = key.subkey2;

                let key_id = KeyRegistryId::from_raw_parts(subkey1, subkey2);

                // Purge stale keys first so an expired key cannot be cancelled.
                key_registry.remove_expired_keys(scheduler.time());
                let key = key_registry.extract_key(key_id).ok_or(to_error(
                    ErrorCode::InvalidKey,
                    "invalid or expired event key",
                ))?;

                key.cancel();

                Ok(())
            }(),
            Self::NotStarted => Err(simulation_not_started_error()),
        };

        CancelEventReply {
            result: Some(match reply {
                Ok(()) => cancel_event_reply::Result::Empty(()),
                Err(error) => cancel_event_reply::Result::Error(error),
            }),
        }
    }

    /// Requests the simulation to stop when advancing to the next step.
    pub(crate) fn halt(&mut self, _request: HaltRequest) -> HaltReply {
        let reply = match self {
            Self::Started { scheduler, .. } => {
                scheduler.halt();

                halt_reply::Result::Empty(())
            }
            Self::NotStarted => halt_reply::Result::Error(simulation_not_started_error()),
        };

        HaltReply {
            result: Some(reply),
        }
    }
}
impl fmt::Debug for SchedulerService {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Internals (registries, keys) are elided from the debug output.
        let mut dbg = f.debug_struct("SchedulerService");
        dbg.finish_non_exhaustive()
    }
}

1054
nexosim/src/simulation.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -8,7 +8,7 @@ use crate::model::Model;
/// A mailbox is an entity associated to a model instance that collects all
/// messages sent to that model. The size of its internal buffer can be
/// optionally specified at construction time using
/// [`with_capacity()`](Mailbox::with_capacity).
/// [`with_capacity`](Mailbox::with_capacity).
pub struct Mailbox<M: Model>(pub(crate) Receiver<M>);
impl<M: Model> Mailbox<M> {
@ -58,7 +58,7 @@ impl<M: Model> fmt::Debug for Mailbox<M> {
/// For the sake of convenience, methods that require an address by value will
/// typically also accept an `&Address` or an `&Mailbox` since these references
/// implement the `Into<Address>` trait, automatically invoking
/// `Address::clone()` or `Mailbox::address()` as appropriate.
/// `Address::clone` or `Mailbox::address` as appropriate.
pub struct Address<M: Model>(pub(crate) Sender<M>);
impl<M: Model> Clone for Address<M> {
@ -80,8 +80,7 @@ impl<M: Model> From<&Address<M>> for Address<M> {
impl<M: Model> From<&Mailbox<M>> for Address<M> {
/// Converts a [Mailbox] reference into an [`Address`].
///
/// This calls [`Mailbox::address()`] on the mailbox and returns the
/// address.
/// This calls [`Mailbox::address`] on the mailbox and returns the address.
#[inline]
fn from(s: &Mailbox<M>) -> Address<M> {
s.address()

View File

@ -0,0 +1,855 @@
//! Scheduling functions and types.
use std::error::Error;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::{Arc, Mutex};
use std::task::{Context, Poll};
use std::time::Duration;
use std::{fmt, ptr};
use pin_project::pin_project;
use recycle_box::{coerce_box, RecycleBox};
use crate::channel::Sender;
use crate::executor::Executor;
use crate::model::Model;
use crate::ports::InputFn;
use crate::simulation::Address;
use crate::time::{AtomicTimeReader, Deadline, MonotonicTime};
use crate::util::priority_queue::PriorityQueue;
#[cfg(all(test, not(nexosim_loom)))]
use crate::{time::TearableAtomicTime, util::sync_cell::SyncCell};
// Origin ID used for all actions scheduled through the global scheduler, as
// opposed to actions originating from a model instance (see the
// `SchedulerQueue` documentation for how origins affect event ordering).
const GLOBAL_SCHEDULER_ORIGIN_ID: usize = 0;

/// A global simulation scheduler.
///
/// A `Scheduler` can be `Clone`d and sent to other threads.
#[derive(Clone)]
pub struct Scheduler(GlobalScheduler);
impl Scheduler {
    /// Creates a new `Scheduler` wrapping the global scheduler internals.
    pub(crate) fn new(
        scheduler_queue: Arc<Mutex<SchedulerQueue>>,
        time: AtomicTimeReader,
        is_halted: Arc<AtomicBool>,
    ) -> Self {
        Self(GlobalScheduler::new(scheduler_queue, time, is_halted))
    }

    /// Returns the current simulation time.
    ///
    /// # Examples
    ///
    /// ```
    /// use nexosim::simulation::Scheduler;
    /// use nexosim::time::MonotonicTime;
    ///
    /// fn is_third_millenium(scheduler: &Scheduler) -> bool {
    ///     let time = scheduler.time();
    ///     time >= MonotonicTime::new(978307200, 0).unwrap()
    ///         && time < MonotonicTime::new(32535216000, 0).unwrap()
    /// }
    /// ```
    pub fn time(&self) -> MonotonicTime {
        self.0.time()
    }

    /// Schedules an action at a future time.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time.
    ///
    /// If multiple actions send events at the same simulation time to the same
    /// model, these events are guaranteed to be processed according to the
    /// scheduling order of the actions.
    pub fn schedule(&self, deadline: impl Deadline, action: Action) -> Result<(), SchedulingError> {
        self.0
            .schedule_from(deadline, action, GLOBAL_SCHEDULER_ORIGIN_ID)
    }

    /// Schedules an event at a future time.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_event<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<(), SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.0
            .schedule_event_from(deadline, func, arg, address, GLOBAL_SCHEDULER_ORIGIN_ID)
    }

    /// Schedules a cancellable event at a future time and returns an event key.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_keyed_event<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<ActionKey, SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.0
            .schedule_keyed_event_from(deadline, func, arg, address, GLOBAL_SCHEDULER_ORIGIN_ID)
    }

    /// Schedules a periodically recurring event at a future time.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time or if the specified period is null.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_periodic_event<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<(), SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.0.schedule_periodic_event_from(
            deadline,
            period,
            func,
            arg,
            address,
            GLOBAL_SCHEDULER_ORIGIN_ID,
        )
    }

    /// Schedules a cancellable, periodically recurring event at a future time
    /// and returns an event key.
    ///
    /// An error is returned if the specified time is not in the future of the
    /// current simulation time or if the specified period is null.
    ///
    /// Events scheduled for the same time and targeting the same model are
    /// guaranteed to be processed according to the scheduling order.
    pub fn schedule_keyed_periodic_event<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
    ) -> Result<ActionKey, SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        self.0.schedule_keyed_periodic_event_from(
            deadline,
            period,
            func,
            arg,
            address,
            GLOBAL_SCHEDULER_ORIGIN_ID,
        )
    }

    /// Requests the simulation to stop when advancing to the next step.
    pub fn halt(&mut self) {
        self.0.halt()
    }
}
impl fmt::Debug for Scheduler {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Only the current simulation time is meaningful to show; the queue
        // and halt flag are elided.
        let mut dbg = f.debug_struct("Scheduler");
        dbg.field("time", &self.time());
        dbg.finish_non_exhaustive()
    }
}
/// Managed handle to a scheduled action.
///
/// An `AutoActionKey` is a managed handle to a scheduled action that cancels
/// its associated action on drop.
#[derive(Debug)]
#[must_use = "dropping this key immediately cancels the associated action"]
pub struct AutoActionKey {
    is_cancelled: Arc<AtomicBool>,
}

impl Drop for AutoActionKey {
    fn drop(&mut self) {
        // Raise the shared cancellation flag so the pending action is skipped.
        self.is_cancelled.store(true, Ordering::Relaxed);
    }
}

/// Handle to a scheduled action.
///
/// An `ActionKey` can be used to cancel a scheduled action.
#[derive(Clone, Debug)]
#[must_use = "prefer unkeyed scheduling methods if the action is never cancelled"]
pub struct ActionKey {
    is_cancelled: Arc<AtomicBool>,
}

impl ActionKey {
    /// Creates a key for a pending action.
    pub(crate) fn new() -> Self {
        Self {
            is_cancelled: Arc::new(AtomicBool::new(false)),
        }
    }

    /// Checks whether the action was cancelled.
    pub(crate) fn is_cancelled(&self) -> bool {
        self.is_cancelled.load(Ordering::Relaxed)
    }

    /// Cancels the associated action.
    pub fn cancel(self) {
        self.is_cancelled.store(true, Ordering::Relaxed);
    }

    /// Converts action key to a managed key.
    pub fn into_auto(self) -> AutoActionKey {
        AutoActionKey {
            is_cancelled: self.is_cancelled,
        }
    }
}

impl PartialEq for ActionKey {
    /// Implements equality by considering clones to be equivalent, rather than
    /// keys with the same `is_cancelled` value.
    fn eq(&self, other: &Self) -> bool {
        Arc::ptr_eq(&self.is_cancelled, &other.is_cancelled)
    }
}

impl Eq for ActionKey {}

impl Hash for ActionKey {
    /// Implements `Hash` by considering clones to be equivalent, rather than
    /// keys with the same `is_cancelled` value.
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the address of the shared flag, consistently with `eq`.
        ptr::hash(Arc::as_ptr(&self.is_cancelled), state)
    }
}
/// Error returned when the scheduled time or the repetition period are invalid.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum SchedulingError {
    /// The scheduled time does not lie in the future of the current simulation
    /// time.
    InvalidScheduledTime,
    /// The repetition period is zero.
    NullRepetitionPeriod,
}

impl fmt::Display for SchedulingError {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        let msg = match self {
            Self::InvalidScheduledTime => {
                "the scheduled time should be in the future of the current simulation time"
            }
            Self::NullRepetitionPeriod => "the repetition period cannot be zero",
        };

        fmt.write_str(msg)
    }
}

impl Error for SchedulingError {}
/// A possibly periodic, possibly cancellable action that can be scheduled or
/// processed immediately.
///
/// `Actions` can be created from an [`EventSource`](crate::ports::EventSource)
/// or [`QuerySource`](crate::ports::QuerySource). They can be used to schedule
/// events and requests with [`Scheduler::schedule`], or to process events and
/// requests immediately with
/// [`Simulation::process`](crate::simulation::Simulation::process).
pub struct Action {
    // Type-erased implementation of the action.
    inner: Box<dyn ActionInner>,
}

impl Action {
    /// Creates a new `Action` from an `ActionInner`.
    pub(crate) fn new<S: ActionInner>(s: S) -> Self {
        Self { inner: Box::new(s) }
    }

    /// Reports whether the action was cancelled.
    pub(crate) fn is_cancelled(&self) -> bool {
        self.inner.is_cancelled()
    }

    /// If this is a periodic action, returns a boxed clone of this action and
    /// its repetition period; otherwise returns `None`.
    pub(crate) fn next(&self) -> Option<(Action, Duration)> {
        self.inner
            .next()
            .map(|(inner, period)| (Self { inner }, period))
    }

    /// Returns a boxed future that performs the action.
    pub(crate) fn into_future(self) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        self.inner.into_future()
    }

    /// Spawns the future that performs the action onto the provided executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    pub(crate) fn spawn_and_forget(self, executor: &Executor) {
        self.inner.spawn_and_forget(executor)
    }
}

impl fmt::Debug for Action {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Report the actual type name; "SchedulableEvent" was a leftover from
        // an earlier name of this type.
        f.debug_struct("Action").finish_non_exhaustive()
    }
}
/// Alias for the scheduler queue type.
///
/// Why use both time and origin ID as the key? The short answer is that this
/// allows preserving the relative ordering of events which have the same
/// origin (where the origin is either a model instance or the global
/// scheduler). The preservation of this ordering is implemented by the event
/// loop, which aggregates events with the same origin into single sequential
/// futures, thus ensuring that they are not executed concurrently.
pub(crate) type SchedulerQueue = PriorityQueue<(MonotonicTime, usize), Action>;
/// Internal implementation of the global scheduler.
#[derive(Clone)]
pub(crate) struct GlobalScheduler {
    // Queue of scheduled actions, shared with the simulation driver.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    // Read-only handle to the current simulation time.
    time: AtomicTimeReader,
    // Flag set by `halt` to request a stop at the next simulation step.
    is_halted: Arc<AtomicBool>,
}
impl GlobalScheduler {
    /// Creates a new `GlobalScheduler`.
    pub(crate) fn new(
        scheduler_queue: Arc<Mutex<SchedulerQueue>>,
        time: AtomicTimeReader,
        is_halted: Arc<AtomicBool>,
    ) -> Self {
        Self {
            scheduler_queue,
            time,
            is_halted,
        }
    }

    /// Returns the current simulation time.
    pub(crate) fn time(&self) -> MonotonicTime {
        // We use `read` rather than `try_read` because the scheduler can be
        // sent to another thread than the simulator's and could thus
        // potentially see a torn read if the simulator increments time
        // concurrently. The chances of this happening are very small since
        // simulation time is not changed frequently.
        self.time.read()
    }

    /// Schedules an action identified by its origin at a future time.
    ///
    /// This is the single point through which all scheduling flows: the other
    /// `schedule_*_from` methods construct their `Action` and then delegate
    /// here, so the locking invariant below is maintained in one place only.
    pub(crate) fn schedule_from(
        &self,
        deadline: impl Deadline,
        action: Action,
        origin_id: usize,
    ) -> Result<(), SchedulingError> {
        // The scheduler queue must always be locked when reading the time,
        // otherwise the following race could occur:
        // 1) this method reads the time and concludes that it is not too late
        //    to schedule the action,
        // 2) the `Simulation` object takes the lock, increments simulation time
        //    and runs the simulation step,
        // 3) this method takes the lock and schedules the now-outdated action.
        let mut scheduler_queue = self.scheduler_queue.lock().unwrap();
        let now = self.time();
        let time = deadline.into_time(now);
        if now >= time {
            return Err(SchedulingError::InvalidScheduledTime);
        }

        scheduler_queue.insert((time, origin_id), action);

        Ok(())
    }

    /// Schedules an event identified by its origin at a future time.
    pub(crate) fn schedule_event_from<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
        origin_id: usize,
    ) -> Result<(), SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let sender = address.into().0;
        let action = Action::new(OnceAction::new(process_event(func, arg, sender)));

        // Delegate the deadline check and queue insertion to `schedule_from`
        // (previously duplicated verbatim in every `schedule_*_from` method).
        self.schedule_from(deadline, action, origin_id)
    }

    /// Schedules a cancellable event identified by its origin at a future time
    /// and returns an event key.
    pub(crate) fn schedule_keyed_event_from<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
        origin_id: usize,
    ) -> Result<ActionKey, SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S>,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        let event_key = ActionKey::new();
        let sender = address.into().0;
        let action = Action::new(KeyedOnceAction::new(
            |ek| send_keyed_event(ek, func, arg, sender),
            event_key.clone(),
        ));

        self.schedule_from(deadline, action, origin_id)?;

        Ok(event_key)
    }

    /// Schedules a periodically recurring event identified by its origin at a
    /// future time.
    pub(crate) fn schedule_periodic_event_from<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
        origin_id: usize,
    ) -> Result<(), SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        // A null period is rejected before any action is constructed.
        if period.is_zero() {
            return Err(SchedulingError::NullRepetitionPeriod);
        }
        let sender = address.into().0;
        let action = Action::new(PeriodicAction::new(
            || process_event(func, arg, sender),
            period,
        ));

        self.schedule_from(deadline, action, origin_id)
    }

    /// Schedules a cancellable, periodically recurring event identified by its
    /// origin at a future time and returns an event key.
    pub(crate) fn schedule_keyed_periodic_event_from<M, F, T, S>(
        &self,
        deadline: impl Deadline,
        period: Duration,
        func: F,
        arg: T,
        address: impl Into<Address<M>>,
        origin_id: usize,
    ) -> Result<ActionKey, SchedulingError>
    where
        M: Model,
        F: for<'a> InputFn<'a, M, T, S> + Clone,
        T: Send + Clone + 'static,
        S: Send + 'static,
    {
        // A null period is rejected before any action is constructed.
        if period.is_zero() {
            return Err(SchedulingError::NullRepetitionPeriod);
        }
        let event_key = ActionKey::new();
        let sender = address.into().0;
        let action = Action::new(KeyedPeriodicAction::new(
            |ek| send_keyed_event(ek, func, arg, sender),
            period,
            event_key.clone(),
        ));

        self.schedule_from(deadline, action, origin_id)?;

        Ok(event_key)
    }

    /// Requests the simulation to stop when advancing to the next step.
    pub(crate) fn halt(&mut self) {
        self.is_halted.store(true, Ordering::Relaxed);
    }
}
impl fmt::Debug for GlobalScheduler {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Report the current type name; "SchedulerInner" was its former name.
        f.debug_struct("GlobalScheduler")
            .field("time", &self.time())
            .finish_non_exhaustive()
    }
}
/// Trait abstracting over the inner type of an action.
pub(crate) trait ActionInner: Send + 'static {
    /// Reports whether the action was cancelled.
    fn is_cancelled(&self) -> bool;

    /// If this is a periodic action, returns a boxed clone of this action and
    /// its repetition period; otherwise returns `None`.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)>;

    /// Returns a boxed future that performs the action.
    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>>;

    /// Spawns the future that performs the action onto the provided executor.
    ///
    /// This method is typically more efficient than spawning the boxed future
    /// from `into_future` since it can directly spawn the unboxed future.
    fn spawn_and_forget(self: Box<Self>, executor: &Executor);
}
/// An object that can be converted to a future performing a single
/// non-cancellable action.
///
/// Note that this particular action is in fact already a future: since the
/// future cannot be cancelled and the action does not need to be cloned,
/// there is no need to defer the construction of the future. This makes
/// `into_future` a trivial cast, which saves a boxing operation.
#[pin_project]
pub(crate) struct OnceAction<F> {
    // The wrapped future, structurally pinned so `poll` can project to it.
    #[pin]
    fut: F,
}

impl<F> OnceAction<F>
where
    F: Future<Output = ()> + Send + 'static,
{
    /// Constructs a new `OnceAction`.
    pub(crate) fn new(fut: F) -> Self {
        OnceAction { fut }
    }
}

impl<F> Future for OnceAction<F>
where
    F: Future,
{
    type Output = F::Output;

    // Transparently polls the inner future.
    #[inline(always)]
    fn poll(self: std::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        self.project().fut.poll(cx)
    }
}
impl<F> ActionInner for OnceAction<F>
where
    F: Future<Output = ()> + Send + 'static,
{
    // A `OnceAction` carries no key and therefore can never be cancelled.
    fn is_cancelled(&self) -> bool {
        false
    }

    // A `OnceAction` is not periodic, so there is no next occurrence.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
        None
    }

    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // No need for boxing, type coercion is enough here.
        Box::into_pin(self)
    }

    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        // The action is itself a future, so it can be spawned unboxed.
        executor.spawn_and_forget(*self);
    }
}
/// An object that can be converted to a future performing a non-cancellable,
/// periodic action.
pub(crate) struct PeriodicAction<G, F>
where
    G: (FnOnce() -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A clonable generator for the associated future.
    gen: G,
    /// The action repetition period.
    period: Duration,
}

impl<G, F> PeriodicAction<G, F>
where
    G: (FnOnce() -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// Constructs a new `PeriodicAction`.
    pub(crate) fn new(gen: G, period: Duration) -> Self {
        Self { gen, period }
    }
}

impl<G, F> ActionInner for PeriodicAction<G, F>
where
    G: (FnOnce() -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    // An unkeyed periodic action can never be cancelled.
    fn is_cancelled(&self) -> bool {
        false
    }

    // The next occurrence is a clone of this action, due again after `period`.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
        let event = Box::new(Self::new(self.gen.clone(), self.period));

        Some((event, self.period))
    }

    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // The future is built lazily from the generator.
        Box::pin((self.gen)())
    }

    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        executor.spawn_and_forget((self.gen)());
    }
}
/// An object that can be converted to a future performing a single, cancellable
/// action.
pub(crate) struct KeyedOnceAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A generator for the associated future.
    gen: G,
    /// The event cancellation key.
    event_key: ActionKey,
}

impl<G, F> KeyedOnceAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// Constructs a new `KeyedOnceAction`.
    pub(crate) fn new(gen: G, event_key: ActionKey) -> Self {
        Self { gen, event_key }
    }
}

impl<G, F> ActionInner for KeyedOnceAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    // Cancellation state lives in the shared key.
    fn is_cancelled(&self) -> bool {
        self.event_key.is_cancelled()
    }

    // A one-shot action has no next occurrence.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
        None
    }

    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        // The key is handed to the generator so the future can re-check
        // cancellation at execution time.
        Box::pin((self.gen)(self.event_key))
    }

    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        executor.spawn_and_forget((self.gen)(self.event_key));
    }
}
/// An object that can be converted to a future performing a periodic,
/// cancellable action.
pub(crate) struct KeyedPeriodicAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// A clonable generator for the associated future.
    gen: G,
    /// The repetition period.
    period: Duration,
    /// The event cancellation key.
    event_key: ActionKey,
}

impl<G, F> KeyedPeriodicAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    /// Constructs a new `KeyedPeriodicAction`.
    pub(crate) fn new(gen: G, period: Duration, event_key: ActionKey) -> Self {
        Self {
            gen,
            period,
            event_key,
        }
    }
}

impl<G, F> ActionInner for KeyedPeriodicAction<G, F>
where
    G: (FnOnce(ActionKey) -> F) + Clone + Send + 'static,
    F: Future<Output = ()> + Send + 'static,
{
    // Cancellation state lives in the shared key.
    fn is_cancelled(&self) -> bool {
        self.event_key.is_cancelled()
    }

    // The next occurrence clones the generator and the key, so cancelling the
    // key cancels all future occurrences as well.
    fn next(&self) -> Option<(Box<dyn ActionInner>, Duration)> {
        let event = Box::new(Self::new(
            self.gen.clone(),
            self.period,
            self.event_key.clone(),
        ));

        Some((event, self.period))
    }

    fn into_future(self: Box<Self>) -> Pin<Box<dyn Future<Output = ()> + Send>> {
        Box::pin((self.gen)(self.event_key))
    }

    fn spawn_and_forget(self: Box<Self>, executor: &Executor) {
        executor.spawn_and_forget((self.gen)(self.event_key));
    }
}
/// Asynchronously sends a non-cancellable event to a model input.
pub(crate) async fn process_event<M, F, T, S>(func: F, arg: T, sender: Sender<M>)
where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + 'static,
{
    // NOTE(review): the send result is deliberately discarded (best-effort
    // delivery) -- confirm against `channel::Sender::send` semantics.
    let _ = sender
        .send(
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = func.call(model, arg, scheduler);

                // Reuse the receiver-provided allocation to store the future.
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
/// Asynchronously sends a cancellable event to a model input.
///
/// The input method `func` is only invoked if `event_key` has not been
/// cancelled by the time the event is processed; the cancellation check is
/// therefore performed at execution time, not at send time. Send errors are
/// deliberately ignored (`let _ =`), e.g. if the mailbox was closed.
pub(crate) async fn send_keyed_event<M, F, T, S>(
    event_key: ActionKey,
    func: F,
    arg: T,
    sender: Sender<M>,
) where
    M: Model,
    F: for<'a> InputFn<'a, M, T, S>,
    T: Send + Clone + 'static,
{
    let _ = sender
        .send(
            // The future is built inside the recycled box; `coerce_box!`
            // coerces it to the expected unsized boxed-future type.
            move |model: &mut M,
                  scheduler,
                  recycle_box: RecycleBox<()>|
                  -> RecycleBox<dyn Future<Output = ()> + Send + '_> {
                let fut = async move {
                    // Only perform the call if the event wasn't cancelled.
                    if !event_key.is_cancelled() {
                        func.call(model, arg, scheduler).await;
                    }
                };
                coerce_box!(RecycleBox::recycle(recycle_box, fut))
            },
        )
        .await;
}
#[cfg(all(test, not(nexosim_loom)))]
impl GlobalScheduler {
    /// Creates a dummy scheduler for testing purposes.
    ///
    /// The scheduler is backed by an empty priority queue, a time reader
    /// initialized at the epoch, and an initially-cleared halt flag.
    pub(crate) fn new_dummy() -> Self {
        let queue = Arc::new(Mutex::new(PriorityQueue::new()));
        let time_reader = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH)).reader();
        let halt_flag = Arc::new(AtomicBool::new(false));

        GlobalScheduler::new(queue, time_reader, halt_flag)
    }
}

View File

@ -0,0 +1,204 @@
use std::fmt;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Mutex};
use std::time::Duration;
use crate::channel::ChannelObserver;
use crate::executor::{Executor, SimulationContext};
use crate::model::ProtoModel;
use crate::time::{AtomicTime, Clock, MonotonicTime, NoClock, SyncStatus, TearableAtomicTime};
use crate::util::priority_queue::PriorityQueue;
use crate::util::sync_cell::SyncCell;
use super::{
add_model, ExecutionError, GlobalScheduler, Mailbox, Scheduler, SchedulerQueue, Signal,
Simulation,
};
/// Builder for a multi-threaded, discrete-event simulation.
pub struct SimInit {
    /// Executor the simulation runs on (single- or multi-threaded).
    executor: Executor,
    /// Shared queue of scheduled events.
    scheduler_queue: Arc<Mutex<SchedulerQueue>>,
    /// Writable simulation time; reader handles are shared with schedulers.
    time: AtomicTime,
    /// Flag used to signal a simulation halt.
    is_halted: Arc<AtomicBool>,
    /// Clock synchronizing the simulation (defaults to `NoClock`).
    clock: Box<dyn Clock + 'static>,
    /// Maximum tolerated clock lag before `ExecutionError::OutOfSync`;
    /// `None` disables the check.
    clock_tolerance: Option<Duration>,
    /// Wall-clock timeout per simulation step; `Duration::ZERO` disables it.
    timeout: Duration,
    /// Channel observers, one per registered model, keyed by model name.
    observers: Vec<(String, Box<dyn ChannelObserver>)>,
    /// Signal shared with the executor to abort execution.
    abort_signal: Signal,
    /// Names of all registered models.
    model_names: Vec<String>,
}
impl SimInit {
    /// Creates a builder for a multithreaded simulation running on all
    /// available logical threads.
    pub fn new() -> Self {
        Self::with_num_threads(num_cpus::get())
    }

    /// Creates a builder for a simulation running on the specified number of
    /// threads.
    ///
    /// Note that the number of worker threads is automatically constrained to
    /// be between 1 and `usize::BITS` (inclusive). It is always set to 1 on
    /// `wasm` targets.
    pub fn with_num_threads(num_threads: usize) -> Self {
        let num_threads = if cfg!(target_family = "wasm") {
            1
        } else {
            num_threads.clamp(1, usize::BITS as usize)
        };
        // Simulation time starts at the epoch until `init` overwrites it.
        let time = SyncCell::new(TearableAtomicTime::new(MonotonicTime::EPOCH));
        let simulation_context = SimulationContext {
            #[cfg(feature = "tracing")]
            time_reader: time.reader(),
        };
        let abort_signal = Signal::new();
        // Use the single-threaded executor when only one worker is requested.
        let executor = if num_threads == 1 {
            Executor::new_single_threaded(simulation_context, abort_signal.clone())
        } else {
            Executor::new_multi_threaded(num_threads, simulation_context, abort_signal.clone())
        };
        Self {
            executor,
            scheduler_queue: Arc::new(Mutex::new(PriorityQueue::new())),
            time,
            is_halted: Arc::new(AtomicBool::new(false)),
            clock: Box::new(NoClock::new()),
            clock_tolerance: None,
            timeout: Duration::ZERO,
            observers: Vec::new(),
            abort_signal,
            model_names: Vec::new(),
        }
    }

    /// Adds a model and its mailbox to the simulation bench.
    ///
    /// The `name` argument needs not be unique. The use of the dot character in
    /// the name is possible but discouraged as it can cause confusion with the
    /// fully qualified name of a submodel. If an empty string is provided, it
    /// is replaced by the string `<unknown>`.
    pub fn add_model<P: ProtoModel>(
        mut self,
        model: P,
        mailbox: Mailbox<P::Model>,
        name: impl Into<String>,
    ) -> Self {
        let mut name = name.into();
        if name.is_empty() {
            name = String::from("<unknown>");
        };
        // Track the model's channel through an observer, keyed by its name.
        self.observers
            .push((name.clone(), Box::new(mailbox.0.observer())));
        let scheduler = GlobalScheduler::new(
            self.scheduler_queue.clone(),
            self.time.reader(),
            self.is_halted.clone(),
        );
        add_model(
            model,
            mailbox,
            name,
            scheduler,
            &self.executor,
            &self.abort_signal,
            &mut self.model_names,
        );
        self
    }

    /// Synchronizes the simulation with the provided [`Clock`].
    ///
    /// If the clock isn't explicitly set then the default [`NoClock`] is used,
    /// resulting in the simulation running as fast as possible.
    pub fn set_clock(mut self, clock: impl Clock + 'static) -> Self {
        self.clock = Box::new(clock);
        self
    }

    /// Specifies a tolerance for clock synchronization.
    ///
    /// When a clock synchronization tolerance is set, then any report of
    /// synchronization loss by [`Clock::synchronize`] that exceeds the
    /// specified tolerance will trigger an [`ExecutionError::OutOfSync`] error.
    pub fn set_clock_tolerance(mut self, tolerance: Duration) -> Self {
        self.clock_tolerance = Some(tolerance);
        self
    }

    /// Sets a timeout for the call to [`SimInit::init`] and for any subsequent
    /// simulation step.
    ///
    /// The timeout corresponds to the maximum wall clock time allocated for the
    /// completion of a single simulation step before an
    /// [`ExecutionError::Timeout`] error is raised.
    ///
    /// A null duration disables the timeout, which is the default behavior.
    ///
    /// See also [`Simulation::set_timeout`].
    #[cfg(not(target_family = "wasm"))]
    pub fn set_timeout(mut self, timeout: Duration) -> Self {
        self.timeout = timeout;
        self
    }

    /// Builds a simulation initialized at the specified simulation time,
    /// executing the [`Model::init`](crate::model::Model::init) method on all
    /// model initializers.
    ///
    /// The simulation object and its associated scheduler are returned upon
    /// success.
    pub fn init(
        mut self,
        start_time: MonotonicTime,
    ) -> Result<(Simulation, Scheduler), ExecutionError> {
        self.time.write(start_time);
        // Synchronize the clock with the start time; a reported lag is only
        // an error when it exceeds the configured tolerance (if any).
        if let SyncStatus::OutOfSync(lag) = self.clock.synchronize(start_time) {
            if let Some(tolerance) = &self.clock_tolerance {
                if &lag > tolerance {
                    return Err(ExecutionError::OutOfSync(lag));
                }
            }
        }
        let scheduler = Scheduler::new(
            self.scheduler_queue.clone(),
            self.time.reader(),
            self.is_halted.clone(),
        );
        let mut simulation = Simulation::new(
            self.executor,
            self.scheduler_queue,
            self.time,
            self.clock,
            self.clock_tolerance,
            self.timeout,
            self.observers,
            self.model_names,
            self.is_halted,
        );
        // Run the model initializers before handing the simulation out.
        simulation.run()?;
        Ok((simulation, scheduler))
    }
}
impl Default for SimInit {
fn default() -> Self {
Self::new()
}
}
impl fmt::Debug for SimInit {
    // Opaque representation: fields are intentionally omitted, so this
    // prints `SimInit { .. }`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("SimInit").finish_non_exhaustive()
    }
}

81
nexosim/src/time.rs Normal file
View File

@ -0,0 +1,81 @@
//! Simulation time and clocks.
//!
//! This module provides most notably:
//!
//! * [`MonotonicTime`]: a monotonic timestamp based on the [TAI] time standard,
//! * [`Clock`]: a trait for types that can synchronize a simulation,
//! implemented for instance by [`SystemClock`] and [`AutoSystemClock`].
//!
//! [TAI]: https://en.wikipedia.org/wiki/International_Atomic_Time
//!
//!
//! # Examples
//!
//! An alarm clock model that prints a message when the simulation time reaches
//! the specified timestamp.
//!
//! ```
//! use nexosim::model::{Context, Model};
//! use nexosim::time::MonotonicTime;
//!
//! // An alarm clock model.
//! pub struct AlarmClock {
//! msg: String
//! }
//!
//! impl AlarmClock {
//! // Creates a new alarm clock.
//! pub fn new(msg: String) -> Self {
//! Self { msg }
//! }
//!
//! // Sets an alarm [input port].
//! pub fn set(&mut self, setting: MonotonicTime, cx: &mut Context<Self>) {
//! if cx.schedule_event(setting, Self::ring, ()).is_err() {
//! println!("The alarm clock can only be set for a future time");
//! }
//! }
//!
//! // Rings the alarm [private input port].
//! fn ring(&mut self) {
//! println!("{}", self.msg);
//! }
//! }
//!
//! impl Model for AlarmClock {}
//! ```
mod clock;
mod monotonic_time;
pub use tai_time::MonotonicTime;
pub use clock::{AutoSystemClock, Clock, NoClock, SyncStatus, SystemClock};
pub(crate) use monotonic_time::TearableAtomicTime;
pub(crate) type AtomicTime = crate::util::sync_cell::SyncCell<TearableAtomicTime>;
pub(crate) type AtomicTimeReader = crate::util::sync_cell::SyncCellReader<TearableAtomicTime>;
/// Trait abstracting over time-absolute and time-relative deadlines.
///
/// This trait is implemented by [`std::time::Duration`] and
/// [`MonotonicTime`].
pub trait Deadline {
    /// Makes this deadline into an absolute timestamp, using the provided
    /// current time as a reference.
    fn into_time(self, now: MonotonicTime) -> MonotonicTime;
}
impl Deadline for std::time::Duration {
    /// A duration is a time-relative deadline: the absolute timestamp is the
    /// current time offset by this duration.
    #[inline(always)]
    fn into_time(self, now: MonotonicTime) -> MonotonicTime {
        now + self
    }
}
impl Deadline for MonotonicTime {
    /// A `MonotonicTime` is already an absolute deadline; the current time is
    /// ignored.
    #[inline(always)]
    fn into_time(self, _: MonotonicTime) -> MonotonicTime {
        self
    }
}

View File

@ -1,25 +1,52 @@
use std::time::{Duration, Instant, SystemTime};
use tai_time::MonotonicClock;
use crate::time::MonotonicTime;
/// A type that can be used to synchronize a simulation.
///
/// This trait abstract over the different types of clocks, such as
/// This trait abstracts over different types of clocks, such as
/// as-fast-as-possible and real-time clocks.
///
/// A clock can be associated to a simulation at initialization time by calling
/// [`SimInit::init_with_clock()`](crate::simulation::SimInit::init_with_clock).
/// A clock can be associated to a simulation prior to initialization by calling
/// [`SimInit::set_clock`](crate::simulation::SimInit::set_clock).
pub trait Clock: Send {
    /// Blocks until the deadline.
    fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus;

    /// Returns the simulation time as of the time of the call.
    fn now(&self) -> MonotonicTime;
}
impl<C: Clock + ?Sized> Clock for &mut C {
    /// Delegates to the referenced clock.
    fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
        (**self).synchronize(deadline)
    }

    /// Delegates to the referenced clock.
    fn now(&self) -> MonotonicTime {
        (**self).now()
    }
}
impl<C: Clock + ?Sized> Clock for Box<C> {
    /// Delegates to the boxed clock.
    fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
        (**self).synchronize(deadline)
    }

    /// Delegates to the boxed clock.
    fn now(&self) -> MonotonicTime {
        (**self).now()
    }
}
/// The current synchronization status of a clock.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[must_use]
pub enum SyncStatus {
    /// The clock is synchronized.
    Synchronized,
    /// The deadline has already elapsed and lags behind the current clock time
    /// by the duration given in the payload.
    OutOfSync(Duration),
}
@ -42,6 +69,10 @@ impl Clock for NoClock {
fn synchronize(&mut self, _: MonotonicTime) -> SyncStatus {
    // `NoClock` never blocks and always reports itself as synchronized,
    // letting the simulation run as fast as possible.
    SyncStatus::Synchronized
}
/// Returns the epoch: `NoClock` has no time reference of its own.
fn now(&self) -> MonotonicTime {
    // Using the `EPOCH` constant avoids the needless `unwrap` of
    // `MonotonicTime::new(0, 0)`.
    MonotonicTime::EPOCH
}
}
/// A real-time [`Clock`] based on the system's monotonic clock.
@ -49,10 +80,7 @@ impl Clock for NoClock {
/// This clock accepts an arbitrary reference time and remains synchronized with
/// the system's monotonic clock.
#[derive(Copy, Clone, Debug)]
pub struct SystemClock {
wall_clock_ref: Instant,
simulation_ref: MonotonicTime,
}
pub struct SystemClock(MonotonicClock);
impl SystemClock {
/// Constructs a `SystemClock` with an offset between simulation clock and
@ -66,10 +94,10 @@ impl SystemClock {
/// ```
/// use std::time::{Duration, Instant};
///
/// use asynchronix::simulation::SimInit;
/// use asynchronix::time::{MonotonicTime, SystemClock};
/// use nexosim::simulation::SimInit;
/// use nexosim::time::{MonotonicTime, SystemClock};
///
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
///
/// // Make the simulation start in 1s.
/// let clock = SystemClock::from_instant(t0, Instant::now() + Duration::from_secs(1));
@ -77,13 +105,14 @@ impl SystemClock {
/// let simu = SimInit::new()
/// // .add_model(...)
/// // .add_model(...)
/// .init_with_clock(t0, clock);
/// .set_clock(clock)
/// .init(t0);
/// ```
pub fn from_instant(simulation_ref: MonotonicTime, wall_clock_ref: Instant) -> Self {
Self {
wall_clock_ref,
Self(MonotonicClock::init_from_instant(
simulation_ref,
}
wall_clock_ref,
))
}
/// Constructs a `SystemClock` with an offset between simulation clock and
@ -93,7 +122,7 @@ impl SystemClock {
/// The provided reference time may lie in the past or in the future.
///
/// Note that, even though the wall clock reference is specified with the
/// (non-monotonic) system clock, the [`synchronize()`](Clock::synchronize)
/// (non-monotonic) system clock, the [`synchronize`](Clock::synchronize)
/// method will still use the system's _monotonic_ clock. This constructor
/// makes a best-effort attempt at synchronizing the monotonic clock with
/// the non-monotonic system clock _at construction time_, but this
@ -106,10 +135,10 @@ impl SystemClock {
/// ```
/// use std::time::{Duration, UNIX_EPOCH};
///
/// use asynchronix::simulation::SimInit;
/// use asynchronix::time::{MonotonicTime, SystemClock};
/// use nexosim::simulation::SimInit;
/// use nexosim::time::{MonotonicTime, SystemClock};
///
/// let t0 = MonotonicTime::new(1_234_567_890, 0);
/// let t0 = MonotonicTime::new(1_234_567_890, 0).unwrap();
///
/// // Make the simulation start at the next full second boundary.
/// let now_secs = UNIX_EPOCH.elapsed().unwrap().as_secs();
@ -120,58 +149,14 @@ impl SystemClock {
/// let simu = SimInit::new()
/// // .add_model(...)
/// // .add_model(...)
/// .init_with_clock(t0, clock);
/// .set_clock(clock)
/// .init(t0);
/// ```
pub fn from_system_time(simulation_ref: MonotonicTime, wall_clock_ref: SystemTime) -> Self {
// Select the best-correlated `Instant`/`SystemTime` pair from several
// samples to improve robustness towards possible thread suspension
// between the calls to `SystemTime::now()` and `Instant::now()`.
const SAMPLES: usize = 3;
let mut last_instant = Instant::now();
let mut min_delta = Duration::MAX;
let mut ref_time = None;
// Select the best-correlated instant/date pair.
for _ in 0..SAMPLES {
// The inner loop is to work around monotonic clock platform bugs
// that may cause `checked_duration_since` to fail.
let (date, instant, delta) = loop {
let date = SystemTime::now();
let instant = Instant::now();
let delta = instant.checked_duration_since(last_instant);
last_instant = instant;
if let Some(delta) = delta {
break (date, instant, delta);
}
};
// Store the current instant/date if the time elapsed since the last
// measurement is shorter than the previous candidate.
if min_delta > delta {
min_delta = delta;
ref_time = Some((instant, date));
}
}
// Set the selected instant/date as the wall clock reference and adjust
// the simulation reference accordingly.
let (instant_ref, date_ref) = ref_time.unwrap();
let simulation_ref = if date_ref > wall_clock_ref {
let correction = date_ref.duration_since(wall_clock_ref).unwrap();
simulation_ref + correction
} else {
let correction = wall_clock_ref.duration_since(date_ref).unwrap();
simulation_ref - correction
};
Self {
wall_clock_ref: instant_ref,
Self(MonotonicClock::init_from_system_time(
simulation_ref,
}
wall_clock_ref,
))
}
}
@ -179,22 +164,18 @@ impl Clock for SystemClock {
/// Blocks until the system time corresponds to the specified simulation
/// time.
fn synchronize(&mut self, deadline: MonotonicTime) -> SyncStatus {
let target_time = if deadline >= self.simulation_ref {
self.wall_clock_ref + deadline.duration_since(self.simulation_ref)
} else {
self.wall_clock_ref - self.simulation_ref.duration_since(deadline)
};
let now = self.0.now();
if now <= deadline {
spin_sleep::sleep(deadline.duration_since(now));
let now = Instant::now();
match target_time.checked_duration_since(now) {
Some(sleep_duration) => {
spin_sleep::sleep(sleep_duration);
SyncStatus::Synchronized
}
None => SyncStatus::OutOfSync(now.duration_since(target_time)),
return SyncStatus::Synchronized;
}
SyncStatus::OutOfSync(now.duration_since(deadline))
}
/// Returns the simulation time derived from the inner monotonic clock.
fn now(&self) -> MonotonicTime {
    self.0.now()
}
}
@ -202,8 +183,8 @@ impl Clock for SystemClock {
/// monotonic clock.
///
/// This clock is similar to [`SystemClock`] except that the first call to
/// [`synchronize()`](Clock::synchronize) never blocks and implicitly defines
/// the reference time. In other words, the clock starts running on its first
/// [`synchronize`](Clock::synchronize) never blocks and implicitly defines the
/// reference time. In other words, the clock starts running on its first
/// invocation.
#[derive(Copy, Clone, Debug, Default)]
pub struct AutoSystemClock {
@ -232,4 +213,35 @@ impl Clock for AutoSystemClock {
Some(clock) => clock.synchronize(deadline),
}
}
fn now(&self) -> MonotonicTime {
    // The inner clock only exists after the first `synchronize` call starts
    // it; calling `now` before that is a usage error, so panic with a clear
    // message rather than a bare `unwrap`.
    self.inner
        .expect("`AutoSystemClock::now` called before the clock was started by `synchronize`")
        .now()
}
}
#[cfg(test)]
mod test {
    use super::*;

    /// Checks that `SystemClock::synchronize` blocks until approximately the
    /// requested wall-clock deadline.
    #[test]
    fn smoke_system_clock() {
        let t0 = MonotonicTime::EPOCH;
        const TOLERANCE: f64 = 0.0005; // [s]

        let now = Instant::now();
        let mut clock = SystemClock::from_instant(t0, now);
        let t1 = t0 + Duration::from_millis(200);
        assert_eq!(clock.synchronize(t1), SyncStatus::Synchronized);

        let elapsed = now.elapsed().as_secs_f64();
        let dt = t1.duration_since(t0).as_secs_f64();
        // One-sided check: this only fails if the clock returned early; a
        // slow system overshooting the deadline does not fail the test,
        // which keeps it robust on loaded CI machines.
        assert!(
            (dt - elapsed) <= TOLERANCE,
            "Expected t = {:.6}s +/- {:.6}s, measured t = {:.6}s",
            dt,
            TOLERANCE,
            elapsed,
        );
    }
}

Some files were not shown because too many files have changed in this diff Show More