107 Commits

SHA1        Date                        Message  [CI status]
0b2d4f6187  2024-05-19 13:19:23 +02:00  Merge pull request 'satrs v0.2.1' (#190) from prep-satrs-v0.2.1 into main  [CI: ok]
f7016b940a  2024-05-19 08:14:31 +02:00  changelog  [CI: failed]
397ecd0c40  2024-05-19 08:13:32 +02:00  prep patch release
422f2c11ab  2024-05-18 12:46:58 +02:00  Merge pull request 'removed unsafe block which is not necessary' (#189) from remove-unnecessary-unsafe-block into main  [CI: ok]
37e945fd91  2024-05-18 12:46:44 +02:00  Merge branch 'main' into remove-unnecessary-unsafe-block
45379858f0  2024-05-18 12:44:53 +02:00  Merge pull request 'TCP server config default improvements' (#187) from tcp-server-cfg-improvements into main  [CI: ok]
7c194ab543  2024-05-18 12:44:42 +02:00  Merge branch 'main' into tcp-server-cfg-improvements  [CI: pending]
bca1d7292a  2024-05-13 17:01:26 +02:00  removed unsafe block which is not necessary  [CI: ok]
cdcb9cae1c  2024-05-13 15:57:00 +02:00  Merge pull request 'cross ref docs for events' (#188) from cross-ref-docs-for-events into main  [CI: ok]
9dcbd42862  2024-05-13 15:36:09 +02:00  cross ref docs  [CI: ok]
da05efc16d  2024-05-13 15:31:51 +02:00  TCP server config default improvements  [CI: pending]
e38e25a998  2024-05-13 15:29:39 +02:00  Merge pull request 'update the satrs example graph' (#186) from satrs-example-graph-update into main  [CI: ok]
14b381cf4a  2024-05-10 17:03:46 +02:00  update the satrs example graph  [CI: ok]
3746e9ebb0  2024-05-08 14:58:13 +02:00  Merge pull request 'Add timestamp to SimRequest' (#140) from add-timestamp-to-sim-request into main  [CI: ok]
d2fc783562  2024-05-08 14:57:12 +02:00  Merge remote-tracking branch 'origin/main' into add-timestamp-to-sim-request  [CI: pending]
282f799203  2024-05-02 14:57:14 +02:00  Merge pull request 'prep v0.2.0' (#184) from prep_v0.2.0 into main  [CI: ok]
46dbb4309b  2024-05-02 14:44:22 +02:00  new clippy check  [CI: pending]
42d1257e83  2024-05-02 14:39:30 +02:00  prepare next release v0.2.0  [CI: pending]
583f6ce4d2  2024-05-02 13:41:55 +02:00  Merge pull request 'small robustness fix' (#183) from robustness-fix into main  [CI: ok]
408803fe86  2024-05-02 13:41:27 +02:00  small robustness fix  [CI: pending]
9ffe4d0ae0  2024-05-02 12:28:39 +02:00  Merge pull request 'smaller improvements' (#182) from smaller-improvements into main  [CI: ok]
e37061dcf0  2024-05-02 12:28:09 +02:00  smaller improvements
3a2ac11407  2024-05-02 12:02:02 +02:00  Merge pull request 'bounded the PUS stack hot loop' (#181) from pus-hot-loop-bounding into main  [CI: ok]
23327a7786  2024-05-02 12:01:24 +02:00  bounded the PUS stack hot loop  [CI: pending]
89d5a1022f  2024-05-02 11:59:26 +02:00  Merge pull request 'optimize PUS stack code' (#180) from optimize-pus-stack-code into main  [CI: ok]
a00c843698  2024-05-02 11:58:46 +02:00  optimize PUS stack code  [CI: pending]
c586fd7fef  2024-05-02 11:29:11 +02:00  Merge pull request 'try unifying some direct PUS handler code' (#179) from unify-some-example-code into main  [CI: ok]
7e78e70a17  2024-05-02 11:14:05 +02:00  try unifying some direct PUS handler code  [CI: ok]
424dfc439c  2024-05-02 10:01:16 +02:00  Merge pull request 'simplified PUS stack' (#178) from simplify-pus-stack into main  [CI: ok]
45eb2f1343  2024-05-01 21:16:26 +02:00  cargo fmt  [CI: ok]
736eb74e66  2024-05-01 21:13:08 +02:00  simplified PUS stack  [CI: pending]
29f71c2a57  2024-04-30 15:42:05 +02:00  Merge pull request 'Reworked generic parameter handling for PUS service 1 and 5' (#175) from rework-generic-params-for-pus into main  [CI: ok]
f0d08b65a4  2024-04-30 13:35:08 +02:00  Merge branch 'main' into rework-generic-params-for-pus  [CI: ok]
c7a74a844c  2024-04-30 13:31:46 +02:00  Merge pull request 'renamed thread name' (#176) from small-tweak into main  [CI: ok]
9c60427f89  2024-04-30 13:29:55 +02:00  Reworked generic parameter handling for PUS service 1 and 5  [CI: pending]
958ab9bab6  2024-04-25 11:11:31 +02:00  renamed thread name  [CI: ok]
312849bddb  2024-04-24 19:34:33 +02:00  Merge pull request 'More improvements for Event API' (#173) from improve-event-api into main  [CI: ok]
b0159a3ba7  2024-04-24 19:18:45 +02:00  prep next release candidate  [CI: ok]
c477739f6d  2024-04-24 18:50:08 +02:00  more improvements for API, tests for example event module  [CI: ok]
b7ce039406  2024-04-24 18:36:00 +02:00  add optional defmt support for events  [CI: ok]
4736d40997  2024-04-24 15:58:00 +02:00  Merge pull request 'simplified event management' (#172) from simplify-event-management into main  [CI: ok]
5ec5124ea3  2024-04-24 14:30:45 +02:00  Updated events modules and docs  [CI: ok]
5e43259d4f  2024-04-23 16:36:30 +02:00  Merge branch 'main' into add-timestamp-to-sim-request  [CI: ok]
bfaddd0ebb  2024-04-23 16:32:03 +02:00  Merge pull request 'prep next release' (#171) from pre-v0.2.0-rc.4 into main  [CI: ok]
423a068736  2024-04-23 14:55:19 +02:00  prep next release  [CI: ok]
8022af1bf2  2024-04-23 14:52:04 +02:00  Merge pull request 'update Python client for example' (#170) from update-example-pyclient into main  [CI: ok]
acd2260dfd  2024-04-23 14:22:50 +02:00  update Python client for example  [CI: ok]
e5ee698dc4  2024-04-23 13:21:41 +02:00  Merge pull request 'TCP server improvements' (#169) from tcp-ip-improvements into main  [CI: failed]
e8907c74d4  2024-04-23 11:23:00 +02:00  changelog  [CI: ok]
536051e05b  2024-04-22 20:29:14 +02:00  improvements and fixes  [CI: ok]
701db659e9  2024-04-22 15:47:58 +02:00  Merge pull request 'formatting' (#168) from fmt into main  [CI: ok]
4b8e54b91b  2024-04-22 10:42:49 +02:00  formatting  [CI: pending]
870d60cfd6  2024-04-22 10:23:12 +02:00  Merge pull request 'bugfix and improvements for CCSDS SP decoder' (#167) from ccsds-decoder-bugfix into main  [CI: failed]
9e62e4292c  2024-04-20 11:19:46 +02:00  bugfix and improvements for CCSDS SP decoder  [CI: pending]
b2e77fbc09  2024-04-17 20:42:09 +02:00  Merge pull request 'requires another hotfix' (#166) from and-another-docs-rs-hotfix into main  [CI: ok]
5371928496  2024-04-17 20:41:30 +02:00  docs_rs build argument hotfix  [CI: pending]
31cddbd99b  2024-04-17 18:56:21 +02:00  Merge pull request 'bump msrv version' (#165) from bump-msrv into main  [CI: ok]
7c00e13e70  2024-04-17 18:10:32 +02:00  bump msrv version  [CI: ok]
aa72063454  2024-04-17 18:03:28 +02:00  Merge pull request 'prepare next release candidate' (#164) from prep-v0.2.0-rc.2 into main  [CI: ok]
7b37b76695  2024-04-17 17:19:38 +02:00  prepare next release candidate  [CI: ok]
ea5d95c12d  2024-04-17 17:09:36 +02:00  Merge pull request 'why is this an issue for docs-rs?' (#163) from fix-for-docs-build-docs-rs into main  [CI: ok]
c62adbb300  2024-04-17 16:41:45 +02:00  Merge branch 'main' into fix-for-docs-build-docs-rs  [CI: pending]
9242b8a607  2024-04-17 16:37:34 +02:00  Merge pull request 'prepare MIB release' (#162) from prepare-mib-release into main  [CI: ok]
4a27d2605d  2024-04-17 16:34:56 +02:00  why is this an issue for docs-rs?  [CI: pending]
8195245481  2024-04-17 16:17:30 +02:00  prepare MIB release  [CI: pending]
f6f7519625  2024-04-17 16:03:03 +02:00  Merge pull request 'small cleanup' (#161) from small-cargo-toml-cleaning into main  [CI: ok]
0f0fbc1a18  2024-04-17 15:17:46 +02:00  small cleanup  [CI: ok]
6e55e2ac95  2024-04-17 14:58:01 +02:00  Merge pull request 'Prepare next releases' (#160) from prep-next-satrs-releases into main  [CI: ok]
2f96bfe992  2024-04-17 10:03:17 +02:00  changelog sat-rs  [CI: ok]
52aafb3aab  2024-04-17 10:01:46 +02:00  prep next releases  [CI: ok]
6ce9cb5ead  2024-04-16 21:31:21 +02:00  Merge pull request 'use released satrs-shared' (#159) from use-released-satrs-shared into main  [CI: ok]
273f79d1e6  2024-04-16 21:07:51 +02:00  use release satrs-shared  [CI: ok]
622221835e  2024-04-16 20:54:59 +02:00  Merge pull request 'allow sat-rs shared spacepackets range' (#158) from satrs-shared-spacepackets-range into main  [CI: ok]
e396ad2e7a  2024-04-16 19:52:32 +02:00  small fix  [CI: ok]
772927d50b  2024-04-16 19:50:46 +02:00  allow spacepackets range  [CI: ok]
be9a45e55f  2024-04-16 19:48:12 +02:00  Merge pull request 'changelog satrs-shared v0.1.3' (#157) from changelog-satrs-shared into main  [CI: ok]
eee8a69550  2024-04-16 19:47:36 +02:00  changelog satrs-shared v0.1.3  [CI: pending]
f7a6d3ce47  2024-04-16 19:46:17 +02:00  Merge pull request 'bump spacepackets to v0.11.0' (#156) from bump-spacepackets into main  [CI: ok]
df97a3a93e  2024-04-16 19:39:07 +02:00  small adjustment  [CI: pending]
42750e08c0  2024-04-16 19:26:46 +02:00  bump spacepackets to v0.11.0  [CI: ok]
786671bbd7  2024-04-16 11:10:52 +02:00  Merge pull request 're-worked TMTC modules' (#155) from rework-tmtc-modules into main  [CI: ok]
63f37f0917  2024-04-16 11:04:22 +02:00  Re-worked TMTC modules  [CI: ok]
8cfe3b81e7  2024-04-13 15:10:14 +02:00  Merge pull request 'bugfix for targeted services' (#154) from bugfix-targeted-services into main  [CI: failed]
de50bec562  2024-04-10 17:18:53 +02:00  bugfix for targeted services  [CI: ok]
39ab9fa27b  2024-04-10 17:17:13 +02:00  Merge pull request 'closure param name tweak' (#153) from small-example-tweak into main  [CI: ok]
1dbc81a8f5  2024-04-10 15:51:08 +02:00  closure param name tweak  [CI: ok]
1ad74ee1d5  2024-04-10 15:37:39 +02:00  Merge pull request 'this makes a bit more sense' (#152) from naming-improvement-pus-actions into main  [CI: ok]
f96fe6bdc0  2024-04-10 15:19:08 +02:00  this makes a bit more sense  [CI: ok]
d43a8eb571  2024-04-10 13:19:41 +02:00  Merge pull request 'improve example structure' (#151) from improve-example-structure into main  [CI: ok]
0bbada90ef  2024-04-10 12:58:51 +02:00  improve example structure  [CI: pending]
3375780e00  2024-04-10 12:29:23 +02:00  Merge pull request 'Refactor and improve TCP servers' (#150) from refactor-tcp-server into main  [CI: ok]
de028ed827  2024-04-10 11:54:05 +02:00  bugfix in example  [CI: pending]
d27ac5dfc9  2024-04-10 11:28:16 +02:00  refactored TCP server  [CI: pending]
c67b7cb93a  2024-04-09 19:40:55 +02:00  this is non-trivial  [CI: failed]
f71ba3e8d8  2024-04-09 18:11:29 +02:00  Merge pull request 'introduce stop signal handling for TCP' (#149) from tcp-server-stop-signal into main  [CI: ok]
975cd927f4  2024-04-09 17:27:57 +02:00  Merge branch 'main' into add-timestamp-to-sim-request  [CI: ok]
3cc9dd3c48  2024-04-09 17:21:43 +02:00  introduce stop signal handling  [CI: pending]
0fec994028  2024-04-04 18:33:00 +02:00  Merge pull request 'Update STM32F3 example' (#148) from update-stm32f3-example-tmtc-handling into main  [CI: ok]
226a134aff  2024-04-04 18:21:30 +02:00  Update STM32F3 example: new command to change blinky frequency; bump used sat-rs version  [CI: ok]
aac59ec7c1  2024-04-04 15:27:29 +02:00  Merge pull request 'Major refactoring and update of PUS module' (#146) from pus-modules-update into main  [CI: ok]
ce7eb8650f  2024-04-04 15:20:07 +02:00  changelog  [CI: ok]
df2733a176  2024-04-04 15:18:53 +02:00  Major refactoring and update of PUS module
9039c1b59a  2024-03-25 16:14:04 +01:00  Merge branch 'main' into add-timestamp-to-sim-request  [CI: ok]
972bf19188  2024-03-13 12:03:11 +01:00  cargo fmt  [CI: pending]
9d711d2b73  2024-03-13 10:49:24 +01:00  add fern logging  [CI: failed]
d0005cdd63  2024-03-13 10:36:08 +01:00  this works  [CI: failed]
f00e6cf50c  2024-03-12 18:25:21 +01:00  we require an asynchronix update here I guess  [CI: failed]
103 changed files with 6853 additions and 6521 deletions

.github/workflows/ci.yml (new file, 64 lines)

```yaml
name: ci
on: [push, pull_request]

jobs:
  check:
    name: Check build
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo check --release

  test:
    name: Run Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install nextest
        uses: taiki-e/install-action@nextest
      - run: cargo nextest run --all-features
      - run: cargo test --doc

  cross-check:
    name: Check Cross-Compilation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - armv7-unknown-linux-gnueabihf
          - thumbv7em-none-eabihf
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
      - run: cargo check -p satrs --release --target=${{matrix.target}} --no-default-features

  fmt:
    name: Check formatting
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo fmt --all -- --check

  docs:
    name: Check Documentation Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly
      - run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo clippy -- -D warnings
```
.gitignore (+1)

```diff
@@ -2,6 +2,7 @@ target/
 output.log
 /Cargo.lock
+output.log
 output.log
```
README.md

```diff
@@ -1,4 +1,4 @@
-<p align="center"> <img src="misc/satrs-logo.png" width="40%"> </p>
+<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
 
 [![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
 [![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)
@@ -24,11 +24,6 @@ A lot of the architecture and general design considerations are based on the
 through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
 and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
 
-This framework is in the early stages of development. Important features are missing. New releases
-with breaking changes are released regularly, with all changes documented inside respective
-changelog files. You should only use this framework if your are willing to work in this
-environment.
-
 # Overview
 
 This project currently contains following crates:
@@ -45,7 +40,7 @@ This project currently contains following crates:
 * [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
   Components to build a mission information base from the on-board software directly.
 * [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco):
-  Example of a simple example on-board software using sat-rs components on a bare-metal system
+  Example of a simple example using low-level sat-rs components on a bare-metal system
   with constrained resources.
 
 Each project has its own `CHANGELOG.md`.
```
Jenkinsfile

```diff
@@ -33,6 +33,7 @@ pipeline {
     stage('Test') {
       steps {
         sh 'cargo nextest r --all-features'
+        sh 'cargo test --doc'
       }
     }
     stage('Check with all features') {
```
sat-rs example architecture graph (yEd `.graphml` file; three `<y:NodeLabel>` text changes in hunks `@@ -166,7 +166,7`, `@@ -260,7 +260,7` and `@@ -272,7 +272,7`, with the label geometry attributes updated accordingly):

- node label `TM Funnel` renamed to `TM Sink`
- node label `satrs-satellite / Simulator based on asynchronix` renamed to `satrs-minisim / Simulator based on asynchronix`
- node label `satrs-tmtc / Command-line interface based TMTC handling` renamed to `pytmtc / Command-line interface based TMTC handling`
(File diff suppressed because it is too large.)

misc/satrs-logo-v2.png (new binary file, 49 KiB)

Book chapter on communication (`communication.md`)

```diff
@@ -17,7 +17,7 @@ it is still centered around small packets. `sat-rs` provides support for these E
 standards and also attempts to fill the gap to the internet protocol by providing the following
 components.
 
-1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/host/udp_server/index.html).
+1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/std/udp_server/index.html).
    UDP is already packet based which makes it an excellent fit for exchanging space packets.
 2. [TCP TMTC Server Components](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/index.html).
    TCP is a stream based protocol, so the library provides building blocks to parse telemetry
@@ -39,8 +39,12 @@ task might be to store all arriving telemetry persistently. This is especially i
 space systems which do not have permanent contact like low-earth-orbit (LEO) satellites.
 
 The most important task of a TC source is to deliver the telecommands to the correct recipients.
-For modern component oriented software using message passing, this usually includes staged
-demultiplexing components to determine where a command needs to be sent.
+For component oriented software using message passing, this usually includes staged demultiplexing
+components to determine where a command needs to be sent.
+
+Using a generic concept of a TC source and a TM sink as part of the software design simplifies
+the flexibility of the TMTC infrastructure: Newly added TM generators and TC receiver only have to
+forward their generated or received packets to those handler objects.
 
 # Low-level protocols and the bridge to the communcation subsystem
```
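
The packet orientation of UDP mentioned in the chapter maps directly onto the standard library socket API. The following is a minimal sketch using plain `std` only (not the satrs `udp_server` components; the port number and buffer size are arbitrary assumptions): each received datagram can be treated as one self-contained space packet, so no extra framing layer is needed.

```rust
use std::net::UdpSocket;

// Minimal sketch: one UDP datagram = one space packet. A real TC source
// would demultiplex each packet to its recipient, e.g. based on the
// CCSDS APID contained in the first header bytes.
fn main() -> std::io::Result<()> {
    let socket = UdpSocket::bind("127.0.0.1:7301")?;
    let mut buf = [0u8; 4096];
    loop {
        let (num_bytes, src) = socket.recv_from(&mut buf)?;
        println!("received {} byte packet from {}", num_bytes, src);
    }
}
```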

Book chapter on events (`events.md`)

```diff
@@ -1,16 +1,24 @@
 # Events
 
-Events can be an extremely important mechanism used for remote systems to monitor unexpected
-or expected anomalies and events occuring on these systems. They are oftentimes tied to
+Events are an important mechanism used for remote systems to monitor unexpected
+or expected anomalies and events occuring on these systems.
+
+One common use case for events on remote systems is to offer a light-weight publish-subscribe
+mechanism and IPC mechanism for software and hardware events which are also packaged as telemetry
+(TM) or can trigger a system response. They can also be tied to
 Fault Detection, Isolation and Recovery (FDIR) operations, which need to happen autonomously.
 
-Events can also be used as a convenient Inter-Process Communication (IPC) mechansism, which is
-also observable for the Ground segment. The PUS Service 5 standardizes how the ground interface
-for events might look like, but does not specify how other software components might react
-to those events. There is the PUS Service 19, which might be used for that purpose, but the
-event components recommended by this framework do not really need this service.
+The PUS Service 5 standardizes how the ground interface for events might look like, but does not
+specify how other software components might react to those events. There is the PUS Service 19,
+which might be used for that purpose, but the event components recommended by this framework do not
+rely on the present of this service.
 
 The following images shows how the flow of events could look like in a system where components
 can generate events, and where other system components might be interested in those events:
 
 ![Event flow](images/events/event_man_arch.png)
+
+For the concrete implementation of your own event management and/or event routing system, you
+can have a look at the event management documentation inside the
+[API documentation](https://docs.rs/satrs/latest/satrs/event_man/index.html) where you can also
+find references to all examples.
```
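
As an illustration of the publish-subscribe flow the chapter describes, here is a self-contained sketch built on plain `std` channels. It is not the satrs `event_man` API; the `Event` type and all names are made up for the example.

```rust
use std::sync::mpsc;

// Hypothetical event type: a group ID plus a unique ID, similar in
// spirit to how event definitions are often structured for PUS Service 5.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
struct Event {
    group_id: u16,
    unique_id: u16,
}

fn main() {
    // One sender handle per event creator, one receiver for the manager.
    let (event_tx, event_rx) = mpsc::channel::<Event>();

    // A component (e.g. an FDIR task) publishes an event.
    event_tx
        .send(Event { group_id: 2, unique_id: 1 })
        .unwrap();

    // An event manager task would normally route this to all subscribers,
    // e.g. a PUS Service 5 handler that wraps it into an event TM packet.
    let event = event_rx.recv().unwrap();
    println!("routing event {:?} to subscribers", event);
}
```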

Book chapter on modes (`modes.md`)

```diff
@@ -1,11 +1,11 @@
 # Modes
 
-Modes are an extremely useful concept for complex system in general. They also allow simplified
-system reasoning for both system operators and OBSW developers. They model the behaviour of a
-component and also provide observability of a system. A few examples of how to model
-different components of a space system with modes will be given.
+Modes are an extremely useful concept to model complex systems. They allow simplified
+system reasoning for both system operators and OBSW developers. They also provide a way to alter
+the behaviour of a component and also provide observability of a system. A few examples of how to
+model the mode of different components within a space system with modes will be given.
 
-## Modelling a pyhsical devices with modes
+## Pyhsical device component with modes
 
 The following simple mode scheme with the following three mode
@@ -13,7 +13,8 @@ The following simple mode scheme with the following three mode
 - `ON`
 - `NORMAL`
 
-can be applied to a large number of simpler devices of a remote system, for example sensors.
+can be applied to a large number of simpler device controllers of a remote system, for example
+sensors.
 
 1. `OFF` means that a device is physically switched off, and the corresponding software component
    does not poll the device regularly.
@@ -31,7 +32,7 @@ for the majority of devices:
 2. `NORMAL` or `ON` to `OFF`: Any important shutdown configuration or handling must be performed
    before powering off the device.
 
-## Modelling a controller with modes
+## Controller components with modes
 
 Controller components are not modelling physical devices, but a mode scheme is still the best
 way to model most of these components.
```
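
The three-mode scheme in this chapter can be sketched as a small state machine. This is purely illustrative (the `Mode` enum and `DeviceHandler` are made-up names, not the satrs mode API), showing the transition handling the chapter calls out: transitions into `OFF` need shutdown handling, transitions out of `OFF` need power-on and polling setup.

```rust
// Illustrative three-mode scheme for a simple device handler.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    Off,
    On,
    Normal,
}

struct DeviceHandler {
    mode: Mode,
}

impl DeviceHandler {
    fn set_mode(&mut self, new_mode: Mode) {
        match (self.mode, new_mode) {
            (Mode::Normal | Mode::On, Mode::Off) => {
                // Perform any important shutdown configuration before the
                // device is physically switched off.
            }
            (Mode::Off, Mode::On | Mode::Normal) => {
                // Power on the device and start polling it regularly.
            }
            _ => {} // e.g. ON -> NORMAL usually needs no extra steps.
        }
        self.mode = new_mode;
    }
}

fn main() {
    let mut handler = DeviceHandler { mode: Mode::Off };
    handler.set_mode(Mode::On);
    handler.set_mode(Mode::Normal);
    println!("final mode: {:?}", handler.mode);
}
```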

Cargo.lock (satrs-example-stm32f3-disco)

```diff
@@ -22,9 +22,9 @@ dependencies = [
 [[package]]
 name = "autocfg"
-version = "1.1.0"
+version = "1.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
+checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
 
 [[package]]
 name = "bare-metal"
@@ -88,19 +88,13 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
 [[package]]
 name = "chrono"
-version = "0.4.35"
+version = "0.4.37"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
+checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
 dependencies = [
  "num-traits",
 ]
 
-[[package]]
-name = "cobs"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
-
 [[package]]
 name = "cobs"
 version = "0.2.3"
@@ -189,7 +183,7 @@ dependencies = [
  "ident_case",
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -200,7 +194,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
 dependencies = [
  "darling_core",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -233,7 +227,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -265,7 +259,7 @@ checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -331,7 +325,7 @@ dependencies = [
  "darling",
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -559,7 +553,7 @@ checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -580,9 +574,9 @@ checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
 [[package]]
 name = "pin-project-lite"
-version = "0.2.13"
+version = "0.2.14"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
+checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
 
 [[package]]
 name = "pin-utils"
@@ -634,9 +628,9 @@ dependencies = [
 [[package]]
 name = "rtcc"
-version = "0.3.1"
+version = "0.3.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4fbd0d5bed2b76e27a7ef872568b34072c1af94c277cd52c17a89d54673b3fe"
+checksum = "95973c3a0274adc4f3c5b70d2b5b85618d6de9559a6737d3293ecae9a2fc0839"
 dependencies = [
  "chrono",
 ]
@@ -680,7 +674,7 @@ dependencies = [
  "proc-macro-error",
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -729,10 +723,8 @@ dependencies = [
 [[package]]
 name = "satrs"
 version = "0.2.0-rc.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c8cb19cba46a45047ff0879ebfbf9d6ae1c5b2e0e38b2e08760b10a441d4dae6"
 dependencies = [
- "cobs 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cobs",
  "crc",
  "delegate",
  "num-traits",
@@ -747,7 +739,7 @@ dependencies = [
 name = "satrs-example-stm32f3-disco"
 version = "0.1.0"
 dependencies = [
- "cobs 0.2.3 (git+https://github.com/robamu/cobs.rs.git?branch=all_features)",
+ "cobs",
  "cortex-m",
  "cortex-m-rt",
  "cortex-m-semihosting",
@@ -767,9 +759,7 @@ dependencies = [
 [[package]]
 name = "satrs-shared"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75a402ba556a7f5eef707035b45e64a3259b09674311e98697f3dd0508a1bf51"
+version = "0.1.3"
 dependencies = [
  "spacepackets",
 ]
@@ -809,12 +799,12 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
 [[package]]
 name = "spacepackets"
-version = "0.10.0"
+version = "0.11.0-rc.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "28246ae2451af240c3e3ff3c51363c7b6ad565ca6aa9bad23b8c725687c485e1"
+checksum = "c2cfd5f9a4c7f10714d21f9bc61f2d176cb7ae092cdd687e7ade2d4e6f7d7125"
 dependencies = [
- "chrono",
  "crc",
+ "defmt",
 "delegate",
  "num-traits",
  "num_enum",
@@ -909,9 +899,9 @@ dependencies = [
 [[package]]
 name = "syn"
-version = "2.0.53"
+version = "2.0.58"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032"
+checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
 dependencies = [
  "proc-macro2",
  "quote",
@@ -935,7 +925,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
@@ -1001,5 +991,5 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
 dependencies = [
  "proc-macro2",
  "quote",
- "syn 2.0.53",
+ "syn 2.0.58",
 ]
```

Cargo.toml (satrs-example-stm32f3-disco)

```diff
@@ -46,8 +46,9 @@ branch = "complete-dma-update-hal"
 # path = "../stm32f3-discovery"
 
 [dependencies.satrs]
-version = "0.2.0-rc.0"
+path = "../satrs"
 default-features = false
+features = ["defmt"]
 
 [dev-dependencies]
 defmt-test = "0.3"
```

README.md (satrs-example-stm32f3-disco)

````diff
@@ -103,3 +103,12 @@ After that, you can for example send a ping to the MCU using the following comma
 ```sh
 ./main.py -p /ping
 ```
+
+You can configure the blinky frequency using
+
+```sh
+./main.py -p /change_blink_freq
+```
+
+All these commands will package a PUS telecommand which will be sent to the MCU using the COBS
+format as the packet framing format.
````
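
COBS framing removes all zero bytes from the payload so a `0x00` byte can serve as an unambiguous packet delimiter on the serial link. The following is a minimal hand-rolled sketch of the encoding step for illustration only (the example firmware and client use existing COBS libraries instead):

```rust
// Minimal COBS encoder sketch: each zero byte in the input is replaced
// by a code byte giving the distance to the next zero, so the encoded
// output contains no zero bytes and 0x00 can frame whole packets.
fn cobs_encode(data: &[u8], out: &mut Vec<u8>) {
    let mut code_idx = out.len();
    out.push(0); // placeholder for the first code byte
    let mut code: u8 = 1;
    for &byte in data {
        if byte != 0 {
            out.push(byte);
            code += 1;
        }
        if byte == 0 || code == 0xFF {
            out[code_idx] = code;
            code_idx = out.len();
            out.push(0); // placeholder for the next code byte
            code = 1;
        }
    }
    out[code_idx] = code;
}

fn main() {
    let mut encoded = Vec::new();
    cobs_encode(&[0x11, 0x22, 0x00, 0x33], &mut encoded);
    assert_eq!(encoded, vec![0x03, 0x11, 0x22, 0x02, 0x33]);
    encoded.push(0x00); // trailing frame delimiter on the wire
    println!("framed packet: {:02x?}", encoded);
}
```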

main.py (Python TMTC client of the STM32F3 example)

```diff
@@ -94,6 +94,7 @@ class SatRsConfigHook(HookBase):
 def create_cmd_definition_tree() -> CmdTreeNode:
     root_node = CmdTreeNode.root_node()
     root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
+    root_node.add_child(CmdTreeNode("change_blink_freq", "Change blink frequency"))
     return root_node
@@ -215,6 +216,25 @@ class TcHandler(TcHandlerBase):
         if cmd_path == "/ping":
             q.add_log_cmd("Sending PUS ping telecommand")
             q.add_pus_tc(PusTelecommand(service=17, subservice=1))
+        if cmd_path == "/change_blink_freq":
+            self.create_change_blink_freq_command(q)
+
+    def create_change_blink_freq_command(self, q: DefaultPusQueueHelper):
+        q.add_log_cmd("Changing blink frequency")
+        while True:
+            blink_freq = int(
+                input(
+                    "Please specify new blink frequency in ms. Valid Range [2..10000]: "
+                )
+            )
+            if blink_freq < 2 or blink_freq > 10000:
+                print(
+                    "Invalid blink frequency. Please specify a value between 2 and 10000."
+                )
+                continue
+            break
+        app_data = struct.pack("!I", blink_freq)
+        q.add_pus_tc(PusTelecommand(service=8, subservice=1, app_data=app_data))
 
 def main():
```
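
The `struct.pack("!I", blink_freq)` call produces the 4-byte big-endian ("network" byte order) representation that the firmware decodes with `u32::from_be_bytes` in the Rust diff below. A quick round-trip check of that byte layout:

```rust
fn main() {
    // What the Python side produces with struct.pack("!I", 500):
    // four bytes, big-endian byte order.
    let app_data: [u8; 4] = 500u32.to_be_bytes();
    assert_eq!(app_data, [0x00, 0x00, 0x01, 0xF4]);

    // What the firmware does with the received application data field:
    let new_freq_ms = u32::from_be_bytes(app_data);
    assert_eq!(new_freq_ms, 500);
    println!("decoded blink frequency: {} ms", new_freq_ms);
}
```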

View File

@@ -1,5 +1,13 @@
#![no_std] #![no_std]
#![no_main] #![no_main]
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportCreator, VerificationToken,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::EcssEnumU16;
use satrs::spacepackets::CcsdsPacket;
use satrs::spacepackets::{ByteConversionError, SpHeader};
// global logger + panicking-behavior + memory layout // global logger + panicking-behavior + memory layout
use satrs_example_stm32f3_disco as _; use satrs_example_stm32f3_disco as _;
@@ -7,21 +15,17 @@ use rtic::app;
use heapless::{mpmc::Q8, Vec}; use heapless::{mpmc::Q8, Vec};
#[allow(unused_imports)] #[allow(unused_imports)]
use rtic_monotonics::systick::fugit::TimerInstantU32; use rtic_monotonics::systick::fugit::{MillisDurationU32, TimerInstantU32};
use rtic_monotonics::systick::ExtU32; use rtic_monotonics::systick::ExtU32;
use satrs::seq_count::SequenceCountProviderCore; use satrs::seq_count::SequenceCountProviderCore;
use satrs::{ use satrs::spacepackets::{ecss::PusPacket, ecss::WritablePusPacket};
pool::StoreError,
pus::{EcssChannel, EcssTmSenderCore, EcssTmtcError, PusTmWrapper},
spacepackets::{ecss::PusPacket, ecss::WritablePusPacket},
};
use stm32f3xx_hal::dma::dma1; use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3}; use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2; use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent}; use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
const UART_BAUD: u32 = 115200; const UART_BAUD: u32 = 115200;
const BLINK_FREQ_MS: u32 = 1000; const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u32 = 20; const TX_HANDLER_FREQ_MS: u32 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5; const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
const MAX_TC_LEN: usize = 128; const MAX_TC_LEN: usize = 128;
@@ -54,7 +58,6 @@ type TcPacket = Vec<u8, MAX_TC_LEN>;
static TM_REQUESTS: Q8<TmPacket> = Q8::new(); static TM_REQUESTS: Q8<TmPacket> = Q8::new();
use core::cell::RefCell;
use core::sync::atomic::{AtomicU16, Ordering}; use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef { pub struct SeqCountProviderAtomicRef {
@@ -93,56 +96,45 @@ pub struct TxIdle {
dma_channel: dma1::C7, dma_channel: dma1::C7,
} }
pub struct TmSender { #[derive(Debug, defmt::Format)]
vec: Option<RefCell<Vec<u8, MAX_TM_LEN>>>, pub enum TmSendError {
ByteConversion(ByteConversionError),
Queue,
} }
impl TmSender { impl From<ByteConversionError> for TmSendError {
pub fn new(tm_packet: TmPacket) -> Self { fn from(value: ByteConversionError) -> Self {
Self { Self::ByteConversion(value)
vec: Some(RefCell::new(tm_packet)),
}
} }
} }
impl EcssChannel for TmSender { fn send_tm(tm_creator: PusTmCreator) -> Result<(), TmSendError> {
fn id(&self) -> satrs::ChannelId { if tm_creator.len_written() > MAX_TM_LEN {
0 return Err(ByteConversionError::ToSliceTooSmall {
expected: tm_creator.len_written(),
found: MAX_TM_LEN,
}
.into());
} }
let mut tm_vec = TmPacket::new();
tm_vec
.resize(tm_creator.len_written(), 0)
.expect("vec resize failed");
tm_creator.write_to_bytes(tm_vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm_creator.service(),
tm_creator.subservice(),
tm_creator.len_written()
);
TM_REQUESTS
.enqueue(tm_vec)
.map_err(|_| TmSendError::Queue)?;
Ok(())
} }
impl EcssTmSenderCore for TmSender { fn handle_tm_send_error(error: TmSendError) {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { defmt::warn!("sending tm failed with error {}", error);
let vec = self.vec.as_ref();
if vec.is_none() {
panic!("send_tm should only be called once");
}
let vec_ref = vec.unwrap();
let mut vec = vec_ref.borrow_mut();
match tm {
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => {
if tm.len_written() > MAX_TM_LEN {
return Err(EcssTmtcError::Store(StoreError::DataTooLarge(
tm.len_written(),
)));
}
vec.resize(tm.len_written(), 0).expect("vec resize failed");
tm.write_to_bytes(vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm.service(),
tm.subservice(),
tm.len_written()
);
drop(vec);
TM_REQUESTS
.enqueue(vec_ref.take())
.map_err(|_| EcssTmtcError::Store(StoreError::StoreFull(0)))?;
}
}
Ok(())
}
} }
pub enum UartTxState { pub enum UartTxState {
@@ -157,18 +149,106 @@ pub struct UartTxShared {
state: UartTxState, state: UartTxState,
} }
pub struct RequestWithToken {
token: VerificationToken<TcStateAccepted>,
request: Request,
}
#[derive(Debug, defmt::Format)]
pub enum Request {
Ping,
ChangeBlinkFrequency(u32),
}
#[derive(Debug, defmt::Format)]
pub enum RequestError {
InvalidApid = 1,
InvalidService = 2,
InvalidSubservice = 3,
NotEnoughAppData = 4,
}
pub fn convert_pus_tc_to_request(
tc: &PusTcReader,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) -> Result<RequestWithToken, RequestError> {
defmt::info!(
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
tc.len_packed()
);
let token = verif_reporter.add_tc(tc);
if tc.apid() != PUS_APID {
defmt::warn!("Received tc with unknown APID {}", tc.apid());
let result = send_tm(
verif_reporter
.acceptance_failure(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
FailParams::new(timestamp, &EcssEnumU16::new(0), &[]),
)
.unwrap(),
);
if let Err(e) = result {
handle_tm_send_error(e);
}
return Err(RequestError::InvalidApid);
}
let (tm_creator, accepted_token) = verif_reporter
.acceptance_success(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
timestamp,
)
.unwrap();
if let Err(e) = send_tm(tm_creator) {
handle_tm_send_error(e);
}
if tc.service() == 17 && tc.subservice() == 1 {
if tc.subservice() == 1 {
return Ok(RequestWithToken {
request: Request::Ping,
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else if tc.service() == 8 {
if tc.subservice() == 1 {
if tc.user_data().len() < 4 {
return Err(RequestError::NotEnoughAppData);
}
let new_freq_ms = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
return Ok(RequestWithToken {
request: Request::ChangeBlinkFrequency(new_freq_ms),
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else {
return Err(RequestError::InvalidService);
}
}
#[app(device = stm32f3xx_hal::pac, peripherals = true)] #[app(device = stm32f3xx_hal::pac, peripherals = true)]
mod app { mod app {
use super::*; use super::*;
use core::slice::Iter; use core::slice::Iter;
use rtic_monotonics::systick::Systick; use rtic_monotonics::systick::Systick;
use rtic_monotonics::Monotonic; use rtic_monotonics::Monotonic;
use satrs::pus::verification::FailParams; use satrs::pus::verification::{TcStateStarted, VerificationReportCreator};
use satrs::pus::verification::VerificationReporterCore; use satrs::spacepackets::{ecss::tc::PusTcReader, time::cds::P_FIELD_BASE};
use satrs::spacepackets::{
ecss::tc::PusTcReader, ecss::tm::PusTmCreator, ecss::tm::PusTmSecondaryHeader,
ecss::EcssEnumU16, time::cds::P_FIELD_BASE, CcsdsPacket, SpHeader,
};
#[allow(unused_imports)] #[allow(unused_imports)]
use stm32f3_discovery::leds::Direction; use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds; use stm32f3_discovery::leds::Leds;
@@ -181,15 +261,16 @@ mod app {
#[shared] #[shared]
struct Shared { struct Shared {
blink_freq: MillisDurationU32,
tx_shared: UartTxShared, tx_shared: UartTxShared,
rx_transfer: Option<RxDmaTransferType>, rx_transfer: Option<RxDmaTransferType>,
} }
#[local] #[local]
struct Local { struct Local {
verif_reporter: VerificationReportCreator,
leds: Leds, leds: Leds,
last_dir: Direction, last_dir: Direction,
verif_reporter: VerificationReporterCore,
curr_dir: Iter<'static, Direction>, curr_dir: Iter<'static, Direction>,
} }
@@ -215,8 +296,6 @@ mod app {
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery"); defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb); let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
let leds = Leds::new( let leds = Leds::new(
gpioe.pe8, gpioe.pe8,
gpioe.pe9, gpioe.pe9,
@@ -265,8 +344,12 @@ mod app {
defmt::info!("Spawning tasks"); defmt::info!("Spawning tasks");
blink::spawn().unwrap(); blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap(); serial_tx_handler::spawn().unwrap();
let verif_reporter = VerificationReportCreator::new(PUS_APID).unwrap();
( (
Shared { Shared {
blink_freq: MillisDurationU32::from_ticks(DEFAULT_BLINK_FREQ_MS),
tx_shared: UartTxShared { tx_shared: UartTxShared {
last_completed: None, last_completed: None,
state: UartTxState::Idle(Some(TxIdle { state: UartTxState::Idle(Some(TxIdle {
@@ -277,17 +360,16 @@ mod app {
rx_transfer: Some(rx_transfer), rx_transfer: Some(rx_transfer),
}, },
Local { Local {
//timer: mono_timer, verif_reporter,
leds, leds,
last_dir: Direction::North, last_dir: Direction::North,
curr_dir: Direction::iter(), curr_dir: Direction::iter(),
verif_reporter,
}, },
) )
} }
#[task(local = [leds, curr_dir, last_dir])] #[task(local = [leds, curr_dir, last_dir], shared=[blink_freq])]
async fn blink(cx: blink::Context) { async fn blink(mut cx: blink::Context) {
let blink::LocalResources { let blink::LocalResources {
leds, leds,
curr_dir, curr_dir,
@@ -311,7 +393,8 @@ mod app {
toggle_leds(curr_dir.next().unwrap()); toggle_leds(curr_dir.next().unwrap());
} }
} }
Systick::delay(BLINK_FREQ_MS.millis()).await; let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
Systick::delay(current_blink_freq).await;
} }
} }
@@ -386,18 +469,18 @@ mod app {
#[task( #[task(
local = [ local = [
stamp_buf: [u8; 7] = [0; 7], verif_reporter,
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN], decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN], src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
verif_reporter timestamp: [u8; 7] = [0; 7],
], ],
shared = [blink_freq]
)] )]
async fn serial_rx_handler( async fn serial_rx_handler(
cx: serial_rx_handler::Context, mut cx: serial_rx_handler::Context,
received_packet: Vec<u8, MAX_TC_LEN>, received_packet: Vec<u8, MAX_TC_LEN>,
) { ) {
defmt::info!("running rx handler"); cx.local.timestamp[0] = P_FIELD_BASE;
cx.local.stamp_buf[0] = P_FIELD_BASE;
defmt::info!("Received packet with {} bytes", received_packet.len()); defmt::info!("Received packet with {} bytes", received_packet.len());
let decode_buf = cx.local.decode_buf; let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice(); let packet = received_packet.as_slice();
@@ -417,18 +500,49 @@ mod app {
            Ok(len) => {
                defmt::info!("Decoded packet length: {}", len);
                let pus_tc = PusTcReader::new(decode_buf);
-               let verif_reporter = cx.local.verif_reporter;
                match pus_tc {
-                   Ok((tc, tc_len)) => handle_tc(
-                       tc,
-                       tc_len,
-                       verif_reporter,
-                       cx.local.src_data_buf,
-                       cx.local.stamp_buf,
-                   ),
-                   Err(_e) => {
-                       // TODO: Print error after API rework.
-                       defmt::warn!("Error unpacking PUS TC");
+                   Ok((tc, _tc_len)) => {
+                       match convert_pus_tc_to_request(
+                           &tc,
+                           cx.local.verif_reporter,
+                           cx.local.src_data_buf,
+                           cx.local.timestamp,
+                       ) {
+                           Ok(request_with_token) => {
+                               let started_token = handle_start_verification(
+                                   request_with_token.token,
+                                   cx.local.verif_reporter,
+                                   cx.local.src_data_buf,
+                                   cx.local.timestamp,
+                               );
+                               match request_with_token.request {
+                                   Request::Ping => {
+                                       handle_ping_request(cx.local.timestamp);
+                                   }
+                                   Request::ChangeBlinkFrequency(new_freq_ms) => {
+                                       defmt::info!("Received blink frequency change request with new frequency {}", new_freq_ms);
+                                       cx.shared.blink_freq.lock(|blink_freq| {
+                                           *blink_freq =
+                                               MillisDurationU32::from_ticks(new_freq_ms);
+                                       });
+                                   }
+                               }
+                               handle_completion_verification(
+                                   started_token,
+                                   cx.local.verif_reporter,
+                                   cx.local.src_data_buf,
+                                   cx.local.timestamp,
+                               );
+                           }
+                           Err(e) => {
+                               // TODO: Error handling: Send verification failure based on request error.
+                               defmt::warn!("request error {}", e);
+                           }
+                       }
+                   }
+                   Err(e) => {
+                       defmt::warn!("Error unpacking PUS TC: {}", e);
                    }
                }
            }
@@ -438,104 +552,64 @@ mod app {
        }
    }

-   fn handle_tc(
-       tc: PusTcReader,
-       tc_len: usize,
-       verif_reporter: &mut VerificationReporterCore,
-       src_data_buf: &mut [u8; MAX_TM_LEN],
-       stamp_buf: &[u8; 7],
-   ) {
-       defmt::info!(
-           "Found PUS TC [{},{}] with length {}",
-           tc.service(),
-           tc.subservice(),
-           tc_len
-       );
-       let token = verif_reporter.add_tc(&tc);
-       if tc.apid() != PUS_APID {
-           defmt::warn!("Received tc with unknown APID {}", tc.apid());
-           let sendable = verif_reporter
-               .acceptance_failure(
-                   src_data_buf,
-                   token,
-                   SEQ_COUNT_PROVIDER.get(),
-                   0,
-                   FailParams::new(stamp_buf, &EcssEnumU16::new(0), &[]),
-               )
-               .unwrap();
-           let sender = TmSender::new(TmPacket::new());
-           if let Err(_e) = verif_reporter.send_acceptance_failure(sendable, &sender) {
-               defmt::warn!("Sending acceptance failure failed");
-           };
-           return;
-       }
-       let sendable = verif_reporter
-           .acceptance_success(src_data_buf, token, SEQ_COUNT_PROVIDER.get(), 0, stamp_buf)
-           .unwrap();
-       let sender = TmSender::new(TmPacket::new());
-       let accepted_token = match verif_reporter.send_acceptance_success(sendable, &sender) {
-           Ok(token) => token,
-           Err(_e) => {
-               // TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
-               defmt::warn!("Sending acceptance success failed");
-               return;
-           }
-       };
-       if tc.service() == 17 {
-           if tc.subservice() == 1 {
-               let sendable = verif_reporter
-                   .start_success(
-                       src_data_buf,
-                       accepted_token,
-                       SEQ_COUNT_PROVIDER.get(),
-                       0,
-                       stamp_buf,
-                   )
-                   .unwrap();
-               // let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
-               let sender = TmSender::new(TmPacket::new());
-               let started_token = match verif_reporter.send_start_success(sendable, &sender) {
-                   Ok(token) => token,
-                   Err(_e) => {
-                       // TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
-                       defmt::warn!("Sending acceptance success failed");
-                       return;
-                   }
-               };
-               defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
-               let mut sp_header =
-                   SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
-               let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
-               let ping_reply = PusTmCreator::new(&mut sp_header, sec_header, &[], true);
-               let mut tm_packet = TmPacket::new();
-               tm_packet
-                   .resize(ping_reply.len_written(), 0)
-                   .expect("vec resize failed");
-               ping_reply.write_to_bytes(&mut tm_packet).unwrap();
-               if TM_REQUESTS.enqueue(tm_packet).is_err() {
-                   defmt::warn!("TC queue full");
-                   return;
-               }
-               SEQ_COUNT_PROVIDER.increment();
-               let sendable = verif_reporter
-                   .completion_success(
-                       src_data_buf,
-                       started_token,
-                       SEQ_COUNT_PROVIDER.get(),
-                       0,
-                       stamp_buf,
-                   )
-                   .unwrap();
-               let sender = TmSender::new(TmPacket::new());
-               if let Err(_e) = verif_reporter.send_step_or_completion_success(sendable, &sender) {
-                   defmt::warn!("Sending completion success failed");
-               }
-           } else {
-               // TODO: Invalid subservice
-           }
-       }
-   }
+   fn handle_ping_request(timestamp: &[u8]) {
+       defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
+       let sp_header =
+           SpHeader::new_for_unseg_tc(PUS_APID, SEQ_COUNT_PROVIDER.get_and_increment(), 0);
+       let sec_header = PusTmSecondaryHeader::new_simple(17, 2, timestamp);
+       let ping_reply = PusTmCreator::new(sp_header, sec_header, &[], true);
+       let mut tm_packet = TmPacket::new();
+       tm_packet
+           .resize(ping_reply.len_written(), 0)
+           .expect("vec resize failed");
+       ping_reply.write_to_bytes(&mut tm_packet).unwrap();
+       if TM_REQUESTS.enqueue(tm_packet).is_err() {
+           defmt::warn!("TC queue full");
+           return;
+       }
+   }
+
+   fn handle_start_verification(
+       accepted_token: VerificationToken<TcStateAccepted>,
+       verif_reporter: &mut VerificationReportCreator,
+       src_data_buf: &mut [u8],
+       timestamp: &[u8],
+   ) -> VerificationToken<TcStateStarted> {
+       let (tm_creator, started_token) = verif_reporter
+           .start_success(
+               src_data_buf,
+               accepted_token,
+               SEQ_COUNT_PROVIDER.get(),
+               0,
+               &timestamp,
+           )
+           .unwrap();
+       let result = send_tm(tm_creator);
+       if let Err(e) = result {
+           handle_tm_send_error(e);
+       }
+       started_token
+   }
+
+   fn handle_completion_verification(
+       started_token: VerificationToken<TcStateStarted>,
+       verif_reporter: &mut VerificationReportCreator,
+       src_data_buf: &mut [u8],
+       timestamp: &[u8],
+   ) {
+       let result = send_tm(
+           verif_reporter
+               .completion_success(
+                   src_data_buf,
+                   started_token,
+                   SEQ_COUNT_PROVIDER.get(),
+                   0,
+                   timestamp,
+               )
+               .unwrap(),
+       );
+       if let Err(e) = result {
+           handle_tm_send_error(e);
+       }
+   }
}

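The rework above swaps the monolithic handle_tc call for a typed Request enum (Ping, ChangeBlinkFrequency) with separate verification helpers. Below is a minimal, self-contained sketch of that dispatch pattern; it uses plain std Rust and hypothetical names instead of the RTIC shared-resource lock and defmt logging of the actual diff:

#[derive(Debug)]
enum Request {
    Ping,
    ChangeBlinkFrequency(u32),
}

// Dispatch a decoded request. The real handler locks an RTIC shared
// resource instead of taking a mutable reference.
fn handle_request(request: Request, blink_freq_ms: &mut u32) {
    match request {
        Request::Ping => println!("ping request received, sending ping reply"),
        Request::ChangeBlinkFrequency(new_freq_ms) => {
            println!("changing blink frequency to {new_freq_ms} ms");
            *blink_freq_ms = new_freq_ms;
        }
    }
}

fn main() {
    let mut blink_freq_ms = 1000;
    handle_request(Request::ChangeBlinkFrequency(250), &mut blink_freq_ms);
    assert_eq!(blink_freq_ms, 250);
    handle_request(Request::Ping, &mut blink_freq_ms);
}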

@@ -17,6 +17,8 @@ zerocopy = "0.6"
csv = "1"
num_enum = "0.7"
thiserror = "1"
+ lazy_static = "1"
+ strum = { version = "0.26", features = ["derive"] }
derive-new = "0.5"
serde = { version = "1", features = ["derive"] }
serde_json = "1"


@@ -4,11 +4,13 @@ import dataclasses
import enum
import struct
- from spacepackets.ecss.tc import PacketId, PacketType

- EXAMPLE_PUS_APID = 0x02
- EXAMPLE_PUS_PACKET_ID_TM = PacketId(PacketType.TM, True, EXAMPLE_PUS_APID)
- TM_PACKET_IDS = [EXAMPLE_PUS_PACKET_ID_TM]
+ class Apid(enum.IntEnum):
+     SCHED = 1
+     GENERIC_PUS = 2
+     ACS = 3
+     CFDP = 4
+     TMTC = 5


class EventSeverity(enum.IntEnum):
@@ -36,8 +38,8 @@ class EventU32:
    )

- class RequestTargetId(enum.IntEnum):
-     ACS = 1
+ class AcsId(enum.IntEnum):
+     MGM_0 = 0


class AcsHkIds(enum.IntEnum):


@@ -3,10 +3,11 @@
import logging
import sys
import time
- from typing import Optional
+ from typing import Any, Optional

from prompt_toolkit.history import History
from prompt_toolkit.history import FileHistory
+ from spacepackets.ccsds import PacketId, PacketType
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
@@ -16,7 +17,7 @@ from spacepackets.ccsds.time import CdsShortTimestamp
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.pus import VerificationWrapper
- from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
+ from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
    CmdTreeNode,
@@ -46,7 +47,7 @@ from tmtccmd.util.obj_id import ObjectIdDictT

import pus_tc
- from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32
+ from common import Apid, EventU32

_LOGGER = logging.getLogger()
@@ -62,10 +63,13 @@ class SatRsConfigHook(HookBase):
        )
        assert self.cfg_path is not None
+       packet_id_list = []
+       for apid in Apid:
+           packet_id_list.append(PacketId(PacketType.TM, True, apid))
        cfg = create_com_interface_cfg_default(
            com_if_key=com_if_key,
            json_cfg_path=self.cfg_path,
-           space_packet_ids=TM_PACKET_IDS,
+           space_packet_ids=packet_id_list,
        )
        assert cfg is not None
        return create_com_interface_default(cfg)
@@ -85,21 +89,23 @@ class SatRsConfigHook(HookBase):
        return get_core_object_ids()


- class PusHandler(SpecificApidHandlerBase):
+ class PusHandler(GenericApidHandlerBase):
    def __init__(
        self,
        file_logger: logging.Logger,
        verif_wrapper: VerificationWrapper,
        raw_logger: RawTmtcTimedLogWrapper,
    ):
-       super().__init__(EXAMPLE_PUS_APID, None)
+       super().__init__(None)
        self.file_logger = file_logger
        self.raw_logger = raw_logger
        self.verif_wrapper = verif_wrapper

-   def handle_tm(self, packet: bytes, _user_args: any):
+   def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
        try:
-           pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
+           pus_tm = PusTelemetry.unpack(
+               packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
+           )
        except ValueError as e:
            _LOGGER.warning("Could not generate PUS TM object from raw data")
            _LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
@@ -107,7 +113,7 @@ class PusHandler(SpecificApidHandlerBase):
        service = pus_tm.service
        if service == 1:
            tm_packet = Service1Tm.unpack(
-               data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
+               data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
            )
            res = self.verif_wrapper.add_tm(tm_packet)
            if res is None:
@@ -124,7 +130,9 @@ class PusHandler(SpecificApidHandlerBase):
        elif service == 3:
            _LOGGER.info("No handling for HK packets implemented")
            _LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
-           pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
+           pus_tm = PusTelemetry.unpack(
+               packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
+           )
            if pus_tm.subservice == 25:
                if len(pus_tm.source_data) < 8:
                    raise ValueError("No addressable ID in HK packet")
@@ -132,16 +140,18 @@ class PusHandler(SpecificApidHandlerBase):
            _LOGGER.info(json_str)
        elif service == 5:
            tm_packet = PusTelemetry.unpack(
-               packet, time_reader=CdsShortTimestamp.empty()
+               packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
            )
            src_data = tm_packet.source_data
            event_u32 = EventU32.unpack(src_data)
-           _LOGGER.info(f"Received event packet. Event: {event_u32}")
+           _LOGGER.info(
+               f"Received event packet. Source APID: {Apid(tm_packet.apid)!r}, Event: {event_u32}"
+           )
            if event_u32.group_id == 0 and event_u32.unique_id == 0:
                _LOGGER.info("Received test event")
        elif service == 17:
            tm_packet = Service17Tm.unpack(
-               packet, time_reader=CdsShortTimestamp.empty()
+               packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
            )
            if tm_packet.subservice == 2:
                self.file_logger.info("Received Ping Reply TM[17,2]")
@@ -158,7 +168,7 @@ class PusHandler(SpecificApidHandlerBase):
                f"The service {service} is not implemented in Telemetry Factory"
            )
            tm_packet = PusTelemetry.unpack(
-               packet, time_reader=CdsShortTimestamp.empty()
+               packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
            )
        self.raw_logger.log_tm(pus_tm)
@@ -177,7 +187,7 @@ class TcHandler(TcHandlerBase):
            tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
            seq_cnt_provider=seq_count_provider,
            pus_verificator=self.verif_wrapper.pus_verificator,
-           default_pus_apid=EXAMPLE_PUS_APID,
+           default_pus_apid=None,
        )

    def send_cb(self, send_params: SendCbParams):
@@ -193,15 +203,15 @@ class TcHandler(TcHandlerBase):
            _LOGGER.info(log_entry.log_str)

    def queue_finished_cb(self, info: ProcedureWrapper):
-       if info.proc_type == TcProcedureType.DEFAULT:
-           def_proc = info.to_def_procedure()
+       if info.proc_type == TcProcedureType.TREE_COMMANDING:
+           def_proc = info.to_tree_commanding_procedure()
            _LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")

    def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
        q = self.queue_helper
        q.queue_wrapper = wrapper.queue_wrapper
-       if info.proc_type == TcProcedureType.DEFAULT:
-           def_proc = info.to_def_procedure()
+       if info.proc_type == TcProcedureType.TREE_COMMANDING:
+           def_proc = info.to_tree_commanding_procedure()
            assert def_proc.cmd_path is not None
            pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
@@ -221,7 +231,6 @@ def main():
        post_args_wrapper.set_params_without_prompts(proc_wrapper)
    else:
        post_args_wrapper.set_params_with_prompts(proc_wrapper)
-   params.apid = EXAMPLE_PUS_APID
    setup_args = SetupWrapper(
        hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
    )
@@ -233,8 +242,9 @@ def main():
    verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
    # Create primary TM handler and add it to the CCSDS Packet Handler
    tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
-   ccsds_handler = CcsdsTmHandler(generic_handler=None)
-   ccsds_handler.add_apid_handler(tm_handler)
+   ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
+   # TODO: We could add the CFDP handler for the CFDP APID at a later stage.
+   # ccsds_handler.add_apid_handler(tm_handler)

    # Create TC handler
    seq_count_provider = PusFileSeqCountProvider()
@@ -252,6 +262,7 @@ def main():
        while True:
            state = tmtc_backend.periodic_op(None)
            if state.request == BackendRequest.TERMINATION_NO_ERROR:
+               tmtc_backend.close_com_if()
                sys.exit(0)
            elif state.request == BackendRequest.DELAY_IDLE:
                _LOGGER.info("TMTC Client in IDLE mode")
@@ -266,6 +277,7 @@ def main():
            elif state.request == BackendRequest.CALL_NEXT:
                pass
    except KeyboardInterrupt:
+       tmtc_backend.close_com_if()
        sys.exit(0)


@@ -0,0 +1,143 @@
import datetime
import struct
import logging
from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTelecommand
from tmtccmd.config import CmdTreeNode
from tmtccmd.pus.tc.s200_fsfw_mode import Mode
from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
from common import AcsId, Apid
_LOGGER = logging.getLogger(__name__)
def create_set_mode_cmd(
apid: int, unique_id: int, mode: int, submode: int
) -> PusTelecommand:
app_data = bytearray()
app_data.extend(struct.pack("!I", unique_id))
app_data.extend(struct.pack("!I", mode))
app_data.extend(struct.pack("!H", submode))
return PusTelecommand(
service=200,
subservice=ModeSubservice.TC_MODE_COMMAND,
apid=apid,
app_data=app_data,
)
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
hk_node.add_child(CmdTreeNode("one_shot_hk", "Request One Shot HK set"))
hk_node.add_child(
CmdTreeNode("enable", "Enable periodic housekeeping data generation")
)
hk_node.add_child(
CmdTreeNode("disable", "Disable periodic housekeeping data generation")
)
mode_node = CmdTreeNode("mode", "Mode Node", hide_children_for_print=True)
set_mode_node = CmdTreeNode(
"set_mode", "Set Node", hide_children_which_are_leaves=True
)
set_mode_node.add_child(CmdTreeNode("off", "Set OFF Mode"))
set_mode_node.add_child(CmdTreeNode("on", "Set ON Mode"))
set_mode_node.add_child(CmdTreeNode("normal", "Set NORMAL Mode"))
mode_node.add_child(set_mode_node)
mode_node.add_child(CmdTreeNode("read_mode", "Read Mode"))
test_node = CmdTreeNode("test", "Test Node")
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
root_node.add_child(test_node)
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
scheduler_node.add_child(
CmdTreeNode(
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
)
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
return root_node
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
# It should always be at least the root path "/", so we split off the empty portion left of it.
cmd_path_list = cmd_path.split("/")[1:]
if len(cmd_path_list) == 0:
_LOGGER.warning("empty command path")
return
if cmd_path_list[0] == "test":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
)
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
)
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
q.add_log_cmd("Sending PUS scheduled TC telecommand")
crt_time = CdsShortTimestamp.from_now()
time_stamp = crt_time + datetime.timedelta(seconds=10)
time_stamp = time_stamp.pack()
return q.add_pus_tc(
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=Apid.SCHED,
)
)
if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgms":
assert len(cmd_path_list) >= 3
if cmd_path_list[2] == "hk":
if cmd_path_list[3] == "one_shot_hk":
q.add_log_cmd("Sending HK one shot request")
# TODO: Fix
# q.add_pus_tc(
# create_request_one_hk_command(
# make_addressable_id(Apid.ACS, AcsId.MGM_SET)
# )
# )
if cmd_path_list[2] == "mode":
if cmd_path_list[3] == "set_mode":
handle_set_mode_cmd(
q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
)
def handle_set_mode_cmd(
q: DefaultPusQueueHelper, target_str: str, mode_str: str, apid: int, unique_id: int
):
if mode_str == "off":
q.add_log_cmd(f"Sending Mode OFF to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.OFF, 0))
elif mode_str == "on":
q.add_log_cmd(f"Sending Mode ON to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.ON, 0))
elif mode_str == "normal":
q.add_log_cmd(f"Sending Mode NORMAL to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.NORMAL, 0))


@@ -1,2 +1,2 @@
- tmtccmd == 8.0.0rc1
+ tmtccmd == 8.0.0rc2
# -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd # -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd


@@ -1,85 +0,0 @@
import datetime
import logging
from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTelecommand
from tmtccmd.config import CmdTreeNode
from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command
from common import (
EXAMPLE_PUS_APID,
make_addressable_id,
RequestTargetId,
AcsHkIds,
)
_LOGGER = logging.getLogger(__name__)
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
test_node = CmdTreeNode("test", "Test Node")
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
root_node.add_child(test_node)
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
scheduler_node.add_child(
CmdTreeNode(
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
)
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK"))
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
return root_node
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
# It should always be at least the root path "/", so we split of the empty portion left of it.
cmd_path_list = cmd_path.split("/")[1:]
if len(cmd_path_list) == 0:
_LOGGER.warning("empty command path")
return
if cmd_path_list[0] == "test":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
q.add_log_cmd("Sending PUS scheduled TC telecommand")
crt_time = CdsShortTimestamp.from_now()
time_stamp = crt_time + datetime.timedelta(seconds=10)
time_stamp = time_stamp.pack()
return q.add_pus_tc(
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=EXAMPLE_PUS_APID,
)
)
if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgm":
assert len(cmd_path_list) >= 3
if cmd_path_list[2] == "one_shot_hk":
q.add_log_cmd("Sending HK one shot request")
q.add_pus_tc(
create_request_one_hk_command(
make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
)
)


@@ -1,18 +1,17 @@
- // TODO: Remove this at a later stage.
- #![allow(dead_code)]
use derive_new::new;
- use satrs::hk::HkRequestVariant;
+ use satrs::hk::{HkRequest, HkRequestVariant};
+ use satrs::queue::{GenericSendError, GenericTargetedMessagingError};
use satrs::spacepackets::ecss::hk;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::SpHeader;
- use satrs_example::TimeStampHelper;
+ use satrs_example::{DeviceMode, TimeStampHelper};
use std::sync::mpsc::{self};
use std::sync::{Arc, Mutex};

use satrs::mode::{
    ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler,
};
- use satrs::pus::{EcssTmSenderCore, PusTmVariant};
+ use satrs::pus::{EcssTmSender, PusTmVariant};
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs_example::config::components::PUS_MODE_SERVICE;
@@ -21,24 +20,36 @@ use crate::requests::CompositeRequest;
use serde::{Deserialize, Serialize};

+ const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
+ // This is the selected resolution for the STM LIS3MDL device for the 4 Gauss sensitivity setting.
+ const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;

pub trait SpiInterface {
    type Error;
-   fn transfer(&mut self, data: &mut [u8]) -> Result<(), Self::Error>;
+   fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>;
}

#[derive(Default)]
- pub struct SpiDummyInterface {}
+ pub struct SpiDummyInterface {
+     pub dummy_val_0: i16,
+     pub dummy_val_1: i16,
+     pub dummy_val_2: i16,
+ }

impl SpiInterface for SpiDummyInterface {
    type Error = ();
-   fn transfer(&mut self, _data: &mut [u8]) -> Result<(), Self::Error> {
+   fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> {
+       rx[0..2].copy_from_slice(&self.dummy_val_0.to_be_bytes());
+       rx[2..4].copy_from_slice(&self.dummy_val_1.to_be_bytes());
+       rx[4..6].copy_from_slice(&self.dummy_val_2.to_be_bytes());
        Ok(())
    }
}

#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)]
pub struct MgmData {
+   pub valid: bool,
    pub x: f32,
    pub y: f32,
    pub z: f32,
@@ -50,9 +61,10 @@ pub struct MpscModeLeafInterface {
    pub reply_tx_to_parent: mpsc::Sender<GenericMessage<ModeReply>>,
}

+ /// Example MGM device handler strongly based on the LIS3MDL MEMS device.
#[derive(new)]
#[allow(clippy::too_many_arguments)]
- pub struct MgmHandler<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> {
+ pub struct MgmHandlerLis3Mdl<ComInterface: SpiInterface, TmSender: EcssTmSender> {
    id: UniqueApidTargetId,
    dev_str: &'static str,
    mode_interface: MpscModeLeafInterface,
@@ -62,84 +74,143 @@ pub struct MgmHandler<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> {
    com_interface: ComInterface,
    shared_mgm_set: Arc<Mutex<MgmData>>,
    #[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")]
-   mode: ModeAndSubmode,
+   mode_and_submode: ModeAndSubmode,
+   #[new(default)]
+   tx_buf: [u8; 12],
+   #[new(default)]
+   rx_buf: [u8; 12],
+   #[new(default)]
+   tm_buf: [u8; 16],
    #[new(default)]
    stamp_helper: TimeStampHelper,
}

- impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> MgmHandler<ComInterface, TmSender> {
+ impl<ComInterface: SpiInterface, TmSender: EcssTmSender> MgmHandlerLis3Mdl<ComInterface, TmSender> {
    pub fn periodic_operation(&mut self) {
        self.stamp_helper.update_from_now();
-       // Handle messages.
-       match self.composite_request_receiver.try_recv() {
-           Ok(ref msg) => match &msg.message {
-               CompositeRequest::Hk(hk_req) => match hk_req.variant {
-                   HkRequestVariant::OneShot => {
-                       self.hk_reply_sender
-                           .send(GenericMessage::new(
-                               msg.requestor_info,
-                               HkReply::new(hk_req.unique_id, HkReplyVariant::Ack),
-                           ))
-                           .expect("failed to send HK reply");
-                       let mut sp_header = SpHeader::tm_unseg(self.id.apid, 0, 0).unwrap();
-                       let sec_header = PusTmSecondaryHeader::new(
-                           3,
-                           hk::Subservice::TmHkPacket as u8,
-                           0,
-                           0,
-                           Some(self.stamp_helper.stamp()),
-                       );
-                       // Let's serialize it as JSON for now.. This is a lot simpler than binary
-                       // serialization.
-                       let mgm_data_serialized =
-                           serde_json::to_vec(&*self.shared_mgm_set.lock().unwrap()).unwrap();
-                       let hk_tm = PusTmCreator::new(
-                           &mut sp_header,
-                           sec_header,
-                           &mgm_data_serialized,
-                           true,
-                       );
-                       self.tm_sender
-                           .send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
-                           .expect("failed to send HK TM");
-                   }
-                   HkRequestVariant::EnablePeriodic => todo!(),
-                   HkRequestVariant::DisablePeriodic => todo!(),
-                   HkRequestVariant::ModifyCollectionInterval(_) => todo!(),
-               },
-               // TODO: This object does not have actions (yet).. Still send back completion failure
-               // reply.
-               CompositeRequest::Action(_action_req) => {}
-           },
-           Err(_) => todo!(),
-       }
-       match self.mode_interface.request_rx.try_recv() {
-           Ok(msg) => {
-               let result = self.handle_mode_request(msg);
-               // TODO: Trigger event?
-               if result.is_err() {
-                   log::warn!(
-                       "{}: mode request failed with error {:?}",
-                       self.dev_str,
-                       result.err().unwrap()
-                   );
-               }
-           }
-           Err(_) => todo!(),
-       }
-   }
+       // Handle requests.
+       self.handle_composite_requests();
+       self.handle_mode_requests();
+       if self.mode() == DeviceMode::Normal as u32 {
+           log::trace!("polling LIS3MDL sensor {}", self.dev_str);
+           // Communicate with the device.
+           let result = self.com_interface.transfer(&self.tx_buf, &mut self.rx_buf);
+           assert!(result.is_ok());
+           // Actual data begins on the second byte, similarly to how a lot of SPI devices behave.
+           let x_raw = i16::from_be_bytes(self.rx_buf[1..3].try_into().unwrap());
+           let y_raw = i16::from_be_bytes(self.rx_buf[3..5].try_into().unwrap());
+           let z_raw = i16::from_be_bytes(self.rx_buf[5..7].try_into().unwrap());
+           // Simple scaling to retrieve the float value, assuming a sensor resolution of
+           let mut mgm_guard = self.shared_mgm_set.lock().unwrap();
+           mgm_guard.x = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
+           mgm_guard.y = y_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
+           mgm_guard.z = z_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
+           drop(mgm_guard);
+       }
+   }
+
+   pub fn handle_composite_requests(&mut self) {
+       loop {
+           match self.composite_request_receiver.try_recv() {
+               Ok(ref msg) => match &msg.message {
+                   CompositeRequest::Hk(hk_request) => {
+                       self.handle_hk_request(&msg.requestor_info, hk_request)
+                   }
+                   // TODO: This object does not have actions (yet).. Still send back completion failure
+                   // reply.
+                   CompositeRequest::Action(_action_req) => {}
+               },
+               Err(e) => {
+                   if e != mpsc::TryRecvError::Empty {
+                       log::warn!(
+                           "{}: failed to receive composite request: {:?}",
+                           self.dev_str,
+                           e
+                       );
+                   } else {
+                       break;
+                   }
+               }
+           }
+       }
+   }
+
+   pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) {
+       match hk_request.variant {
+           HkRequestVariant::OneShot => {
+               self.hk_reply_sender
+                   .send(GenericMessage::new(
+                       *requestor_info,
+                       HkReply::new(hk_request.unique_id, HkReplyVariant::Ack),
+                   ))
+                   .expect("failed to send HK reply");
+               let sec_header = PusTmSecondaryHeader::new(
+                   3,
+                   hk::Subservice::TmHkPacket as u8,
+                   0,
+                   0,
+                   self.stamp_helper.stamp(),
+               );
+               let mgm_snapshot = *self.shared_mgm_set.lock().unwrap();
+               // Use binary serialization here. We want the data to be tightly packed.
+               self.tm_buf[0] = mgm_snapshot.valid as u8;
+               self.tm_buf[1..5].copy_from_slice(&mgm_snapshot.x.to_be_bytes());
+               self.tm_buf[5..9].copy_from_slice(&mgm_snapshot.y.to_be_bytes());
+               self.tm_buf[9..13].copy_from_slice(&mgm_snapshot.z.to_be_bytes());
+               let hk_tm = PusTmCreator::new(
+                   SpHeader::new_from_apid(self.id.apid),
+                   sec_header,
+                   &self.tm_buf[0..12],
+                   true,
+               );
+               self.tm_sender
+                   .send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
+                   .expect("failed to send HK TM");
+           }
+           HkRequestVariant::EnablePeriodic => todo!(),
+           HkRequestVariant::DisablePeriodic => todo!(),
+           HkRequestVariant::ModifyCollectionInterval(_) => todo!(),
+       }
+   }
+
+   pub fn handle_mode_requests(&mut self) {
+       loop {
+           // TODO: Only allow one set mode request per cycle?
+           match self.mode_interface.request_rx.try_recv() {
+               Ok(msg) => {
+                   let result = self.handle_mode_request(msg);
+                   // TODO: Trigger event?
+                   if result.is_err() {
+                       log::warn!(
+                           "{}: mode request failed with error {:?}",
+                           self.dev_str,
+                           result.err().unwrap()
+                       );
+                   }
+               }
+               Err(e) => {
+                   if e != mpsc::TryRecvError::Empty {
+                       log::warn!("{}: failed to receive mode request: {:?}", self.dev_str, e);
+                   } else {
+                       break;
+                   }
+               }
+           }
+       }
+   }
}

- impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeProvider
-     for MgmHandler<ComInterface, TmSender>
+ impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeProvider
+     for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
    fn mode_and_submode(&self) -> ModeAndSubmode {
-       self.mode
+       self.mode_and_submode
    }
}

- impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeRequestHandler
-     for MgmHandler<ComInterface, TmSender>
+ impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeRequestHandler
+     for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
    type Error = ModeError;
    fn start_transition(
@@ -147,24 +218,37 @@ impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeRequestHandler
        requestor: MessageMetadata,
        mode_and_submode: ModeAndSubmode,
    ) -> Result<(), satrs::mode::ModeError> {
-       self.mode = mode_and_submode;
+       log::info!(
+           "{}: transitioning to mode {:?}",
+           self.dev_str,
+           mode_and_submode
+       );
+       self.mode_and_submode = mode_and_submode;
        self.handle_mode_reached(Some(requestor))?;
        Ok(())
    }

-   fn announce_mode(&self, _requestor_info: MessageMetadata, _recursive: bool) {
-       log::info!("{} announcing mode: {:?}", self.dev_str, self.mode);
+   fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
+       log::info!(
+           "{} announcing mode: {:?}",
+           self.dev_str,
+           self.mode_and_submode
+       );
    }

    fn handle_mode_reached(
        &mut self,
        requestor: Option<MessageMetadata>,
    ) -> Result<(), Self::Error> {
+       self.announce_mode(requestor, false);
        if let Some(requestor) = requestor {
-           if requestor.sender_id() == PUS_MODE_SERVICE.raw() {
-               // self.mode_reply_sender_to_pus.send(
-               //GenericMessage::new(requestor.request_id, requestor.sender_id, ModeReply::ModeReply(self.mode))
-               // )?;
+           if requestor.sender_id() != PUS_MODE_SERVICE.id() {
+               log::warn!(
+                   "can not send back mode reply to sender {}",
+                   requestor.sender_id()
+               );
+           } else {
+               self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
            }
        }
        Ok(())
@@ -172,9 +256,19 @@ impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeRequestHandler
    fn send_mode_reply(
        &self,
-       _requestor: MessageMetadata,
-       _reply: ModeReply,
+       requestor: MessageMetadata,
+       reply: ModeReply,
    ) -> Result<(), Self::Error> {
+       if requestor.sender_id() != PUS_MODE_SERVICE.id() {
+           log::warn!(
+               "can not send back mode reply to sender {}",
+               requestor.sender_id()
+           );
+       }
+       self.mode_interface
+           .reply_tx_to_pus
+           .send(GenericMessage::new(requestor, reply))
+           .map_err(|_| GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected))?;
        Ok(())
    }

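The reworked periodic_operation above scales the raw big-endian LIS3MDL axis words into microtesla using the two new constants. A self-contained sketch of just that conversion; the frame bytes below are made up for illustration (0x1ABA = 6842 LSB, i.e. exactly 1 Gauss = 100 microtesla at the 4 Gauss setting):

// Constants mirror the ones introduced in the diff above.
const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;

fn raw_to_microtesla(raw: i16) -> f32 {
    raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS
}

fn main() {
    // Hypothetical reply frame: data starts on the second byte, one
    // big-endian i16 per axis, as in the handler code above.
    let rx_buf: [u8; 7] = [0x00, 0x1A, 0xBA, 0xE5, 0x46, 0x00, 0x00];
    let x_raw = i16::from_be_bytes(rx_buf[1..3].try_into().unwrap());
    let y_raw = i16::from_be_bytes(rx_buf[3..5].try_into().unwrap());
    let z_raw = i16::from_be_bytes(rx_buf[5..7].try_into().unwrap());
    // Prints roughly 100 uT, -100 uT and 0 uT.
    println!(
        "x = {} uT, y = {} uT, z = {} uT",
        raw_to_microtesla(x_raw),
        raw_to_microtesla(y_raw),
        raw_to_microtesla(z_raw)
    );
}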

@@ -12,8 +12,7 @@ use std::time::Duration;
fn main() {
    let mut buf = [0; 32];
    let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-   let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
-   let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
+   let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
    let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
    let tc_req_id = RequestId::new(&pus_tc);
    println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");


@@ -1,50 +0,0 @@
use satrs::pus::ReceivesEcssPusTc;
use satrs::spacepackets::{CcsdsPacket, SpHeader};
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
use satrs_example::config::components::Apid;
#[derive(Clone)]
pub struct CcsdsReceiver<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone,
E,
> {
pub tc_source: TcSource,
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> CcsdsPacketHandler for CcsdsReceiver<TcSource, E>
{
type Error = E;
fn valid_apids(&self) -> &'static [u16] {
&[
Apid::GenericPus as u16,
Apid::Acs as u16,
Apid::Sched as u16,
Apid::EventTm as u16,
]
}
fn handle_known_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
if sp_header.apid() == Apid::Cfdp as u16 {
} else {
return self.tc_source.pass_ccsds(sp_header, tc_raw);
}
Ok(())
}
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
_tc_raw: &[u8],
) -> Result<(), Self::Error> {
println!("Unknown APID 0x{:x?} detected", sp_header.apid());
Ok(())
}
}


@@ -1,7 +1,12 @@
- use satrs::res_code::ResultU16;
+ use lazy_static::lazy_static;
+ use satrs::{
+     res_code::ResultU16,
+     spacepackets::{PacketId, PacketType},
+ };
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
- use std::net::Ipv4Addr;
+ use std::{collections::HashSet, net::Ipv4Addr};
+ use strum::IntoEnumIterator;

use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
@@ -33,8 +38,24 @@ pub enum GroupId {
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;

- pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
-     EventU32TypedSev::<SeverityInfo>::const_new(0, 0);
+ pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(0, 0);
+
+ lazy_static! {
+     pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
+         let mut set = HashSet::new();
+         for id in components::Apid::iter() {
+             set.insert(PacketId::new(PacketType::Tc, true, id as u16));
+         }
+         set
+     };
+     pub static ref APID_VALIDATOR: HashSet<u16> = {
+         let mut set = HashSet::new();
+         for id in components::Apid::iter() {
+             set.insert(id as u16);
+         }
+         set
+     };
+ }

pub mod tmtc_err {
@@ -102,26 +123,26 @@ pub mod mode_err {
pub mod components {
    use satrs::request::UniqueApidTargetId;
+   use strum::EnumIter;

-   #[derive(Copy, Clone, PartialEq, Eq)]
+   #[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
    pub enum Apid {
-       VerificationTm = 1,
-       Sched = 2,
-       EventTm = 3,
-       HkTm = 4,
-       GenericPus = 5,
-       Acs = 6,
-       Cfdp = 7,
+       Sched = 1,
+       GenericPus = 2,
+       Acs = 3,
+       Cfdp = 4,
+       Tmtc = 5,
    }

    // Component IDs for components with the PUS APID.
    #[derive(Copy, Clone, PartialEq, Eq)]
    pub enum PusId {
-       PusRouting = 0,
-       PusTest = 1,
-       PusAction = 2,
-       PusMode = 3,
-       PusHk = 4,
+       PusEventManagement = 0,
+       PusRouting = 1,
+       PusTest = 2,
+       PusAction = 3,
+       PusMode = 4,
+       PusHk = 5,
    }

    #[derive(Copy, Clone, PartialEq, Eq)]
@@ -129,10 +150,16 @@ pub mod components {
        Mgm0 = 0,
    }

+   #[derive(Copy, Clone, PartialEq, Eq)]
+   pub enum TmtcId {
+       UdpServer = 0,
+       TcpServer = 1,
+   }
+
    pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
    pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
-       UniqueApidTargetId::new(Apid::EventTm as u16, 0);
+       UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
    pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
    pub const PUS_TEST_SERVICE: UniqueApidTargetId =
@@ -145,6 +172,10 @@ pub mod components {
        UniqueApidTargetId::new(Apid::Sched as u16, 0);
    pub const MGM_HANDLER_0: UniqueApidTargetId =
        UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
+   pub const UDP_SERVER: UniqueApidTargetId =
+       UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
+   pub const TCP_SERVER: UniqueApidTargetId =
+       UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
}

pub mod pool {


@@ -1,45 +1,52 @@
use std::sync::mpsc::{self};

use crate::pus::create_verification_reporter;
+ use satrs::event_man::{EventMessageU32, EventRoutingError};
+ use satrs::pus::event::EventTmHookProvider;
use satrs::pus::verification::VerificationReporter;
- use satrs::pus::EcssTmSenderCore;
+ use satrs::pus::EcssTmSender;
+ use satrs::request::UniqueApidTargetId;
use satrs::{
-     event_man::{
-         EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded,
-         MpscEventReceiver,
-     },
-     events::EventU32,
-     params::Params,
+     event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
    pus::{
        event_man::{
-           DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken,
+           DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
        },
        verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
    },
    spacepackets::time::cds::CdsTime,
-   ComponentId,
};
- use satrs_example::config::components;
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;

use crate::update_time;

+ // This helper sets the APID of the event sender for the PUS telemetry.
+ #[derive(Default)]
+ pub struct EventApidSetter {
+     pub next_apid: u16,
+ }
+
+ impl EventTmHookProvider for EventApidSetter {
+     fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
+         tm.set_apid(self.next_apid);
+     }
+ }

/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
/// packets. It also handles the verification completion of PUS event service requests.
- pub struct PusEventHandler<TmSender: EcssTmSenderCore> {
-     id: ComponentId,
+ pub struct PusEventHandler<TmSender: EcssTmSender> {
    event_request_rx: mpsc::Receiver<EventRequestWithToken>,
-   pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>,
-   pus_event_man_rx: mpsc::Receiver<(EventU32, Option<Params>)>,
+   pus_event_tm_creator: DefaultPusEventU32TmCreator<EventApidSetter>,
+   pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
    tm_sender: TmSender,
    time_provider: CdsTime,
    timestamp: [u8; 7],
+   small_data_buf: [u8; 64],
    verif_handler: VerificationReporter,
}

- impl<TmSender: EcssTmSenderCore> PusEventHandler<TmSender> {
+ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
    pub fn new(
-       id: ComponentId,
        tm_sender: TmSender,
        verif_handler: VerificationReporter,
        event_manager: &mut EventManagerWithBoundedMpsc,
@@ -50,14 +57,16 @@ impl<TmSender: EcssTmSenderCore> PusEventHandler<TmSender> {
        // All events sent to the manager are routed to the PUS event manager, which generates PUS event
        // telemetry for each event.
-       let event_reporter = EventReporter::new(
+       let event_reporter = EventReporter::new_with_hook(
            PUS_EVENT_MANAGEMENT.raw(),
-           components::Apid::EventTm as u16,
+           0,
            0,
            128,
+           EventApidSetter::default(),
        )
        .unwrap();
        let pus_event_dispatcher =
-           DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter);
+           DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
        let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
            PUS_EVENT_MANAGEMENT.raw(),
            pus_event_man_tx,
@@ -68,12 +77,12 @@ impl<TmSender: EcssTmSenderCore> PusEventHandler<TmSender> {
        event_manager.add_sender(pus_event_man_send_provider);

        Self {
-           id,
            event_request_rx,
-           pus_event_dispatcher,
+           pus_event_tm_creator: pus_event_dispatcher,
            pus_event_man_rx,
            time_provider: CdsTime::new_with_u16_days(0, 0),
            timestamp: [0; 7],
+           small_data_buf: [0; 64],
            verif_handler,
            tm_sender,
        }
@@ -86,113 +95,203 @@ impl<TmSender: EcssTmSenderCore> PusEventHandler<TmSender> {
                .try_into()
                .expect("expected start verification token");
            self.verif_handler
-               .completion_success(self.id, &self.tm_sender, started_token, timestamp)
+               .completion_success(&self.tm_sender, started_token, timestamp)
                .expect("Sending completion success failed");
        };

-       // handle event requests
-       if let Ok(event_req) = self.event_request_rx.try_recv() {
-           match event_req.request {
-               EventRequest::Enable(event) => {
-                   self.pus_event_dispatcher
-                       .enable_tm_for_event(&event)
-                       .expect("Enabling TM failed");
-                   update_time(&mut self.time_provider, &mut self.timestamp);
-                   report_completion(event_req, &self.timestamp);
-               }
-               EventRequest::Disable(event) => {
-                   self.pus_event_dispatcher
-                       .disable_tm_for_event(&event)
-                       .expect("Disabling TM failed");
-                   update_time(&mut self.time_provider, &mut self.timestamp);
-                   report_completion(event_req, &self.timestamp);
-               }
-           }
-       }
-   }
+       loop {
+           // handle event requests
+           match self.event_request_rx.try_recv() {
+               Ok(event_req) => match event_req.request {
+                   EventRequest::Enable(event) => {
+                       self.pus_event_tm_creator
+                           .enable_tm_for_event(&event)
+                           .expect("Enabling TM failed");
+                       update_time(&mut self.time_provider, &mut self.timestamp);
+                       report_completion(event_req, &self.timestamp);
+                   }
+                   EventRequest::Disable(event) => {
+                       self.pus_event_tm_creator
+                           .disable_tm_for_event(&event)
+                           .expect("Disabling TM failed");
+                       update_time(&mut self.time_provider, &mut self.timestamp);
+                       report_completion(event_req, &self.timestamp);
+                   }
+               },
+               Err(e) => match e {
+                   mpsc::TryRecvError::Empty => break,
+                   mpsc::TryRecvError::Disconnected => {
+                       log::warn!("all event request senders have disconnected");
+                       break;
+                   }
+               },
+           }
+       }
+   }

    pub fn generate_pus_event_tm(&mut self) {
-       // Perform the generation of PUS event packets
-       if let Ok((event, _param)) = self.pus_event_man_rx.try_recv() {
-           update_time(&mut self.time_provider, &mut self.timestamp);
-           self.pus_event_dispatcher
-               .generate_pus_event_tm_generic(&self.tm_sender, &self.timestamp, event, None)
-               .expect("Sending TM as event failed");
+       loop {
+           // Perform the generation of PUS event packets
+           match self.pus_event_man_rx.try_recv() {
+               Ok(event_msg) => {
+                   // We use the TM modification hook to set the sender APID for each event.
+                   self.pus_event_tm_creator.reporter.tm_hook.next_apid =
+                       UniqueApidTargetId::from(event_msg.sender_id()).apid;
+                   update_time(&mut self.time_provider, &mut self.timestamp);
+                   let generation_result = self
+                       .pus_event_tm_creator
+                       .generate_pus_event_tm_generic_with_generic_params(
+                           &self.tm_sender,
+                           &self.timestamp,
+                           event_msg.event(),
+                           &mut self.small_data_buf,
+                           event_msg.params(),
+                       )
+                       .expect("Sending TM as event failed");
+                   if !generation_result.params_were_propagated {
+                       log::warn!(
+                           "Event TM parameters were not propagated: {:?}",
+                           event_msg.params()
+                       );
+                   }
+               }
+               Err(e) => match e {
+                   mpsc::TryRecvError::Empty => break,
+                   mpsc::TryRecvError::Disconnected => {
+                       log::warn!("All event senders have disconnected");
+                       break;
+                   }
+               },
+           }
        }
    }
}

- /// This is a thin wrapper around the event manager which also caches the sender component
- /// used to send events to the event manager.
- pub struct EventManagerWrapper {
-     event_manager: EventManagerWithBoundedMpsc,
-     event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
- }
-
- impl EventManagerWrapper {
-     pub fn new() -> Self {
-         // The sender handle is the primary sender handle for all components which want to create events.
-         // The event manager will receive the RX handle to receive all the events.
-         let (event_sender, event_man_rx) = mpsc::channel();
-         let event_recv = MpscEventReceiver::<EventU32>::new(event_man_rx);
-         Self {
-             event_manager: EventManagerWithBoundedMpsc::new(event_recv),
-             event_sender,
-         }
-     }
-
-     // Returns a cached event sender to send events to the event manager for routing.
-     pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
-         self.event_sender.clone()
-     }
-
-     pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
-         &mut self.event_manager
-     }
-
-     pub fn try_event_routing(&mut self) {
-         // Perform the event routing.
-         self.event_manager
-             .try_event_handling()
-             .expect("event handling failed");
-     }
- }
-
- pub struct EventHandler<TmSender: EcssTmSenderCore> {
-     pub event_man_wrapper: EventManagerWrapper,
+ pub struct EventHandler<TmSender: EcssTmSender> {
    pub pus_event_handler: PusEventHandler<TmSender>,
+   event_manager: EventManagerWithBoundedMpsc,
}

- impl<TmSender: EcssTmSenderCore> EventHandler<TmSender> {
+ impl<TmSender: EcssTmSender> EventHandler<TmSender> {
    pub fn new(
        tm_sender: TmSender,
+       event_rx: mpsc::Receiver<EventMessageU32>,
        event_request_rx: mpsc::Receiver<EventRequestWithToken>,
    ) -> Self {
-       let mut event_man_wrapper = EventManagerWrapper::new();
+       let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
        let pus_event_handler = PusEventHandler::new(
-           PUS_EVENT_MANAGEMENT.raw(),
            tm_sender,
-           create_verification_reporter(PUS_EVENT_MANAGEMENT.apid),
-           event_man_wrapper.event_manager(),
+           create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
+           &mut event_manager,
            event_request_rx,
        );
-       Self {
-           event_man_wrapper,
-           pus_event_handler,
-       }
-   }
-
-   pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
-       self.event_man_wrapper.clone_event_sender()
+       Self {
+           pus_event_handler,
+           event_manager,
+       }
    }

    #[allow(dead_code)]
    pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
-       self.event_man_wrapper.event_manager()
+       &mut self.event_manager
    }

    pub fn periodic_operation(&mut self) {
        self.pus_event_handler.handle_event_requests();
-       self.event_man_wrapper.try_event_routing();
+       self.try_event_routing();
        self.pus_event_handler.generate_pus_event_tm();
    }

+   pub fn try_event_routing(&mut self) {
+       let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
+           self.routing_error_handler(event_msg, error)
+       };
+       // Perform the event routing.
+       self.event_manager.try_event_handling(error_handler);
+   }
+
+   pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
+       log::warn!("event routing error for event {event_msg:?}: {error:?}");
+   }
}

+ #[cfg(test)]
+ mod tests {
+     use satrs::{
+         events::EventU32,
+         pus::verification::VerificationReporterCfg,
+         spacepackets::{
+             ecss::{tm::PusTmReader, PusPacket},
+             CcsdsPacket,
+         },
+         tmtc::PacketAsVec,
+     };
+
+     use super::*;
+
+     const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
+     const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
+
+     pub struct EventManagementTestbench {
+         pub event_tx: mpsc::SyncSender<EventMessageU32>,
+         pub event_manager: EventManagerWithBoundedMpsc,
+         pub tm_receiver: mpsc::Receiver<PacketAsVec>,
+         pub pus_event_handler: PusEventHandler<mpsc::Sender<PacketAsVec>>,
+     }
+
+     impl EventManagementTestbench {
+         pub fn new() -> Self {
+             let (event_tx, event_rx) = mpsc::sync_channel(10);
+             let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
+             let (tm_sender, tm_receiver) = mpsc::channel();
+             let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
+             let verif_reporter =
+                 VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
+             let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
+             let pus_event_handler = PusEventHandler::<mpsc::Sender<PacketAsVec>>::new(
+                 tm_sender,
+                 verif_reporter,
+                 &mut event_manager,
+                 event_req_rx,
+             );
+             Self {
+                 event_tx,
+                 tm_receiver,
+                 event_manager,
+                 pus_event_handler,
+             }
+         }
+     }
+
+     #[test]
+     fn test_basic_event_generation() {
+         let mut testbench = EventManagementTestbench::new();
+         testbench
+             .event_tx
+             .send(EventMessageU32::new(
+                 TEST_CREATOR_ID.id(),
+                 EventU32::new(satrs::events::Severity::Info, 1, 1),
+             ))
+             .expect("failed to send event");
+         testbench.pus_event_handler.handle_event_requests();
+         testbench.event_manager.try_event_handling(|_, _| {});
+         testbench.pus_event_handler.generate_pus_event_tm();
+         let tm_packet = testbench
+             .tm_receiver
+             .try_recv()
+             .expect("failed to receive TM packet");
+         assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
+         let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
+             .expect("failed to create TM reader")
+             .0;
+         assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
+         assert_eq!(tm_reader.user_data().len(), 4);
+         let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
+         assert_eq!(event_read_back, TEST_EVENT);
+     }
+
+     #[test]
+     fn test_basic_event_disabled() {
+         // TODO: Add test.
+     }
+ }


@@ -0,0 +1,3 @@
//! This module contains all component related to the direct interface of the example.
pub mod tcp;
pub mod udp;


@@ -1,25 +1,50 @@
+use std::time::Duration;
 use std::{
-    collections::VecDeque,
+    collections::{HashSet, VecDeque},
+    fmt::Debug,
+    marker::PhantomData,
     sync::{Arc, Mutex},
 };

 use log::{info, warn};
 use satrs::{
-    hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
-    pus::ReceivesEcssPusTc,
-    spacepackets::PacketId,
-    tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore},
+    encoding::ccsds::{SpValidity, SpacePacketValidator},
+    hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
+    spacepackets::{CcsdsPacket, PacketId},
+    tmtc::{PacketSenderRaw, PacketSource},
 };
-use satrs_example::config::components;

-use crate::ccsds::CcsdsReceiver;
+#[derive(Default)]
+pub struct ConnectionFinishedHandler {}

-pub const PACKET_ID_LOOKUP: &[PacketId] = &[
-    PacketId::const_tc(true, components::Apid::GenericPus as u16),
-    PacketId::const_tc(true, components::Apid::EventTm as u16),
-    PacketId::const_tc(true, components::Apid::Acs as u16),
-    PacketId::const_tc(true, components::Apid::Sched as u16),
-];
+pub struct SimplePacketValidator {
+    pub valid_ids: HashSet<PacketId>,
+}
+
+impl SpacePacketValidator for SimplePacketValidator {
+    fn validate(
+        &self,
+        sp_header: &satrs::spacepackets::SpHeader,
+        _raw_buf: &[u8],
+    ) -> satrs::encoding::ccsds::SpValidity {
+        if self.valid_ids.contains(&sp_header.packet_id()) {
+            return SpValidity::Valid;
+        }
+        log::warn!("ignoring space packet with header {:?}", sp_header);
+        // We could perform a CRC check.. but lets keep this simple and assume that TCP ensures
+        // data integrity.
+        SpValidity::Skip
+    }
+}
+
+impl HandledConnectionHandler for ConnectionFinishedHandler {
+    fn handled_connection(&mut self, info: satrs::hal::std::tcp_server::HandledConnectionInfo) {
+        info!(
+            "Served {} TMs and {} TCs for client {:?}",
+            info.num_sent_tms, info.num_received_tcs, info.addr
+        );
+    }
+}

 #[derive(Default, Clone)]
 pub struct SyncTcpTmSource {
@@ -49,7 +74,7 @@ impl SyncTcpTmSource {
     }
 }

-impl TmPacketSourceCore for SyncTcpTmSource {
+impl PacketSource for SyncTcpTmSource {
     type Error = ();

     fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
@@ -77,58 +102,49 @@ impl TmPacketSourceCore for SyncTcpTmSource {
     }
 }

-pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer<
-    (),
-    CcsdsError<MpscErrorType>,
+pub type TcpServer<ReceivesTc, SendError> = TcpSpacepacketsServer<
     SyncTcpTmSource,
-    CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
+    ReceivesTc,
+    SimplePacketValidator,
+    ConnectionFinishedHandler,
+    (),
+    SendError,
 >;

-pub struct TcpTask<
-    TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
-        + ReceivesEcssPusTc<Error = MpscErrorType>
-        + Clone
-        + Send
-        + 'static,
-    MpscErrorType: 'static,
-> {
-    server: TcpServerType<TcSource, MpscErrorType>,
-}
+pub struct TcpTask<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>(
+    pub TcpServer<TcSender, SendError>,
+    PhantomData<SendError>,
+);

-impl<
-        TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
-            + ReceivesEcssPusTc<Error = MpscErrorType>
-            + Clone
-            + Send
-            + 'static,
-        MpscErrorType: 'static + core::fmt::Debug,
-    > TcpTask<TcSource, MpscErrorType>
+impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
+    TcpTask<TcSender, SendError>
 {
     pub fn new(
         cfg: ServerConfig,
         tm_source: SyncTcpTmSource,
-        tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
+        tc_sender: TcSender,
+        valid_ids: HashSet<PacketId>,
     ) -> Result<Self, std::io::Error> {
-        Ok(Self {
-            server: TcpSpacepacketsServer::new(
-                cfg,
-                tm_source,
-                tc_receiver,
-                Box::new(PACKET_ID_LOOKUP),
-            )?,
-        })
+        Ok(Self(
+            TcpSpacepacketsServer::new(
+                cfg,
+                tm_source,
+                tc_sender,
+                SimplePacketValidator { valid_ids },
+                ConnectionFinishedHandler::default(),
+                None,
+            )?,
+            PhantomData,
+        ))
     }

     pub fn periodic_operation(&mut self) {
         loop {
-            let result = self.server.handle_next_connection();
+            let result = self
+                .0
+                .handle_all_connections(Some(Duration::from_millis(400)));
             match result {
-                Ok(conn_result) => {
-                    info!(
-                        "Served {} TMs and {} TCs for client {:?}",
-                        conn_result.num_sent_tms, conn_result.num_received_tcs, conn_result.addr
-                    );
-                }
+                Ok(_conn_result) => (),
                 Err(e) => {
                     warn!("TCP server error: {e:?}");
                 }
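
The static PACKET_ID_LOOKUP slice is replaced by a caller-supplied HashSet<PacketId>. A minimal sketch of building such a set, assuming illustrative APID constants in place of the example's components::Apid enum:

use std::collections::HashSet;

use satrs::spacepackets::PacketId;

// Illustrative APID values; the real ones come from satrs_example::config::components.
const APID_GENERIC_PUS: u16 = 0x02;
const APID_SCHED: u16 = 0x03;

fn valid_packet_ids() -> HashSet<PacketId> {
    [
        PacketId::const_tc(true, APID_GENERIC_PUS),
        PacketId::const_tc(true, APID_SCHED),
    ]
    .into_iter()
    .collect()
}

The resulting set is what main.rs hands to TcpTask::new further below.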

View File

@@ -1,12 +1,13 @@
+use core::fmt::Debug;
 use std::net::{SocketAddr, UdpSocket};
 use std::sync::mpsc;

 use log::{info, warn};
-use satrs::pus::{PusTmAsVec, PusTmInPool};
+use satrs::pus::HandlingStatus;
+use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderRaw};
 use satrs::{
     hal::std::udp_server::{ReceiveResult, UdpTcServer},
     pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
-    tmtc::CcsdsError,
 };

 pub trait UdpTmHandler {
@@ -14,7 +15,7 @@ pub trait UdpTmHandler {
 }

 pub struct StaticUdpTmHandler {
-    pub tm_rx: mpsc::Receiver<PusTmInPool>,
+    pub tm_rx: mpsc::Receiver<PacketInPool>,
     pub tm_store: SharedStaticMemoryPool,
 }
@@ -43,7 +44,7 @@ impl UdpTmHandler for StaticUdpTmHandler {
 }

 pub struct DynamicUdpTmHandler {
-    pub tm_rx: mpsc::Receiver<PusTmAsVec>,
+    pub tm_rx: mpsc::Receiver<PacketAsVec>,
 }

 impl UdpTmHandler for DynamicUdpTmHandler {
@@ -64,49 +65,57 @@ impl UdpTmHandler for DynamicUdpTmHandler {
     }
 }

-pub struct UdpTmtcServer<TmHandler: UdpTmHandler, SendError> {
-    pub udp_tc_server: UdpTcServer<CcsdsError<SendError>>,
+pub struct UdpTmtcServer<
+    TcSender: PacketSenderRaw<Error = SendError>,
+    TmHandler: UdpTmHandler,
+    SendError,
+> {
+    pub udp_tc_server: UdpTcServer<TcSender, SendError>,
     pub tm_handler: TmHandler,
 }

-impl<TmHandler: UdpTmHandler, SendError: core::fmt::Debug + 'static>
-    UdpTmtcServer<TmHandler, SendError>
+impl<
+        TcSender: PacketSenderRaw<Error = SendError>,
+        TmHandler: UdpTmHandler,
+        SendError: Debug + 'static,
+    > UdpTmtcServer<TcSender, TmHandler, SendError>
 {
     pub fn periodic_operation(&mut self) {
-        while self.poll_tc_server() {}
+        loop {
+            if self.poll_tc_server() == HandlingStatus::Empty {
+                break;
+            }
+        }
         if let Some(recv_addr) = self.udp_tc_server.last_sender() {
             self.tm_handler
                 .send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr);
         }
     }

-    fn poll_tc_server(&mut self) -> bool {
+    fn poll_tc_server(&mut self) -> HandlingStatus {
         match self.udp_tc_server.try_recv_tc() {
-            Ok(_) => true,
-            Err(e) => match e {
-                ReceiveResult::ReceiverError(e) => match e {
-                    CcsdsError::ByteConversionError(e) => {
-                        warn!("packet error: {e:?}");
-                        true
-                    }
-                    CcsdsError::CustomError(e) => {
-                        warn!("mpsc custom error {e:?}");
-                        true
-                    }
-                },
-                ReceiveResult::IoError(e) => {
-                    warn!("IO error {e}");
-                    false
-                }
-                ReceiveResult::NothingReceived => false,
-            },
+            Ok(_) => HandlingStatus::HandledOne,
+            Err(e) => {
+                match e {
+                    ReceiveResult::NothingReceived => (),
+                    ReceiveResult::Io(e) => {
+                        warn!("IO error {e}");
+                    }
+                    ReceiveResult::Send(send_error) => {
+                        warn!("send error {send_error:?}");
+                    }
+                }
+                HandlingStatus::Empty
+            }
         }
     }
 }

 #[cfg(test)]
 mod tests {
+    use std::net::Ipv4Addr;
     use std::{
+        cell::RefCell,
         collections::VecDeque,
         net::IpAddr,
         sync::{Arc, Mutex},
@@ -117,21 +126,26 @@ mod tests {
             ecss::{tc::PusTcCreator, WritablePusPacket},
             SpHeader,
         },
-        tmtc::ReceivesTcCore,
+        tmtc::PacketSenderRaw,
+        ComponentId,
     };
     use satrs_example::config::{components, OBSW_SERVER_ADDR};

     use super::*;

-    #[derive(Default, Debug, Clone)]
-    pub struct TestReceiver {
-        tc_vec: Arc<Mutex<VecDeque<Vec<u8>>>>,
+    const UDP_SERVER_ID: ComponentId = 0x05;
+
+    #[derive(Default, Debug)]
+    pub struct TestSender {
+        tc_vec: RefCell<VecDeque<PacketAsVec>>,
     }

-    impl ReceivesTcCore for TestReceiver {
-        type Error = CcsdsError<()>;
-        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
-            self.tc_vec.lock().unwrap().push_back(tc_raw.to_vec());
+    impl PacketSenderRaw for TestSender {
+        type Error = ();
+
+        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
+            let mut mut_queue = self.tc_vec.borrow_mut();
+            mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
             Ok(())
         }
     }
@@ -150,9 +164,10 @@ mod tests {
     #[test]
     fn test_basic() {
         let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
-        let test_receiver = TestReceiver::default();
-        let tc_queue = test_receiver.tc_vec.clone();
-        let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
+        let test_receiver = TestSender::default();
+        // let tc_queue = test_receiver.tc_vec.clone();
+        let udp_tc_server =
+            UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
         let tm_handler = TestTmHandler::default();
         let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
         let mut udp_dyn_server = UdpTmtcServer {
@@ -160,16 +175,18 @@ mod tests {
             tm_handler,
         };
         udp_dyn_server.periodic_operation();
-        assert!(tc_queue.lock().unwrap().is_empty());
+        let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
+        assert!(queue.is_empty());
         assert!(tm_handler_calls.lock().unwrap().is_empty());
     }

     #[test]
     fn test_transactions() {
-        let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
-        let test_receiver = TestReceiver::default();
-        let tc_queue = test_receiver.tc_vec.clone();
-        let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
+        let sock_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0);
+        let test_receiver = TestSender::default();
+        // let tc_queue = test_receiver.tc_vec.clone();
+        let udp_tc_server =
+            UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
         let server_addr = udp_tc_server.socket.local_addr().unwrap();
         let tm_handler = TestTmHandler::default();
         let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
@@ -177,20 +194,21 @@ mod tests {
             udp_tc_server,
             tm_handler,
         };
-        let mut sph = SpHeader::tc_unseg(components::Apid::GenericPus as u16, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true)
+        let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
+        let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
             .to_vec()
             .unwrap();
         let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");
         let client_addr = client.local_addr().unwrap();
-        client.connect(server_addr).unwrap();
-        client.send(&ping_tc).unwrap();
+        println!("{}", server_addr);
+        client.send_to(&ping_tc, server_addr).unwrap();
         udp_dyn_server.periodic_operation();
         {
-            let mut tc_queue = tc_queue.lock().unwrap();
-            assert!(!tc_queue.is_empty());
-            let received_tc = tc_queue.pop_front().unwrap();
-            assert_eq!(received_tc, ping_tc);
+            let mut queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow_mut();
+            assert!(!queue.is_empty());
+            let packet_with_sender = queue.pop_front().unwrap();
+            assert_eq!(packet_with_sender.packet, ping_tc);
+            assert_eq!(packet_with_sender.sender_id, UDP_SERVER_ID);
         }
         {
@@ -201,7 +219,9 @@ mod tests {
             assert_eq!(received_addr, client_addr);
         }
         udp_dyn_server.periodic_operation();
-        assert!(tc_queue.lock().unwrap().is_empty());
+        let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
+        assert!(queue.is_empty());
+        drop(queue);
         // Still tries to send to the same client.
         {
             let mut tm_handler_calls = tm_handler_calls.lock().unwrap();
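
The reworked loop drains the TC server until it reports HandlingStatus::Empty and only then forwards TM to the last sender. A minimal sketch of an alternative UdpTmHandler implementation that just logs, assuming the trait signature matches the send_tm_to_udp_client call site above:

use std::net::{SocketAddr, UdpSocket};

// Sketch only: a handler that logs instead of sending telemetry packets.
// UdpTmHandler is the trait defined at the top of this file.
pub struct LoggingTmHandler;

impl UdpTmHandler for LoggingTmHandler {
    fn send_tm_to_udp_client(&mut self, _socket: &UdpSocket, recv_addr: &SocketAddr) {
        log::info!("would send queued TM to {recv_addr}");
    }
}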

View File

@@ -1,34 +1,32 @@
 mod acs;
-mod ccsds;
 mod events;
 mod hk;
+mod interface;
 mod logger;
 mod pus;
 mod requests;
-mod tcp;
-mod tm_funnel;
 mod tmtc;
-mod udp;

 use crate::events::EventHandler;
+use crate::interface::udp::DynamicUdpTmHandler;
 use crate::pus::stack::PusStack;
-use crate::tm_funnel::{TmFunnelDynamic, TmFunnelStatic};
+use crate::tmtc::tc_source::{TcSourceTaskDynamic, TcSourceTaskStatic};
+use crate::tmtc::tm_sink::{TmSinkDynamic, TmSinkStatic};
 use log::info;
 use pus::test::create_test_service_dynamic;
 use satrs::hal::std::tcp_server::ServerConfig;
 use satrs::hal::std::udp_server::UdpTcServer;
 use satrs::request::GenericMessage;
-use satrs::tmtc::tm_helper::SharedTmPool;
+use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
 use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
 use satrs_example::config::tasks::{
     FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC,
 };
-use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT};
-use tmtc::PusTcSourceProviderDynamic;
-use udp::DynamicUdpTmHandler;
+use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT};

-use crate::acs::mgm::{MgmHandler, MpscModeLeafInterface, SpiDummyInterface};
-use crate::ccsds::CcsdsReceiver;
+use crate::acs::mgm::{MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface};
+use crate::interface::tcp::{SyncTcpTmSource, TcpTask};
+use crate::interface::udp::{StaticUdpTmHandler, UdpTmtcServer};
 use crate::logger::setup_logger;
 use crate::pus::action::{create_action_service_dynamic, create_action_service_static};
 use crate::pus::event::{create_event_service_dynamic, create_event_service_static};
@@ -36,19 +34,12 @@ use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static};
 use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static};
 use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static};
 use crate::pus::test::create_test_service_static;
-use crate::pus::{PusReceiver, PusTcMpscRouter};
+use crate::pus::{PusTcDistributor, PusTcMpscRouter};
 use crate::requests::{CompositeRequest, GenericRequestRouter};
-use crate::tcp::{SyncTcpTmSource, TcpTask};
-use crate::tmtc::{
-    PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic,
-};
-use crate::udp::{StaticUdpTmHandler, UdpTmtcServer};
 use satrs::mode::ModeRequest;
 use satrs::pus::event_man::EventRequestWithToken;
-use satrs::pus::TmInSharedPoolSender;
 use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
-use satrs::tmtc::CcsdsDistributor;
-use satrs_example::config::components::MGM_HANDLER_0;
+use satrs_example::config::components::{MGM_HANDLER_0, TCP_SERVER, UDP_SERVER};
 use std::net::{IpAddr, SocketAddr};
 use std::sync::mpsc;
 use std::sync::{Arc, RwLock};
@@ -58,16 +49,16 @@ use std::time::Duration;
 #[allow(dead_code)]
 fn static_tmtc_pool_main() {
     let (tm_pool, tc_pool) = create_static_pools();
-    let shared_tm_pool = SharedTmPool::new(tm_pool);
-    let shared_tc_pool = SharedTcPool {
-        pool: Arc::new(RwLock::new(tc_pool)),
-    };
+    let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
+    let shared_tc_pool = Arc::new(RwLock::new(tc_pool));
+    let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
+    let shared_tc_pool_wrapper = SharedPacketPool::new(&shared_tc_pool);
     let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
-    let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50);
+    let (tm_sink_tx, tm_sink_rx) = mpsc::sync_channel(50);
     let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
-    let tm_funnel_tx_sender =
-        TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_funnel_tx.clone());
+    let tm_sink_tx_sender =
+        PacketSenderWithSharedPool::new(tm_sink_tx.clone(), shared_tm_pool_wrapper.clone());
     let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
         mpsc::channel::<GenericMessage<CompositeRequest>>();
@@ -84,19 +75,17 @@ fn static_tmtc_pool_main() {
     // This helper structure is used by all telecommand providers which need to send telecommands
     // to the TC source.
-    let tc_source = PusTcSourceProviderSharedPool {
-        shared_pool: shared_tc_pool.clone(),
-        tc_source: tc_source_tx,
-    };
+    let tc_source = PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());

     // Create event handling components
     // These sender handles are used to send event requests, for example to enable or disable
     // certain events.
+    let (event_tx, event_rx) = mpsc::sync_channel(100);
     let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();

     // The event task is the core handler to perform the event routing and TM handling as specified
     // in the sat-rs documentation.
-    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx);
+    let mut event_handler = EventHandler::new(tm_sink_tx.clone(), event_rx, event_request_rx);

     let (pus_test_tx, pus_test_rx) = mpsc::channel();
     let (pus_event_tx, pus_event_rx) = mpsc::channel();
@@ -118,40 +107,40 @@ fn static_tmtc_pool_main() {
         mode_tc_sender: pus_mode_tx,
     };
     let pus_test_service = create_test_service_static(
-        tm_funnel_tx_sender.clone(),
-        shared_tc_pool.pool.clone(),
-        event_handler.clone_event_sender(),
+        tm_sink_tx_sender.clone(),
+        shared_tc_pool.clone(),
+        event_tx.clone(),
         pus_test_rx,
     );
     let pus_scheduler_service = create_scheduler_service_static(
-        tm_funnel_tx_sender.clone(),
+        tm_sink_tx_sender.clone(),
         tc_source.clone(),
         pus_sched_rx,
         create_sched_tc_pool(),
     );

     let pus_event_service = create_event_service_static(
-        tm_funnel_tx_sender.clone(),
-        shared_tc_pool.pool.clone(),
+        tm_sink_tx_sender.clone(),
+        shared_tc_pool.clone(),
         pus_event_rx,
         event_request_tx,
     );
     let pus_action_service = create_action_service_static(
-        tm_funnel_tx_sender.clone(),
-        shared_tc_pool.pool.clone(),
+        tm_sink_tx_sender.clone(),
+        shared_tc_pool.clone(),
         pus_action_rx,
         request_map.clone(),
         pus_action_reply_rx,
     );
     let pus_hk_service = create_hk_service_static(
-        tm_funnel_tx_sender.clone(),
-        shared_tc_pool.pool.clone(),
+        tm_sink_tx_sender.clone(),
+        shared_tc_pool.clone(),
         pus_hk_rx,
         request_map.clone(),
         pus_hk_reply_rx,
     );
     let pus_mode_service = create_mode_service_static(
-        tm_funnel_tx_sender.clone(),
-        shared_tc_pool.pool.clone(),
+        tm_sink_tx_sender.clone(),
+        shared_tc_pool.clone(),
         pus_mode_rx,
         request_map,
         pus_mode_reply_rx,
@@ -165,39 +154,43 @@ fn static_tmtc_pool_main() {
         pus_mode_service,
     );

-    let ccsds_receiver = CcsdsReceiver { tc_source };
-
     let mut tmtc_task = TcSourceTaskStatic::new(
-        shared_tc_pool.clone(),
+        shared_tc_pool_wrapper.clone(),
         tc_source_rx,
-        PusReceiver::new(tm_funnel_tx_sender, pus_router),
+        PusTcDistributor::new(tm_sink_tx_sender, pus_router),
     );

     let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
-    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
+    let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source.clone())
         .expect("creating UDP TMTC server failed");
     let mut udp_tmtc_server = UdpTmtcServer {
         udp_tc_server,
         tm_handler: StaticUdpTmHandler {
             tm_rx: tm_server_rx,
-            tm_store: shared_tm_pool.clone_backing_pool(),
+            tm_store: shared_tm_pool.clone(),
         },
     };

-    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
-    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
+    let tcp_server_cfg = ServerConfig::new(
+        TCP_SERVER.id(),
+        sock_addr,
+        Duration::from_millis(400),
+        4096,
+        8192,
+    );
     let sync_tm_tcp_source = SyncTcpTmSource::new(200);
     let mut tcp_server = TcpTask::new(
         tcp_server_cfg,
         sync_tm_tcp_source.clone(),
-        tcp_ccsds_distributor,
+        tc_source.clone(),
+        PACKET_ID_VALIDATOR.clone(),
     )
     .expect("tcp server creation failed");

-    let mut tm_funnel = TmFunnelStatic::new(
-        shared_tm_pool,
+    let mut tm_sink = TmSinkStatic::new(
+        shared_tm_pool_wrapper,
         sync_tm_tcp_source,
-        tm_funnel_rx,
+        tm_sink_rx,
         tm_server_tx,
     );
@@ -211,20 +204,20 @@ fn static_tmtc_pool_main() {
         reply_tx_to_pus: pus_mode_reply_tx,
         reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
     };
-    let mut mgm_handler = MgmHandler::new(
+    let mut mgm_handler = MgmHandlerLis3Mdl::new(
         MGM_HANDLER_0,
         "MGM_0",
         mode_leaf_interface,
         mgm_handler_composite_rx,
         pus_hk_reply_tx,
-        tm_funnel_tx,
+        tm_sink_tx,
         dummy_spi_interface,
         shared_mgm_set,
     );

     info!("Starting TMTC and UDP task");
     let jh_udp_tmtc = thread::Builder::new()
-        .name("TMTC and UDP".to_string())
+        .name("SATRS tmtc-udp".to_string())
         .spawn(move || {
             info!("Running UDP server on port {SERVER_PORT}");
             loop {
@@ -237,7 +230,7 @@ fn static_tmtc_pool_main() {
info!("Starting TCP task"); info!("Starting TCP task");
let jh_tcp = thread::Builder::new() let jh_tcp = thread::Builder::new()
.name("TCP".to_string()) .name("sat-rs tcp".to_string())
.spawn(move || { .spawn(move || {
info!("Running TCP server on port {SERVER_PORT}"); info!("Running TCP server on port {SERVER_PORT}");
loop { loop {
@@ -248,15 +241,15 @@ fn static_tmtc_pool_main() {
info!("Starting TM funnel task"); info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new() let jh_tm_funnel = thread::Builder::new()
.name("TM Funnel".to_string()) .name("tm sink".to_string())
.spawn(move || loop { .spawn(move || loop {
tm_funnel.operation(); tm_sink.operation();
}) })
.unwrap(); .unwrap();
info!("Starting event handling task"); info!("Starting event handling task");
let jh_event_handling = thread::Builder::new() let jh_event_handling = thread::Builder::new()
.name("Event".to_string()) .name("sat-rs events".to_string())
.spawn(move || loop { .spawn(move || loop {
event_handler.periodic_operation(); event_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING)); thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
@@ -265,7 +258,7 @@ fn static_tmtc_pool_main() {
info!("Starting AOCS thread"); info!("Starting AOCS thread");
let jh_aocs = thread::Builder::new() let jh_aocs = thread::Builder::new()
.name("AOCS".to_string()) .name("sat-rs aocs".to_string())
.spawn(move || loop { .spawn(move || loop {
mgm_handler.periodic_operation(); mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
@@ -274,7 +267,7 @@ fn static_tmtc_pool_main() {
info!("Starting PUS handler thread"); info!("Starting PUS handler thread");
let jh_pus_handler = thread::Builder::new() let jh_pus_handler = thread::Builder::new()
.name("PUS".to_string()) .name("sat-rs pus".to_string())
.spawn(move || loop { .spawn(move || loop {
pus_stack.periodic_operation(); pus_stack.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK)); thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
@@ -319,15 +312,14 @@ fn dyn_tmtc_pool_main() {
         .mode_router_map
         .insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx);

-    let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
-
     // Create event handling components
     // These sender handles are used to send event requests, for example to enable or disable
     // certain events.
+    let (event_tx, event_rx) = mpsc::sync_channel(100);
     let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();

     // The event task is the core handler to perform the event routing and TM handling as specified
     // in the sat-rs documentation.
-    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx);
+    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_rx, event_request_rx);

     let (pus_test_tx, pus_test_rx) = mpsc::channel();
     let (pus_event_tx, pus_event_rx) = mpsc::channel();
@@ -349,14 +341,11 @@ fn dyn_tmtc_pool_main() {
         mode_tc_sender: pus_mode_tx,
     };

-    let pus_test_service = create_test_service_dynamic(
-        tm_funnel_tx.clone(),
-        event_handler.clone_event_sender(),
-        pus_test_rx,
-    );
+    let pus_test_service =
+        create_test_service_dynamic(tm_funnel_tx.clone(), event_tx.clone(), pus_test_rx);
     let pus_scheduler_service = create_scheduler_service_dynamic(
         tm_funnel_tx.clone(),
-        tc_source.0.clone(),
+        tc_source_tx.clone(),
         pus_sched_rx,
         create_sched_tc_pool(),
     );
@@ -390,16 +379,13 @@ fn dyn_tmtc_pool_main() {
         pus_mode_service,
     );

-    let ccsds_receiver = CcsdsReceiver { tc_source };
-
     let mut tmtc_task = TcSourceTaskDynamic::new(
         tc_source_rx,
-        PusReceiver::new(tm_funnel_tx.clone(), pus_router),
+        PusTcDistributor::new(tm_funnel_tx.clone(), pus_router),
     );

     let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
-    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
+    let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone())
         .expect("creating UDP TMTC server failed");
     let mut udp_tmtc_server = UdpTmtcServer {
         udp_tc_server,
@@ -408,17 +394,23 @@ fn dyn_tmtc_pool_main() {
         },
     };

-    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
-    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
+    let tcp_server_cfg = ServerConfig::new(
+        TCP_SERVER.id(),
+        sock_addr,
+        Duration::from_millis(400),
+        4096,
+        8192,
+    );
     let sync_tm_tcp_source = SyncTcpTmSource::new(200);
     let mut tcp_server = TcpTask::new(
         tcp_server_cfg,
         sync_tm_tcp_source.clone(),
-        tcp_ccsds_distributor,
+        tc_source_tx.clone(),
+        PACKET_ID_VALIDATOR.clone(),
     )
     .expect("tcp server creation failed");

-    let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
+    let mut tm_funnel = TmSinkDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);

     let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
         mpsc::channel();
@@ -429,7 +421,7 @@ fn dyn_tmtc_pool_main() {
         reply_tx_to_pus: pus_mode_reply_tx,
         reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
     };
-    let mut mgm_handler = MgmHandler::new(
+    let mut mgm_handler = MgmHandlerLis3Mdl::new(
         MGM_HANDLER_0,
         "MGM_0",
         mode_leaf_interface,
@@ -442,7 +434,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TMTC and UDP task"); info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new() let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string()) .name("sat-rs tmtc-udp".to_string())
.spawn(move || { .spawn(move || {
info!("Running UDP server on port {SERVER_PORT}"); info!("Running UDP server on port {SERVER_PORT}");
loop { loop {
@@ -455,7 +447,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TCP task"); info!("Starting TCP task");
let jh_tcp = thread::Builder::new() let jh_tcp = thread::Builder::new()
.name("TCP".to_string()) .name("sat-rs tcp".to_string())
.spawn(move || { .spawn(move || {
info!("Running TCP server on port {SERVER_PORT}"); info!("Running TCP server on port {SERVER_PORT}");
loop { loop {
@@ -466,7 +458,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TM funnel task"); info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new() let jh_tm_funnel = thread::Builder::new()
.name("TM Funnel".to_string()) .name("sat-rs tm-sink".to_string())
.spawn(move || loop { .spawn(move || loop {
tm_funnel.operation(); tm_funnel.operation();
}) })
@@ -474,7 +466,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting event handling task"); info!("Starting event handling task");
let jh_event_handling = thread::Builder::new() let jh_event_handling = thread::Builder::new()
.name("Event".to_string()) .name("sat-rs events".to_string())
.spawn(move || loop { .spawn(move || loop {
event_handler.periodic_operation(); event_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING)); thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
@@ -483,7 +475,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting AOCS thread"); info!("Starting AOCS thread");
let jh_aocs = thread::Builder::new() let jh_aocs = thread::Builder::new()
.name("AOCS".to_string()) .name("sat-rs aocs".to_string())
.spawn(move || loop { .spawn(move || loop {
mgm_handler.periodic_operation(); mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
@@ -492,7 +484,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting PUS handler thread"); info!("Starting PUS handler thread");
let jh_pus_handler = thread::Builder::new() let jh_pus_handler = thread::Builder::new()
.name("PUS".to_string()) .name("sat-rs pus".to_string())
.spawn(move || loop { .spawn(move || loop {
pus_stack.periodic_operation(); pus_stack.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK)); thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
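
Both entry points now share one pool-wiring pattern: wrap the raw pool in Arc<RwLock<...>>, adapt it with SharedPacketPool, and pair it with an mpsc sender. A condensed sketch, under the assumption that create_static_pools() returns satrs StaticMemoryPool instances and that PacketSenderWithSharedPool uses its default mpsc-based sender type:

use std::sync::{mpsc, Arc, RwLock};

use satrs::pool::StaticMemoryPool;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};

// Sketch of the TM-side wiring used in static_tmtc_pool_main above.
fn wire_tm_sender(tm_pool: StaticMemoryPool) -> PacketSenderWithSharedPool {
    let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
    let pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
    // The receiver end would go to the TM sink task; this sketch drops it.
    let (tm_sink_tx, _tm_sink_rx) = mpsc::sync_channel(50);
    PacketSenderWithSharedPool::new(tm_sink_tx, pool_wrapper)
}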

View File

@@ -1,24 +1,24 @@
-use log::{error, warn};
+use log::warn;
 use satrs::action::{ActionRequest, ActionRequestVariant};
-use satrs::params::WritableToBeBytes;
 use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::action::{
-    ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap, PusActionReply,
+    ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
 };
 use satrs::pus::verification::{
-    FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter,
+    handle_completion_failure_with_generic_params, handle_step_failure_with_generic_params,
+    FailParamHelper, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
     VerificationReportingProvider, VerificationToken,
 };
 use satrs::pus::{
     ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
-    EcssTcInVecConverter, EcssTmSenderCore, EcssTmtcError, GenericConversionError, MpscTcReceiver,
-    MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler,
-    PusServiceHelper, PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
+    MpscTmAsVecSender, PusPacketHandlingError, PusReplyHandler, PusServiceHelper,
+    PusTcToRequestConverter,
 };
 use satrs::request::{GenericMessage, UniqueApidTargetId};
 use satrs::spacepackets::ecss::tc::PusTcReader;
-use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket};
-use satrs::ComponentId;
+use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
+use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use satrs_example::config::components::PUS_ACTION_SERVICE;
 use satrs_example::config::tmtc_err;
 use std::sync::mpsc;
@@ -27,8 +27,8 @@ use std::time::Duration;
 use crate::requests::GenericRequestRouter;

 use super::{
-    create_verification_reporter, generic_pus_request_timeout_handler, PusTargetedRequestService,
-    TargetedPusService,
+    create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
+    PusTargetedRequestService, TargetedPusService,
 };

 pub struct ActionReplyHandler {
@@ -43,14 +43,13 @@ impl Default for ActionReplyHandler {
     }
 }

-impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyHandler {
+impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyHandler {
     type Error = EcssTmtcError;

     fn handle_unrequested_reply(
         &mut self,
-        _caller_id: ComponentId,
-        reply: &GenericMessage<PusActionReply>,
-        _tm_sender: &impl EcssTmSenderCore,
+        reply: &GenericMessage<ActionReplyPus>,
+        _tm_sender: &impl EcssTmSender,
     ) -> Result<(), Self::Error> {
         warn!("received unexpected reply for service 8: {reply:?}");
         Ok(())
@@ -58,12 +57,11 @@ impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyH
     fn handle_reply(
         &mut self,
-        caller_id: ComponentId,
-        reply: &GenericMessage<PusActionReply>,
+        reply: &GenericMessage<ActionReplyPus>,
         active_request: &ActivePusActionRequestStd,
-        tm_sender: &(impl EcssTmSenderCore + ?Sized),
+        tm_sender: &(impl EcssTmSender + ?Sized),
         verification_handler: &impl VerificationReportingProvider,
-        time_stamp: &[u8],
+        timestamp: &[u8],
     ) -> Result<bool, Self::Error> {
         let verif_token: VerificationToken<TcStateStarted> = active_request
             .token()
@@ -71,16 +69,23 @@ impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyH
.expect("invalid token state"); .expect("invalid token state");
let remove_entry = match &reply.message.variant { let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => { ActionReplyVariant::CompletionFailed { error_code, params } => {
let mut fail_data_len = 0; let error_propagated = handle_completion_failure_with_generic_params(
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.completion_failure(
caller_id,
tm_sender, tm_sender,
verif_token, verif_token,
FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]), verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
)?; )?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true true
} }
ActionReplyVariant::StepFailed { ActionReplyVariant::StepFailed {
@@ -88,38 +93,35 @@ impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyH
                 step,
                 params,
             } => {
-                let mut fail_data_len = 0;
-                if let Some(params) = params {
-                    fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
-                }
-                verification_handler.step_failure(
-                    caller_id,
-                    tm_sender,
-                    verif_token,
-                    FailParamsWithStep::new(
-                        time_stamp,
-                        &EcssEnumU16::new(*step),
-                        error_code,
-                        &self.fail_data_buf[..fail_data_len],
-                    ),
-                )?;
+                let error_propagated = handle_step_failure_with_generic_params(
+                    tm_sender,
+                    verif_token,
+                    verification_handler,
+                    FailParamHelper {
+                        error_code,
+                        params: params.as_ref(),
+                        timestamp,
+                        small_data_buf: &mut self.fail_data_buf,
+                    },
+                    &EcssEnumU16::new(*step),
+                )?;
+                if !error_propagated {
+                    log::warn!(
+                        "error params for step failure were not propagated: {:?}",
+                        params.as_ref()
+                    );
+                }
                 true
             }
             ActionReplyVariant::Completed => {
-                verification_handler.completion_success(
-                    caller_id,
-                    tm_sender,
-                    verif_token,
-                    time_stamp,
-                )?;
+                verification_handler.completion_success(tm_sender, verif_token, timestamp)?;
                 true
             }
             ActionReplyVariant::StepSuccess { step } => {
                 verification_handler.step_success(
-                    caller_id,
                     tm_sender,
                     &verif_token,
-                    time_stamp,
+                    timestamp,
                     EcssEnumU16::new(*step),
                 )?;
                 false
@@ -131,14 +133,12 @@ impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyH
     fn handle_request_timeout(
         &mut self,
-        caller_id: ComponentId,
         active_request: &ActivePusActionRequestStd,
-        tm_sender: &impl EcssTmSenderCore,
+        tm_sender: &impl EcssTmSender,
         verification_handler: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(), Self::Error> {
         generic_pus_request_timeout_handler(
-            caller_id,
             tm_sender,
             active_request,
             verification_handler,
@@ -156,10 +156,9 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
     fn convert(
         &mut self,
-        caller_id: ComponentId,
         token: VerificationToken<TcStateAccepted>,
         tc: &PusTcReader,
-        tm_sender: &(impl EcssTmSenderCore + ?Sized),
+        tm_sender: &(impl EcssTmSender + ?Sized),
         verif_reporter: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
@@ -168,7 +167,6 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
         if user_data.len() < 8 {
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
@@ -199,7 +197,6 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
         } else {
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
@@ -211,18 +208,18 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
 }

 pub fn create_action_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     action_router: GenericRequestRouter,
-    reply_receiver: mpsc::Receiver<GenericMessage<PusActionReply>>,
-) -> ActionServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+    reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
+) -> ActionServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let action_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_ACTION_SERVICE.raw(),
+            PUS_ACTION_SERVICE.id(),
             pus_action_rx,
             tm_sender,
-            create_verification_reporter(PUS_ACTION_SERVICE.apid),
+            create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
             EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
         ),
         ActionRequestConverter::default(),
@@ -239,17 +236,17 @@ pub fn create_action_service_static(
 }

 pub fn create_action_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     action_router: GenericRequestRouter,
-    reply_receiver: mpsc::Receiver<GenericMessage<PusActionReply>>,
+    reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
 ) -> ActionServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
     let action_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_ACTION_SERVICE.raw(),
+            PUS_ACTION_SERVICE.id(),
             pus_action_rx,
             tm_funnel_tx,
-            create_verification_reporter(PUS_ACTION_SERVICE.apid),
+            create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
             EcssTcInVecConverter::default(),
         ),
         ActionRequestConverter::default(),
@@ -263,8 +260,7 @@ pub fn create_action_service_dynamic(
     }
 }

-pub struct ActionServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
-{
+pub struct ActionServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
     pub(crate) service: PusTargetedRequestService<
         MpscTcReceiver,
         TmSender,
@@ -275,58 +271,42 @@ pub struct ActionServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: Ec
         DefaultActiveActionRequestMap,
         ActivePusActionRequestStd,
         ActionRequest,
-        PusActionReply,
+        ActionReplyPus,
     >,
 }

-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
     for ActionServiceWrapper<TmSender, TcInMemConverter>
 {
-    /// Returns [true] if the packet handling is finished.
-    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
-        match self.service.poll_and_handle_next_tc(time_stamp) {
-            Ok(result) => match result {
-                PusPacketHandlerResult::RequestHandled => {}
-                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
-                    warn!("PUS 8 partial packet handling success: {e:?}")
-                }
-                PusPacketHandlerResult::CustomSubservice(invalid, _) => {
-                    warn!("PUS 8 invalid subservice {invalid}");
-                }
-                PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                    warn!("PUS 8 subservice {subservice} not implemented");
-                }
-                PusPacketHandlerResult::Empty => {
-                    return true;
-                }
-            },
-            Err(error) => {
-                error!("PUS packet handling error: {error:?}")
-            }
-        }
-        false
-    }
-
-    fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool {
-        self.service
-            .poll_and_check_next_reply(time_stamp)
-            .unwrap_or_else(|e| {
-                warn!("PUS 8: Handling reply failed with error {e:?}");
-                false
-            })
-    }
+    const SERVICE_ID: u8 = PusServiceId::Action as u8;
+    const SERVICE_STR: &'static str = "action";

-    fn check_for_request_timeouts(&mut self) {
-        self.service.check_for_request_timeouts();
+    delegate::delegate! {
+        to self.service {
+            fn poll_and_handle_next_tc(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, PusPacketHandlingError>;
+
+            fn poll_and_handle_next_reply(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, EcssTmtcError>;
+
+            fn check_for_request_timeouts(&mut self);
+        }
     }
 }

 #[cfg(test)]
 mod tests {
-    use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID, TEST_UNIQUE_ID};
+    use satrs::pus::test_util::{
+        TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
+    };
     use satrs::pus::verification;
     use satrs::pus::verification::test_util::TestVerificationReporter;
     use satrs::request::MessageMetadata;
+    use satrs::ComponentId;
     use satrs::{
         res_code::ResultU16,
         spacepackets::{
@@ -353,24 +333,24 @@ mod tests {
             DefaultActiveActionRequestMap,
             ActivePusActionRequestStd,
             ActionRequest,
-            PusActionReply,
+            ActionReplyPus,
         >
     {
-        pub fn new_for_action() -> Self {
+        pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
             let _ = env_logger::builder().is_test(true).try_init();
             let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
             let (pus_action_tx, pus_action_rx) = mpsc::channel();
             let (action_reply_tx, action_reply_rx) = mpsc::channel();
             let (action_req_tx, action_req_rx) = mpsc::channel();
-            let verif_reporter = TestVerificationReporter::default();
+            let verif_reporter = TestVerificationReporter::new(owner_id);
             let mut generic_req_router = GenericRequestRouter::default();
             generic_req_router
                 .composite_router_map
-                .insert(TEST_COMPONENT_ID.id(), action_req_tx);
+                .insert(target_id, action_req_tx);
             Self {
                 service: PusTargetedRequestService::new(
                     PusServiceHelper::new(
-                        0,
+                        owner_id,
                         pus_action_rx,
                         tm_funnel_tx.clone(),
                         verif_reporter,
@@ -429,7 +409,7 @@ mod tests {
             }
             let result = result.unwrap();
             match result {
-                PusPacketHandlerResult::RequestHandled => (),
+                HandlingStatus::HandledOne => (),
                 _ => panic!("unexpected result {result:?}"),
             }
         }
@@ -441,21 +421,21 @@ mod tests {
             }
             let result = result.unwrap();
             match result {
-                PusPacketHandlerResult::Empty => (),
+                HandlingStatus::Empty => (),
                 _ => panic!("unexpected result {result:?}"),
             }
         }

         pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
-            let result = self.service.poll_and_check_next_reply(time_stamp);
+            let result = self.service.poll_and_handle_next_reply(time_stamp);
             assert!(result.is_ok());
-            assert!(!result.unwrap());
+            assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
         }

         pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
-            let result = self.service.poll_and_check_next_reply(time_stamp);
+            let result = self.service.poll_and_handle_next_reply(time_stamp);
             assert!(result.is_ok());
-            assert!(result.unwrap());
+            assert_eq!(result.unwrap(), HandlingStatus::Empty);
         }

         pub fn add_tc(&mut self, tc: &PusTcCreator) {
@@ -465,12 +445,7 @@ mod tests {
                 .service
                 .service_helper
                 .verif_reporter()
-                .acceptance_success(
-                    self.service.service_helper.id(),
-                    self.service.service_helper.tm_sender(),
-                    token,
-                    &[0; 7],
-                )
+                .acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
                 .expect("TC acceptance failed");
             self.service
                 .service_helper
@@ -482,22 +457,28 @@ mod tests {
                 .verif_reporter()
                 .check_next_is_acceptance_success(id, accepted_token.request_id());
             self.pus_packet_tx
-                .send(EcssTcAndToken::new(tc.to_vec().unwrap(), accepted_token))
+                .send(EcssTcAndToken::new(
+                    PacketAsVec::new(self.service.service_helper.id(), tc.to_vec().unwrap()),
+                    accepted_token,
+                ))
                 .unwrap();
         }
     }

     #[test]
     fn basic_request() {
-        let mut testbench = TargetedPusRequestTestbench::new_for_action();
+        let mut testbench = TargetedPusRequestTestbench::new_for_action(
+            TEST_COMPONENT_ID_0.id(),
+            TEST_COMPONENT_ID_1.id(),
+        );
         // Create a basic action request and verify forwarding.
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let sp_header = SpHeader::new_from_apid(TEST_APID);
         let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
         let action_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
         app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
-        let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
         testbench.add_tc(&pus8_packet);
         let time_stamp: [u8; 7] = [0; 7];
         testbench.verify_next_tc_is_handled_properly(&time_stamp);
@@ -511,7 +492,7 @@ mod tests {
         if let CompositeRequest::Action(action_req) = req.message {
             assert_eq!(action_req.action_id, action_id);
             assert_eq!(action_req.variant, ActionRequestVariant::NoData);
-            let action_reply = PusActionReply::new(action_id, ActionReplyVariant::Completed);
+            let action_reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
             testbench
                 .reply_tx
                 .send(GenericMessage::new(req.requestor_info, action_reply))
@@ -528,16 +509,23 @@ mod tests {
     #[test]
     fn basic_request_routing_error() {
-        let mut testbench = TargetedPusRequestTestbench::new_for_action();
+        let mut testbench = TargetedPusRequestTestbench::new_for_action(
+            TEST_COMPONENT_ID_0.id(),
+            TEST_COMPONENT_ID_1.id(),
+        );
         // Create a basic action request and verify forwarding.
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
         let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
         let action_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
         // Invalid ID, routing should fail.
-        app_data[0..4].copy_from_slice(&(TEST_UNIQUE_ID + 1).to_be_bytes());
+        app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
         app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
-        let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let pus8_packet = PusTcCreator::new(
+            SpHeader::new_from_apid(TEST_APID),
+            sec_header,
+            &app_data,
+            true,
+        );

         testbench.add_tc(&pus8_packet);
         let time_stamp: [u8; 7] = [0; 7];
@@ -548,17 +536,24 @@ mod tests {
     #[test]
     fn converter_action_req_no_data() {
-        let mut testbench = PusConverterTestbench::new(ActionRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let mut testbench = PusConverterTestbench::new(
+            TEST_COMPONENT_ID_0.raw(),
+            ActionRequestConverter::default(),
+        );
         let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
         let action_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
-        // Valid target ID, conversion should succeed.
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
+        // Valid target ID, conversion should succeed.
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
         app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
-        let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let pus8_packet = PusTcCreator::new(
+            SpHeader::new_from_apid(TEST_APID),
+            sec_header,
+            &app_data,
+            true,
+        );
         let token = testbench.add_tc(&pus8_packet);
-        let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID);
+        let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
         assert!(result.is_ok());
         let (active_req, request) = result.unwrap();
         if let ActionRequestVariant::NoData = request.variant {
@@ -566,7 +561,7 @@ mod tests {
             assert_eq!(active_req.action_id, action_id);
             assert_eq!(
                 active_req.target_id(),
-                UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID).raw()
+                UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
             );
             assert_eq!(
                 active_req.token().request_id(),
@@ -579,20 +574,25 @@ mod tests {
#[test] #[test]
fn converter_action_req_with_data() { fn converter_action_req_with_data() {
let mut testbench = PusConverterTestbench::new(ActionRequestConverter::default()); let mut testbench =
let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap(); PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128); let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32; let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16]; let mut app_data: [u8; 16] = [0; 16];
// Invalid ID, routing should fail. // Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes()); app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes()); app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 { for i in 0..8 {
app_data[i + 8] = i as u8; app_data[i + 8] = i as u8;
} }
let pus8_packet = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true); let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet); let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID); let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok()); assert!(result.is_ok());
let (active_req, request) = result.unwrap(); let (active_req, request) = result.unwrap();
if let ActionRequestVariant::VecData(vec) = request.variant { if let ActionRequestVariant::VecData(vec) = request.variant {
@@ -606,18 +606,19 @@ mod tests {
#[test] #[test]
fn reply_handling_completion_success() { fn reply_handling_completion_success() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32; let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req = let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req); ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = PusActionReply::new(action_id, ActionReplyVariant::Completed); let reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok()); assert!(result.is_ok());
assert!(result.unwrap()); assert!(result.unwrap());
testbench.verif_reporter.assert_full_completion_success( testbench.verif_reporter.assert_full_completion_success(
TEST_COMPONENT_ID.id(), TEST_COMPONENT_ID_0.id(),
req_id, req_id,
None, None,
); );
@@ -625,13 +626,14 @@ mod tests {
#[test] #[test]
fn reply_handling_completion_failure() { fn reply_handling_completion_failure() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32; let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req = let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req); ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3); let error_code = ResultU16::new(2, 3);
let reply = PusActionReply::new( let reply = ActionReplyPus::new(
action_id, action_id,
ActionReplyVariant::CompletionFailed { ActionReplyVariant::CompletionFailed {
error_code, error_code,
@@ -643,7 +645,7 @@ mod tests {
assert!(result.is_ok()); assert!(result.is_ok());
assert!(result.unwrap()); assert!(result.unwrap());
testbench.verif_reporter.assert_completion_failure( testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID.into(), TEST_COMPONENT_ID_0.into(),
req_id, req_id,
None, None,
error_code.raw() as u64, error_code.raw() as u64,
@@ -652,12 +654,13 @@ mod tests {
#[test] #[test]
fn reply_handling_step_success() { fn reply_handling_step_success() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32; let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req = let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req); ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = PusActionReply::new(action_id, ActionReplyVariant::StepSuccess { step: 1 }); let reply = ActionReplyPus::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply); let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]); let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok()); assert!(result.is_ok());
@@ -666,24 +669,25 @@ mod tests {
testbench.verif_reporter.check_next_was_added(req_id); testbench.verif_reporter.check_next_was_added(req_id);
testbench testbench
.verif_reporter .verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID.raw(), req_id); .check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench testbench
.verif_reporter .verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID.raw(), req_id); .check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench testbench
.verif_reporter .verif_reporter
.check_next_is_step_success(TEST_COMPONENT_ID.raw(), req_id, 1); .check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
} }
#[test] #[test]
fn reply_handling_step_failure() { fn reply_handling_step_failure() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32; let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req = let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req); ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3); let error_code = ResultU16::new(2, 3);
let reply = PusActionReply::new( let reply = ActionReplyPus::new(
action_id, action_id,
ActionReplyVariant::StepFailed { ActionReplyVariant::StepFailed {
error_code, error_code,
@@ -698,12 +702,12 @@ mod tests {
testbench.verif_reporter.check_next_was_added(req_id); testbench.verif_reporter.check_next_was_added(req_id);
testbench testbench
.verif_reporter .verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID.id(), req_id); .check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench testbench
.verif_reporter .verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID.id(), req_id); .check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench.verif_reporter.check_next_is_step_failure( testbench.verif_reporter.check_next_is_step_failure(
TEST_COMPONENT_ID.id(), TEST_COMPONENT_ID_0.id(),
req_id, req_id,
error_code.raw().into(), error_code.raw().into(),
); );
@@ -711,8 +715,9 @@ mod tests {
#[test] #[test]
fn reply_handling_unrequested_reply() { fn reply_handling_unrequested_reply() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
let action_reply = PusActionReply::new(5_u32, ActionReplyVariant::Completed); ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply = let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply); GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do // Right now this function does not do a lot. We simply check that it does not panic or do
@@ -723,16 +728,17 @@ mod tests {
#[test] #[test]
fn reply_handling_reply_timeout() { fn reply_handling_reply_timeout() {
let mut testbench = ReplyHandlerTestbench::new(ActionReplyHandler::default()); let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32; let action_id = 5_u32;
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]); let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout( let result = testbench.handle_request_timeout(
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request), &ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
&[], &[],
); );
assert!(result.is_ok()); assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure( testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID.raw(), TEST_COMPONENT_ID_0.raw(),
req_id, req_id,
None, None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64, tmtc_err::REQUEST_TIMEOUT.raw() as u64,
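
Note: all of the action tests above build the request payload by hand. The first four application data bytes carry the unique target ID and the next four the action ID, which the converter later combines with the APID into a UniqueApidTargetId. A minimal sketch of that encoding (the helper name is hypothetical; the layout mirrors the app_data construction in the tests):

    // Hypothetical helper mirroring the app_data layout built manually above.
    fn encode_action_app_data(target_id: u32, action_id: u32) -> [u8; 8] {
        let mut app_data = [0u8; 8];
        // Bytes 0..4: unique target ID, bytes 4..8: action ID, both big endian.
        app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
        app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
        app_data
    }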


@@ -1,30 +1,33 @@
 use std::sync::mpsc;
 use crate::pus::create_verification_reporter;
-use log::{error, warn};
 use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::event_man::EventRequestWithToken;
 use satrs::pus::event_srv::PusEventServiceHandler;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
-    EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
-    EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
+    EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
+    MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
 };
+use satrs::spacepackets::ecss::PusServiceId;
+use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
+use super::{DirectPusService, HandlingStatus};
 pub fn create_event_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
     event_request_tx: mpsc::Sender<EventRequestWithToken>,
-) -> EventServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> EventServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus_5_handler = PusEventServiceHandler::new(
         PusServiceHelper::new(
-            PUS_EVENT_MANAGEMENT.raw(),
+            PUS_EVENT_MANAGEMENT.id(),
             pus_event_rx,
             tm_sender,
-            create_verification_reporter(PUS_EVENT_MANAGEMENT.apid),
+            create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
             EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
         ),
         event_request_tx,
@@ -35,16 +38,16 @@ pub fn create_event_service_static(
 }
 pub fn create_event_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
     event_request_tx: mpsc::Sender<EventRequestWithToken>,
 ) -> EventServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
     let pus_5_handler = PusEventServiceHandler::new(
         PusServiceHelper::new(
-            PUS_EVENT_MANAGEMENT.raw(),
+            PUS_EVENT_MANAGEMENT.id(),
             pus_event_rx,
             tm_funnel_tx,
-            create_verification_reporter(PUS_EVENT_MANAGEMENT.apid),
+            create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
             EcssTcInVecConverter::default(),
         ),
         event_request_tx,
@@ -54,35 +57,59 @@ pub fn create_event_service_dynamic(
     }
 }
-pub struct EventServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
+pub struct EventServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
     pub handler:
         PusEventServiceHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
 }
-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
-    EventServiceWrapper<TmSender, TcInMemConverter>
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
+    for EventServiceWrapper<TmSender, TcInMemConverter>
 {
-    pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
-        match self.handler.poll_and_handle_next_tc(time_stamp) {
-            Ok(result) => match result {
-                PusPacketHandlerResult::RequestHandled => {}
-                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
-                    warn!("PUS 5 partial packet handling success: {e:?}")
-                }
-                PusPacketHandlerResult::CustomSubservice(invalid, _) => {
-                    warn!("PUS 5 invalid subservice {invalid}");
-                }
-                PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                    warn!("PUS 5 subservice {subservice} not implemented");
-                }
-                PusPacketHandlerResult::Empty => {
-                    return true;
-                }
-            },
-            Err(error) => {
-                error!("PUS packet handling error: {error:?}")
+    const SERVICE_ID: u8 = PusServiceId::Event as u8;
+
+    const SERVICE_STR: &'static str = "events";
+
+    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
+        let error_handler = |partial_error: &PartialPusHandlingError| {
+            log::warn!(
+                "PUS {}({}) partial error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                partial_error
+            );
+        };
+        let result = self
+            .handler
+            .poll_and_handle_next_tc(error_handler, time_stamp);
+        if let Err(e) = result {
+            log::warn!(
+                "PUS {}({}) error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                e
+            );
+            // To avoid permanent loops on continuous errors.
+            return HandlingStatus::Empty;
+        }
+        match result.unwrap() {
+            DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
+            DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
+                log::warn!(
+                    "PUS {}({}) subservice {} not implemented",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    subservice
+                );
+            }
+            DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
+                log::warn!(
+                    "PUS {}({}) subservice {} not implemented",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    subservice
+                );
             }
         }
-        false
+        HandlingStatus::HandledOne
     }
 }
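
The DirectPusService implementation above now reports a HandlingStatus instead of a bool. A scheduler would typically poll such a service until its TC queue is drained; a minimal sketch of that loop, assuming only the trait shape shown in this diff (the loop itself is not part of the change):

    fn drain_direct_service(service: &mut impl DirectPusService, time_stamp: &[u8]) {
        // poll_and_handle_next_tc maps handler errors to HandlingStatus::Empty,
        // so this loop terminates even if the handler keeps failing.
        loop {
            if let HandlingStatus::Empty = service.poll_and_handle_next_tc(time_stamp) {
                break;
            }
        }
    }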


@@ -1,5 +1,4 @@
 use derive_new::new;
-use log::{error, warn};
 use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
 use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::verification::{
@@ -8,15 +7,14 @@ use satrs::pus::verification::{
 };
 use satrs::pus::{
     ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
-    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSenderCore,
+    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender,
     EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
-    MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
-    PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
 };
 use satrs::request::{GenericMessage, UniqueApidTargetId};
 use satrs::spacepackets::ecss::tc::PusTcReader;
-use satrs::spacepackets::ecss::{hk, PusPacket};
-use satrs::ComponentId;
+use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
+use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use satrs_example::config::components::PUS_HK_SERVICE;
 use satrs_example::config::{hk_err, tmtc_err};
 use std::sync::mpsc;
@@ -25,7 +23,7 @@ use std::time::Duration;
 use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
 use crate::requests::GenericRequestRouter;
-use super::PusTargetedRequestService;
+use super::{HandlingStatus, PusTargetedRequestService, TargetedPusService};
 #[derive(Clone, PartialEq, Debug, new)]
 pub struct HkReply {
@@ -46,9 +44,8 @@ impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
     fn handle_unrequested_reply(
         &mut self,
-        _caller_id: ComponentId,
         reply: &GenericMessage<HkReply>,
-        _tm_sender: &impl EcssTmSenderCore,
+        _tm_sender: &impl EcssTmSender,
     ) -> Result<(), Self::Error> {
         log::warn!("received unexpected reply for service 3: {reply:?}");
         Ok(())
@@ -56,10 +53,9 @@ impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
     fn handle_reply(
         &mut self,
-        caller_id: ComponentId,
         reply: &GenericMessage<HkReply>,
         active_request: &ActivePusRequestStd,
-        tm_sender: &impl EcssTmSenderCore,
+        tm_sender: &impl EcssTmSender,
         verification_handler: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<bool, Self::Error> {
@@ -70,7 +66,7 @@ impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
         match reply.message.variant {
             HkReplyVariant::Ack => {
                 verification_handler
-                    .completion_success(caller_id, tm_sender, started_token, time_stamp)
+                    .completion_success(tm_sender, started_token, time_stamp)
                     .expect("sending completion success verification failed");
             }
         };
@@ -79,14 +75,12 @@ impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
     fn handle_request_timeout(
         &mut self,
-        caller_id: ComponentId,
         active_request: &ActivePusRequestStd,
-        tm_sender: &impl EcssTmSenderCore,
+        tm_sender: &impl EcssTmSender,
         verification_handler: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(), Self::Error> {
         generic_pus_request_timeout_handler(
-            caller_id,
             tm_sender,
             active_request,
             verification_handler,
@@ -114,10 +108,9 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
     fn convert(
         &mut self,
-        caller_id: ComponentId,
         token: VerificationToken<TcStateAccepted>,
         tc: &PusTcReader,
-        tm_sender: &(impl EcssTmSenderCore + ?Sized),
+        tm_sender: &(impl EcssTmSender + ?Sized),
         verif_reporter: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
@@ -127,7 +120,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
             let user_data_len_raw = user_data_len.to_be_bytes();
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new(
@@ -152,7 +144,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
             let user_data_len_raw = user_data_len.to_be_bytes();
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new(time_stamp, err, &user_data_len_raw),
@@ -171,7 +162,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
         if standard_subservice.is_err() {
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
@@ -196,7 +186,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
                 if user_data.len() < 12 {
                     verif_reporter
                         .start_failure(
-                            caller_id,
                             tm_sender,
                             token,
                             FailParams::new_no_fail_data(
@@ -222,7 +211,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
             _ => {
                 verif_reporter
                     .start_failure(
-                        caller_id,
                         tm_sender,
                         token,
                         FailParams::new(
@@ -243,18 +231,18 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
 }
 pub fn create_hk_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
     request_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
-) -> HkServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> HkServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus_3_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_HK_SERVICE.raw(),
+            PUS_HK_SERVICE.id(),
             pus_hk_rx,
             tm_sender,
-            create_verification_reporter(PUS_HK_SERVICE.apid),
+            create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
             EcssTcInSharedStoreConverter::new(tc_pool, 2048),
         ),
         HkRequestConverter::default(),
@@ -269,17 +257,17 @@ pub fn create_hk_service_static(
 }
 pub fn create_hk_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
     request_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
 ) -> HkServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
     let pus_3_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_HK_SERVICE.raw(),
+            PUS_HK_SERVICE.id(),
             pus_hk_rx,
             tm_funnel_tx,
-            create_verification_reporter(PUS_HK_SERVICE.apid),
+            create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
             EcssTcInVecConverter::default(),
         ),
         HkRequestConverter::default(),
@@ -293,7 +281,7 @@ pub fn create_hk_service_dynamic(
     }
 }
-pub struct HkServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
+pub struct HkServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
     pub(crate) service: PusTargetedRequestService<
         MpscTcReceiver,
         TmSender,
@@ -308,50 +296,34 @@ pub struct HkServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTc
     >,
 }
-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
-    HkServiceWrapper<TmSender, TcInMemConverter>
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
+    for HkServiceWrapper<TmSender, TcInMemConverter>
 {
-    pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
-        match self.service.poll_and_handle_next_tc(time_stamp) {
-            Ok(result) => match result {
-                PusPacketHandlerResult::RequestHandled => {}
-                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
-                    warn!("PUS 3 partial packet handling success: {e:?}")
-                }
-                PusPacketHandlerResult::CustomSubservice(invalid, _) => {
-                    warn!("PUS 3 invalid subservice {invalid}");
-                }
-                PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                    warn!("PUS 3 subservice {subservice} not implemented");
-                }
-                PusPacketHandlerResult::Empty => {
-                    return true;
-                }
-            },
-            Err(error) => {
-                error!("PUS packet handling error: {error:?}")
-            }
-        }
-        false
-    }
-    pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool {
-        self.service
-            .poll_and_check_next_reply(time_stamp)
-            .unwrap_or_else(|e| {
-                warn!("PUS 3: Handling reply failed with error {e:?}");
-                false
-            })
-    }
-    pub fn check_for_request_timeouts(&mut self) {
-        self.service.check_for_request_timeouts();
+    const SERVICE_ID: u8 = PusServiceId::Housekeeping as u8;
+    const SERVICE_STR: &'static str = "housekeeping";
+
+    delegate::delegate! {
+        to self.service {
+            fn poll_and_handle_next_tc(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, PusPacketHandlingError>;
+
+            fn poll_and_handle_next_reply(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, EcssTmtcError>;
+
+            fn check_for_request_timeouts(&mut self);
+        }
     }
 }
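
The delegate::delegate! block above comes from the delegate crate and generates forwarding methods that simply call the same-named methods on self.service, replacing the hand-written match-and-log wrappers. A self-contained sketch of the macro with toy types (not from this codebase):

    use delegate::delegate;

    struct Counter {
        count: u32,
    }

    impl Counter {
        fn increment(&mut self) -> u32 {
            self.count += 1;
            self.count
        }
    }

    struct Wrapper {
        inner: Counter,
    }

    impl Wrapper {
        delegate! {
            // Expands to: fn increment(&mut self) -> u32 { self.inner.increment() }
            to self.inner {
                fn increment(&mut self) -> u32;
            }
        }
    }

    fn main() {
        let mut wrapper = Wrapper { inner: Counter { count: 0 } };
        assert_eq!(wrapper.increment(), 1);
    }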
 #[cfg(test)]
 mod tests {
-    use satrs::pus::test_util::{TEST_COMPONENT_ID, TEST_UNIQUE_ID};
+    use satrs::pus::test_util::{
+        TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
+    };
     use satrs::request::MessageMetadata;
     use satrs::{
         hk::HkRequestVariant,
@@ -373,24 +345,25 @@ mod tests {
     #[test]
     fn hk_converter_one_shot_req() {
-        let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
-        let target_id = TEST_UNIQUE_ID;
+        let mut hk_bench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
+        let target_id = TEST_UNIQUE_ID_0;
         let unique_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
         app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
         app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
         let hk_req = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcGenerateOneShotHk as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         let accepted_token = hk_bench.add_tc(&hk_req);
         let (_active_req, req) = hk_bench
-            .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID)
+            .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
             .expect("conversion failed");
         assert_eq!(req.unique_id, unique_id);
@@ -402,9 +375,10 @@ mod tests {
     #[test]
     fn hk_converter_enable_periodic_generation() {
-        let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
-        let target_id = TEST_UNIQUE_ID;
+        let mut hk_bench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
+        let target_id = TEST_UNIQUE_ID_0;
         let unique_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
         app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
@@ -412,7 +386,7 @@ mod tests {
         let mut generic_check = |tc: &PusTcCreator| {
             let accepted_token = hk_bench.add_tc(tc);
             let (_active_req, req) = hk_bench
-                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID)
+                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
                 .expect("conversion failed");
             assert_eq!(req.unique_id, unique_id);
             if let HkRequestVariant::EnablePeriodic = req.variant {
@@ -421,18 +395,18 @@ mod tests {
             }
         };
         let tc0 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcEnableHkGeneration as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc0);
         let tc1 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcEnableDiagGeneration as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc1);
@@ -440,9 +414,10 @@ mod tests {
     #[test]
     fn hk_conversion_disable_periodic_generation() {
-        let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
-        let target_id = TEST_UNIQUE_ID;
+        let mut hk_bench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
+        let target_id = TEST_UNIQUE_ID_0;
         let unique_id = 5_u32;
         let mut app_data: [u8; 8] = [0; 8];
         app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
@@ -450,7 +425,7 @@ mod tests {
         let mut generic_check = |tc: &PusTcCreator| {
             let accepted_token = hk_bench.add_tc(tc);
             let (_active_req, req) = hk_bench
-                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID)
+                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
                 .expect("conversion failed");
             assert_eq!(req.unique_id, unique_id);
             if let HkRequestVariant::DisablePeriodic = req.variant {
@@ -459,18 +434,18 @@ mod tests {
             }
         };
         let tc0 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcDisableHkGeneration as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc0);
         let tc1 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcDisableDiagGeneration as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc1);
@@ -478,9 +453,10 @@ mod tests {
     #[test]
     fn hk_conversion_modify_interval() {
-        let mut hk_bench = PusConverterTestbench::new(HkRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
-        let target_id = TEST_UNIQUE_ID;
+        let mut hk_bench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
+        let target_id = TEST_UNIQUE_ID_0;
         let unique_id = 5_u32;
         let mut app_data: [u8; 12] = [0; 12];
         let collection_interval_factor = 5_u32;
@@ -491,7 +467,7 @@ mod tests {
         let mut generic_check = |tc: &PusTcCreator| {
             let accepted_token = hk_bench.add_tc(tc);
             let (_active_req, req) = hk_bench
-                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID)
+                .convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
                 .expect("conversion failed");
             assert_eq!(req.unique_id, unique_id);
             if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
@@ -501,18 +477,18 @@ mod tests {
             }
         };
         let tc0 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcModifyHkCollectionInterval as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc0);
         let tc1 = PusTcCreator::new_simple(
-            &mut sp_header,
+            sp_header,
             3,
             Subservice::TcModifyDiagCollectionInterval as u8,
-            Some(&app_data),
+            &app_data,
             true,
         );
         generic_check(&tc1);
@@ -520,7 +496,8 @@ mod tests {
     #[test]
     fn hk_reply_handler() {
-        let mut reply_testbench = ReplyHandlerTestbench::new(HkReplyHandler::default());
+        let mut reply_testbench =
+            ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
         let sender_id = 2_u64;
         let apid_target_id = 3_u32;
         let unique_id = 5_u32;
@@ -534,12 +511,13 @@ mod tests {
         assert!(result.unwrap());
         reply_testbench
             .verif_reporter
-            .assert_full_completion_success(TEST_COMPONENT_ID.raw(), req_id, None);
+            .assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
     }
     #[test]
     fn reply_handling_unrequested_reply() {
-        let mut testbench = ReplyHandlerTestbench::new(HkReplyHandler::default());
+        let mut testbench =
+            ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
         let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
         let unrequested_reply =
             GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
@@ -551,12 +529,13 @@ mod tests {
     #[test]
     fn reply_handling_reply_timeout() {
-        let mut testbench = ReplyHandlerTestbench::new(HkReplyHandler::default());
-        let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]);
+        let mut testbench =
+            ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
+        let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
         let result = testbench.handle_request_timeout(&active_request, &[]);
         assert!(result.is_ok());
         testbench.verif_reporter.assert_completion_failure(
-            TEST_COMPONENT_ID.raw(),
+            TEST_COMPONENT_ID_1.raw(),
             req_id,
             None,
             tmtc_err::REQUEST_TIMEOUT.raw() as u64,
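
A side note on the spacepackets API change visible in these tests: the fallible SpHeader::tc_unseg(...).unwrap() constructor was replaced by infallible constructors such as SpHeader::new_for_unseg_tc and SpHeader::new_from_apid, and PusTcCreator now takes the header by value instead of by mutable reference. Since the tests reuse one header for two telecommands, the header type is evidently Copy. A condensed sketch of the new pattern, using the names from the tests above:

    let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
    // The header is passed by value and copied into each creator, so one
    // header value can back several telecommands.
    let tc0 = PusTcCreator::new_simple(sp_header, 3, Subservice::TcEnableHkGeneration as u8, &app_data, true);
    let tc1 = PusTcCreator::new_simple(sp_header, 3, Subservice::TcEnableDiagGeneration as u8, &app_data, true);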


@@ -1,20 +1,21 @@
 use crate::requests::GenericRequestRouter;
-use crate::tmtc::MpscStoreAndSendError;
 use log::warn;
+use satrs::pool::PoolAddr;
 use satrs::pus::verification::{
     self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
     VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
 };
 use satrs::pus::{
     ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
-    EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError,
-    GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler,
-    PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory,
+    EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
+    HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
+    PusTcToRequestConverter, TcInMemory,
 };
-use satrs::queue::GenericReceiveError;
+use satrs::queue::{GenericReceiveError, GenericSendError};
 use satrs::request::{Apid, GenericMessage, MessageMetadata};
 use satrs::spacepackets::ecss::tc::PusTcReader;
-use satrs::spacepackets::ecss::PusServiceId;
+use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
+use satrs::tmtc::{PacketAsVec, PacketInPool};
 use satrs::ComponentId;
 use satrs_example::config::components::PUS_ROUTING_SERVICE;
 use satrs_example::config::{tmtc_err, CustomPusServiceId};
@@ -30,11 +31,11 @@ pub mod scheduler;
 pub mod stack;
 pub mod test;
-pub fn create_verification_reporter(apid: Apid) -> VerificationReporter {
+pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
     let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
     // Every software component which needs to generate verification telemetry, gets a cloned
     // verification reporter.
-    VerificationReporter::new(&verif_cfg)
+    VerificationReporter::new(owner_id, &verif_cfg)
 }
 /// Simple router structure which forwards PUS telecommands to dedicated handlers.
@@ -47,7 +48,7 @@ pub struct PusTcMpscRouter {
     pub mode_tc_sender: Sender<EcssTcAndToken>,
 }
-pub struct PusReceiver<TmSender: EcssTmSenderCore> {
+pub struct PusTcDistributor<TmSender: EcssTmSender> {
     pub id: ComponentId,
     pub tm_sender: TmSender,
     pub verif_reporter: VerificationReporter,
@@ -55,35 +56,69 @@ pub struct PusReceiver<TmSender: EcssTmSenderCore> {
     stamp_helper: TimeStampHelper,
 }
-impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
+impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
     pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
         Self {
             id: PUS_ROUTING_SERVICE.raw(),
             tm_sender,
-            verif_reporter: create_verification_reporter(PUS_ROUTING_SERVICE.apid),
+            verif_reporter: create_verification_reporter(
+                PUS_ROUTING_SERVICE.id(),
+                PUS_ROUTING_SERVICE.apid,
+            ),
             pus_router,
             stamp_helper: TimeStampHelper::default(),
         }
     }
-    pub fn handle_tc_packet(
+    pub fn handle_tc_packet_vec(
         &mut self,
-        tc_in_memory: TcInMemory,
-        service: u8,
-        pus_tc: &PusTcReader,
-    ) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> {
-        let init_token = self.verif_reporter.add_tc(pus_tc);
+        packet_as_vec: PacketAsVec,
+    ) -> Result<HandlingStatus, GenericSendError> {
+        self.handle_tc_generic(packet_as_vec.sender_id, None, &packet_as_vec.packet)
+    }
+
+    pub fn handle_tc_packet_in_store(
+        &mut self,
+        packet_in_pool: PacketInPool,
+        pus_tc_copy: &[u8],
+    ) -> Result<HandlingStatus, GenericSendError> {
+        self.handle_tc_generic(
+            packet_in_pool.sender_id,
+            Some(packet_in_pool.store_addr),
+            pus_tc_copy,
+        )
+    }
+
+    pub fn handle_tc_generic(
+        &mut self,
+        sender_id: ComponentId,
+        addr_opt: Option<PoolAddr>,
+        raw_tc: &[u8],
+    ) -> Result<HandlingStatus, GenericSendError> {
+        let pus_tc_result = PusTcReader::new(raw_tc);
+        if pus_tc_result.is_err() {
+            log::warn!(
+                "error creating PUS TC from raw data received from {}: {}",
+                sender_id,
+                pus_tc_result.unwrap_err()
+            );
+            log::warn!("raw data: {:x?}", raw_tc);
+            // TODO: Shouldn't this be an error?
+            return Ok(HandlingStatus::HandledOne);
+        }
+        let pus_tc = pus_tc_result.unwrap().0;
+        let init_token = self.verif_reporter.add_tc(&pus_tc);
         self.stamp_helper.update_from_now();
         let accepted_token = self
             .verif_reporter
-            .acceptance_success(
-                self.id,
-                &self.tm_sender,
-                init_token,
-                self.stamp_helper.stamp(),
-            )
+            .acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
             .expect("Acceptance success failure");
-        let service = PusServiceId::try_from(service);
+        let service = PusServiceId::try_from(pus_tc.service());
+        let tc_in_memory: TcInMemory = if let Some(store_addr) = addr_opt {
+            PacketInPool::new(sender_id, store_addr).into()
+        } else {
+            PacketAsVec::new(sender_id, Vec::from(raw_tc)).into()
+        };
         match service {
             Ok(standard_service) => match standard_service {
                 PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
@@ -108,7 +143,6 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
                 }
                 _ => {
                     let result = self.verif_reporter.start_failure(
-                        self.id,
                         &self.tm_sender,
                         accepted_token,
                         FailParams::new(
@@ -125,16 +159,19 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
             Err(e) => {
                 if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
                     match custom_service {
-                        CustomPusServiceId::Mode => {
-                            // TODO: Fix mode service.
-                            //self.handle_mode_service(pus_tc, accepted_token)
-                        }
+                        CustomPusServiceId::Mode => self
+                            .pus_router
+                            .mode_tc_sender
+                            .send(EcssTcAndToken {
+                                tc_in_memory,
+                                token: Some(accepted_token.into()),
+                            })
+                            .map_err(|_| GenericSendError::RxDisconnected)?,
                         CustomPusServiceId::Health => {}
                     }
                 } else {
                     self.verif_reporter
                         .start_failure(
-                            self.id,
                             &self.tm_sender,
                             accepted_token,
                             FailParams::new(
@@ -147,17 +184,65 @@ impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
                 }
             }
         }
-        Ok(PusPacketHandlerResult::RequestHandled)
+        Ok(HandlingStatus::HandledOne)
     }
 }
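
With the reworked PusTcDistributor, upstream components no longer pre-parse telecommands; they hand over raw packets and the distributor parses them, sends the acceptance verification and routes them. A hedged usage sketch (the sender ID constant and the surrounding function are placeholders, not part of this diff, and assume the types above are in scope):

    // Hypothetical sender component ID; any ComponentId works here.
    const SOME_SENDER_ID: ComponentId = 42;

    fn route_incoming(
        distributor: &mut PusTcDistributor<impl EcssTmSender>,
        raw_tc: &[u8],
    ) {
        let packet = PacketAsVec::new(SOME_SENDER_ID, raw_tc.to_vec());
        match distributor.handle_tc_packet_vec(packet) {
            // Even unparsable packets currently map to HandledOne, see the TODO above.
            Ok(_status) => {}
            Err(e) => log::warn!("routing TC failed: {:?}", e),
        }
    }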
 pub trait TargetedPusService {
-    /// Returns [true] if the packet handling is finished.
-    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool;
-    fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool;
+    const SERVICE_ID: u8;
+    const SERVICE_STR: &'static str;
+
+    fn poll_and_handle_next_tc_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
+        let result = self.poll_and_handle_next_tc(time_stamp);
+        if let Err(e) = result {
+            log::error!(
+                "PUS service {}({})packet handling error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                e
+            );
+            // To avoid permanent loops on error cases.
+            return HandlingStatus::Empty;
+        }
+        result.unwrap()
+    }
+
+    fn poll_and_handle_next_reply_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
+        // This only fails if all senders disconnected. Treat it like an empty queue.
+        self.poll_and_handle_next_reply(time_stamp)
+            .unwrap_or_else(|e| {
+                warn!(
+                    "PUS servce {}({}): Handling reply failed with error {:?}",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    e
+                );
+                HandlingStatus::Empty
+            })
+    }
+
+    fn poll_and_handle_next_tc(
+        &mut self,
+        time_stamp: &[u8],
+    ) -> Result<HandlingStatus, PusPacketHandlingError>;
+
+    fn poll_and_handle_next_reply(
+        &mut self,
+        time_stamp: &[u8],
+    ) -> Result<HandlingStatus, EcssTmtcError>;
+
     fn check_for_request_timeouts(&mut self);
 }
+
+/// Generic trait for services which handle packets directly. Kept minimal right now because
+/// of the difficulty to allow flexible user code for these services..
+pub trait DirectPusService {
+    const SERVICE_ID: u8;
+    const SERVICE_STR: &'static str;
+
+    fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
+}
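
The default handler methods added to TargetedPusService downgrade errors to HandlingStatus::Empty, so a polling loop cannot spin forever on a permanently failing queue. A sketch of the service cycle this trait shape suggests (the loop itself is an assumption, not part of the diff):

    fn service_cycle(service: &mut impl TargetedPusService, time_stamp: &[u8]) {
        // Drain pending telecommands, then pending replies, then check timeouts.
        while let HandlingStatus::HandledOne =
            service.poll_and_handle_next_tc_default_handler(time_stamp)
        {}
        while let HandlingStatus::HandledOne =
            service.poll_and_handle_next_reply_default_handler(time_stamp)
        {}
        service.check_for_request_timeouts();
    }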
 /// This is a generic handler class for all PUS services where a PUS telecommand is converted
 /// to a targeted request.
 ///
@@ -175,12 +260,13 @@ pub trait TargetedPusService {
 ///
 /// The handler exposes the following API:
 ///
-/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5.
-/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6.
+/// 1. [Self::poll_and_handle_next_tc] which tries to poll and handle one TC packet, covering
+///    steps 1-5.
+/// 2. [Self::poll_and_check_next_reply] which tries to poll and handle one reply, covering step 6.
 /// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
 pub struct PusTargetedRequestService<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
     RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
@@ -201,8 +287,8 @@ pub struct PusTargetedRequestService<
 }
 impl<
-        TcReceiver: EcssTcReceiverCore,
-        TmSender: EcssTmSenderCore,
+        TcReceiver: EcssTcReceiver,
+        TmSender: EcssTmSender,
         TcInMemConverter: EcssTcInMemConverter,
         VerificationReporter: VerificationReportingProvider,
         RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
@@ -254,10 +340,10 @@ where
     pub fn poll_and_handle_next_tc(
         &mut self,
         time_stamp: &[u8],
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    ) -> Result<HandlingStatus, PusPacketHandlingError> {
         let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
         if possible_packet.is_none() {
-            return Ok(PusPacketHandlerResult::Empty);
+            return Ok(HandlingStatus::Empty);
         }
         let ecss_tc_and_token = possible_packet.unwrap();
         self.service_helper
@@ -265,7 +351,6 @@ where
             .cache(&ecss_tc_and_token.tc_in_memory)?;
         let tc = self.service_helper.tc_in_mem_converter().convert()?;
         let (mut request_info, request) = match self.request_converter.convert(
-            self.service_helper.id(),
             ecss_tc_and_token.token,
             &tc,
             self.service_helper.tm_sender(),
@@ -293,7 +378,6 @@ where
             .service_helper
             .verif_reporter()
             .start_success(
-                self.service_helper.id(),
                 &self.service_helper.common.tm_sender,
                 accepted_token,
                 time_stamp,
@@ -315,7 +399,7 @@ where
                 return Err(e.into());
             }
         }
-        Ok(PusPacketHandlerResult::RequestHandled)
+        Ok(HandlingStatus::HandledOne)
     }
     fn handle_conversion_to_request_error(
@@ -330,7 +414,6 @@ where
                 self.service_helper
                     .verif_reporter()
                     .completion_failure(
-                        self.service_helper.id(),
                         self.service_helper.tm_sender(),
                         token,
                         FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice),
@@ -342,7 +425,6 @@ where
                 self.service_helper
                     .verif_reporter()
                     .completion_failure(
-                        self.service_helper.id(),
                         self.service_helper.tm_sender(),
                         token,
                         FailParams::new(
@@ -359,7 +441,6 @@ where
                 self.service_helper
                     .verif_reporter()
                     .completion_failure(
-                        self.service_helper.id(),
                         self.service_helper.tm_sender(),
                         token,
                         FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info),
@@ -371,14 +452,17 @@ where
         }
     }
-    pub fn poll_and_check_next_reply(&mut self, time_stamp: &[u8]) -> Result<bool, EcssTmtcError> {
+    pub fn poll_and_handle_next_reply(
+        &mut self,
+        time_stamp: &[u8],
+    ) -> Result<HandlingStatus, EcssTmtcError> {
         match self.reply_receiver.try_recv() {
             Ok(reply) => {
                 self.handle_reply(&reply, time_stamp)?;
-                Ok(false)
+                Ok(HandlingStatus::HandledOne)
             }
             Err(e) => match e {
-                mpsc::TryRecvError::Empty => Ok(true),
+                mpsc::TryRecvError::Empty => Ok(HandlingStatus::Empty),
                 mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive(
                     GenericReceiveError::TxDisconnected(None),
                 )),
@@ -393,29 +477,22 @@ where
     ) -> Result<(), EcssTmtcError> {
         let active_req_opt = self.active_request_map.get(reply.request_id());
         if active_req_opt.is_none() {
-            self.reply_handler.handle_unrequested_reply(
-                self.service_helper.id(),
-                reply,
-                &self.service_helper.common.tm_sender,
-            )?;
+            self.reply_handler
+                .handle_unrequested_reply(reply, &self.service_helper.common.tm_sender)?;
             return Ok(());
         }
         let active_request = active_req_opt.unwrap();
-        let request_finished = self
-            .reply_handler
-            .handle_reply(
-                self.service_helper.id(),
-                reply,
-                active_request,
-                &self.service_helper.common.tm_sender,
-                &self.service_helper.common.verif_reporter,
-                time_stamp,
-            )
-            .unwrap_or(false);
-        if request_finished {
+        let result = self.reply_handler.handle_reply(
+            reply,
+            active_request,
+            &self.service_helper.common.tm_sender,
+            &self.service_helper.common.verif_reporter,
+            time_stamp,
+        );
+        if result.is_err() || (result.is_ok() && *result.as_ref().unwrap()) {
             self.active_request_map.remove(reply.request_id());
         }
-        Ok(())
+        result.map(|_| ())
     }
     pub fn check_for_request_timeouts(&mut self) {
@@ -437,8 +514,7 @@ where
 /// Generic timeout handling: Handle the verification failure with a dedicated return code
 /// and also log the error.
 pub fn generic_pus_request_timeout_handler(
-    sender_id: ComponentId,
-    sender: &(impl EcssTmSenderCore + ?Sized),
+    sender: &(impl EcssTmSender + ?Sized),
     active_request: &(impl ActiveRequestProvider + Debug),
     verification_handler: &impl VerificationReportingProvider,
     time_stamp: &[u8],
@@ -450,7 +526,6 @@ pub fn generic_pus_request_timeout_handler(
         .try_into()
         .expect("token not in expected started state");
     verification_handler.completion_failure(
-        sender_id,
         sender,
         started_token,
         FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]),
@@ -462,8 +537,8 @@ pub fn generic_pus_request_timeout_handler(
 pub(crate) mod tests {
     use std::time::Duration;
-    use satrs::pus::test_util::TEST_COMPONENT_ID;
-    use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant};
+    use satrs::pus::test_util::TEST_COMPONENT_ID_0;
+    use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
     use satrs::request::RequestId;
     use satrs::{
         pus::{
@@ -493,7 +568,7 @@ pub(crate) mod tests {
         pub id: ComponentId,
         pub verif_reporter: TestVerificationReporter,
         pub reply_handler: ReplyHandler,
-        pub tm_receiver: mpsc::Receiver<PusTmAsVec>,
+        pub tm_receiver: mpsc::Receiver<PacketAsVec>,
         pub default_timeout: Duration,
         tm_sender: MpscTmAsVecSender,
         phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
@@ -505,11 +580,11 @@ pub(crate) mod tests {
         Reply,
     > ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
     {
-        pub fn new(reply_handler: ReplyHandler) -> Self {
-            let test_verif_reporter = TestVerificationReporter::default();
+        pub fn new(owner_id: ComponentId, reply_handler: ReplyHandler) -> Self {
+            let test_verif_reporter = TestVerificationReporter::new(owner_id);
             let (tm_sender, tm_receiver) = mpsc::channel();
             Self {
-                id: TEST_COMPONENT_ID.raw(),
+                id: TEST_COMPONENT_ID_0.raw(),
                 verif_reporter: test_verif_reporter,
                 reply_handler,
                 default_timeout: Duration::from_secs(30),
@@ -525,21 +600,21 @@ pub(crate) mod tests {
             apid_target: u32,
             time_stamp: &[u8],
         ) -> (verification::RequestId, ActivePusRequestStd) {
-            let mut sp_header = SpHeader::tc_unseg(apid, 0, 0).unwrap();
+            let sp_header = SpHeader::new_from_apid(apid);
             let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
             let init = self.verif_reporter.add_tc(&PusTcCreator::new(
-                &mut sp_header,
+                sp_header,
                 sec_header_dummy,
                 &[],
                 true,
             ));
             let accepted = self
                 .verif_reporter
-                .acceptance_success(self.id, &self.tm_sender, init, time_stamp)
+                .acceptance_success(&self.tm_sender, init, time_stamp)
                 .expect("acceptance failed");
             let started = self
                 .verif_reporter
-                .start_success(self.id, &self.tm_sender, accepted, time_stamp)
+                .start_success(&self.tm_sender, accepted, time_stamp)
                 .expect("start failed");
             (
                 started.request_id(),
@@ -558,7 +633,6 @@ pub(crate) mod tests {
             time_stamp: &[u8],
         ) -> Result<bool, ReplyHandler::Error> {
             self.reply_handler.handle_reply(
-                self.id,
                 reply,
                 active_request,
                 &self.tm_sender,
@@ -572,7 +646,7 @@ pub(crate) mod tests {
             reply: &GenericMessage<Reply>,
         ) -> Result<(), ReplyHandler::Error> {
             self.reply_handler
-                .handle_unrequested_reply(self.id, reply, &self.tm_sender)
+                .handle_unrequested_reply(reply, &self.tm_sender)
         }
         pub fn handle_request_timeout(
             &mut self,
@@ -580,7 +654,6 @@ pub(crate) mod tests {
             time_stamp: &[u8],
         ) -> Result<(), ReplyHandler::Error> {
             self.reply_handler.handle_request_timeout(
-                self.id,
                 active_request_info,
                 &self.tm_sender,
                 &self.verif_reporter,
@@ -595,7 +668,7 @@ pub(crate) mod tests {
     /// Dummy sender component which does nothing on the [Self::send_tm] call.
    ///
    /// Useful for unit tests.
impl EcssTmSenderCore for DummySender { impl EcssTmSender for DummySender {
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> { fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
Ok(()) Ok(())
} }
@@ -622,10 +695,10 @@ pub(crate) mod tests {
Request, Request,
> PusConverterTestbench<Converter, ActiveRequestInfo, Request> > PusConverterTestbench<Converter, ActiveRequestInfo, Request>
{ {
pub fn new(converter: Converter) -> Self { pub fn new(owner_id: ComponentId, converter: Converter) -> Self {
let test_verif_reporter = TestVerificationReporter::default(); let test_verif_reporter = TestVerificationReporter::new(owner_id);
Self { Self {
id: TEST_COMPONENT_ID.raw(), id: owner_id,
verif_reporter: test_verif_reporter, verif_reporter: test_verif_reporter,
converter, converter,
dummy_sender: DummySender::default(), dummy_sender: DummySender::default(),
@@ -640,7 +713,7 @@ pub(crate) mod tests {
self.current_request_id = Some(verification::RequestId::new(tc)); self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap()); self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter self.verif_reporter
.acceptance_success(self.id, &self.dummy_sender, token, &[]) .acceptance_success(&self.dummy_sender, token, &[])
.expect("acceptance failed") .expect("acceptance failed")
} }
@@ -663,7 +736,6 @@ pub(crate) mod tests {
let current_packet = self.current_packet.take().unwrap(); let current_packet = self.current_packet.take().unwrap();
let tc_reader = PusTcReader::new(&current_packet).unwrap(); let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert( let (active_info, request) = self.converter.convert(
self.id,
token, token,
&tc_reader.0, &tc_reader.0,
&self.dummy_sender, &self.dummy_sender,
@@ -703,7 +775,7 @@ pub(crate) mod tests {
ReplyType, ReplyType,
>, >,
pub request_id: Option<RequestId>, pub request_id: Option<RequestId>,
pub tm_funnel_rx: mpsc::Receiver<PusTmAsVec>, pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>, pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>, pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>, pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
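The hunk at `@@ -393,29 +477,22 @@` above changes the reply path in two ways: the explicit component ID argument disappears from the handler calls, and the active request is now removed both on successful completion and on a handler error, instead of swallowing errors via `unwrap_or(false)`. A minimal self-contained sketch of that control flow (all types here are stand-ins, not the satrs API); propagating the error only after the cleanup keeps the map consistent even when reply handling fails:

use std::collections::HashMap;

type RequestId = u32;

struct Reply {
    request_id: RequestId,
}

struct ActiveRequest;

#[derive(Debug)]
struct Error;

fn handle_reply(
    active_requests: &mut HashMap<RequestId, ActiveRequest>,
    reply: &Reply,
    // Returns Ok(true) when the request is finished.
    handler: impl Fn(&Reply, &ActiveRequest) -> Result<bool, Error>,
) -> Result<(), Error> {
    let Some(active) = active_requests.get(&reply.request_id) else {
        // Unrequested reply: report it and bail out without an error.
        return Ok(());
    };
    let result = handler(reply, active);
    // Remove the entry when the handler reports completion *or* fails,
    // so a failing handler can not leave a stale active request behind.
    let finished = result.as_ref().map_or(true, |done| *done);
    if finished {
        active_requests.remove(&reply.request_id);
    }
    result.map(|_| ())
}

fn main() {
    let mut active_requests = HashMap::from([(1, ActiveRequest)]);
    let reply = Reply { request_id: 1 };
    handle_reply(&mut active_requests, &reply, |_, _| Ok(true)).unwrap();
    assert!(active_requests.is_empty());
}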


@@ -1,4 +1,5 @@
-use log::{error, warn};
+use derive_new::new;
+use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use std::sync::mpsc;
 use std::time::Duration;
@@ -7,8 +8,8 @@ use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
     DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
-    EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlingError,
+    PusServiceHelper,
 };
 use satrs::request::GenericMessage;
 use satrs::{
@@ -19,7 +20,7 @@ use satrs::{
         self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
         VerificationToken,
     },
-    ActivePusRequestStd, ActiveRequestProvider, EcssTmSenderCore, EcssTmtcError,
+    ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
     GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
     },
     request::UniqueApidTargetId,
@@ -34,24 +35,25 @@ use satrs::{
     ComponentId,
 };
 use satrs_example::config::components::PUS_MODE_SERVICE;
-use satrs_example::config::{mode_err, tmtc_err};
+use satrs_example::config::{mode_err, tmtc_err, CustomPusServiceId};

 use super::{
-    create_verification_reporter, generic_pus_request_timeout_handler, PusTargetedRequestService,
-    TargetedPusService,
+    create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
+    PusTargetedRequestService, TargetedPusService,
 };

-#[derive(Default)]
-pub struct ModeReplyHandler {}
+#[derive(new)]
+pub struct ModeReplyHandler {
+    owner_id: ComponentId,
+}

 impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
     type Error = EcssTmtcError;

     fn handle_unrequested_reply(
         &mut self,
-        _caller_id: ComponentId,
         reply: &GenericMessage<ModeReply>,
-        _tm_sender: &impl EcssTmSenderCore,
+        _tm_sender: &impl EcssTmSender,
     ) -> Result<(), Self::Error> {
         log::warn!("received unexpected reply for mode service 5: {reply:?}");
         Ok(())
@@ -59,10 +61,9 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {

     fn handle_reply(
         &mut self,
-        caller_id: ComponentId,
         reply: &GenericMessage<ModeReply>,
         active_request: &ActivePusRequestStd,
-        tm_sender: &impl EcssTmSenderCore,
+        tm_sender: &impl EcssTmSender,
         verification_handler: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<bool, Self::Error> {
@@ -77,27 +78,15 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
                     .write_to_be_bytes(&mut source_data)
                     .expect("writing mode reply failed");
                 let req_id = verification::RequestId::from(reply.request_id());
-                let mut sp_header = SpHeader::tm_unseg(req_id.packet_id().apid(), 0, 0)
-                    .expect("generating SP header failed");
-                let sec_header = PusTmSecondaryHeader::new(
-                    200,
-                    Subservice::TmModeReply as u8,
-                    0,
-                    0,
-                    Some(time_stamp),
-                );
-                let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &source_data, true);
-                tm_sender.send_tm(caller_id, PusTmVariant::Direct(pus_tm))?;
-                verification_handler.completion_success(
-                    caller_id,
-                    tm_sender,
-                    started_token,
-                    time_stamp,
-                )?;
+                let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
+                let sec_header =
+                    PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
+                let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
+                tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
+                verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
             }
             ModeReply::CantReachMode(error_code) => {
                 verification_handler.completion_failure(
-                    caller_id,
                     tm_sender,
                     started_token,
                     FailParams::new(time_stamp, &error_code, &[]),
@@ -112,7 +101,6 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
                     .write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
                     .expect("writing reached mode failed");
                 verification_handler.completion_failure(
-                    caller_id,
                     tm_sender,
                     started_token,
                     FailParams::new(
@@ -128,14 +116,12 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {

     fn handle_request_timeout(
         &mut self,
-        caller_id: ComponentId,
         active_request: &ActivePusRequestStd,
-        tm_sender: &impl EcssTmSenderCore,
+        tm_sender: &impl EcssTmSender,
         verification_handler: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(), Self::Error> {
         generic_pus_request_timeout_handler(
-            caller_id,
             tm_sender,
             active_request,
             verification_handler,
@@ -154,11 +140,9 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo

     fn convert(
         &mut self,
-        caller_id: ComponentId,
         token: VerificationToken<TcStateAccepted>,
         tc: &PusTcReader,
-        tm_sender: &(impl EcssTmSenderCore + ?Sized),
+        tm_sender: &(impl EcssTmSender + ?Sized),
         verif_reporter: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
@@ -167,7 +151,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
         let not_enough_app_data = |expected: usize| {
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
@@ -189,7 +172,6 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
             // Invalid subservice
             verif_reporter
                 .start_failure(
-                    caller_id,
                     tm_sender,
                     token,
                     FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
@@ -221,23 +203,23 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
 }

 pub fn create_mode_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     mode_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
-) -> ModeServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> ModeServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let mode_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_MODE_SERVICE.raw(),
+            PUS_MODE_SERVICE.id(),
             pus_action_rx,
             tm_sender,
-            create_verification_reporter(PUS_MODE_SERVICE.apid),
+            create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
             EcssTcInSharedStoreConverter::new(tc_pool, 2048),
         ),
         ModeRequestConverter::default(),
         DefaultActiveRequestMap::default(),
-        ModeReplyHandler::default(),
+        ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
         mode_router,
         reply_receiver,
     );
@@ -247,22 +229,22 @@ pub fn create_mode_service_static(
 }

 pub fn create_mode_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
     pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
     mode_router: GenericRequestRouter,
     reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
 ) -> ModeServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
     let mode_request_handler = PusTargetedRequestService::new(
         PusServiceHelper::new(
-            PUS_MODE_SERVICE.raw(),
+            PUS_MODE_SERVICE.id(),
             pus_action_rx,
             tm_funnel_tx,
-            create_verification_reporter(PUS_MODE_SERVICE.apid),
+            create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
             EcssTcInVecConverter::default(),
         ),
         ModeRequestConverter::default(),
         DefaultActiveRequestMap::default(),
-        ModeReplyHandler::default(),
+        ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
         mode_router,
         reply_receiver,
     );
@@ -271,7 +253,7 @@ pub fn create_mode_service_dynamic(
     }
 }

-pub struct ModeServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
+pub struct ModeServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
     pub(crate) service: PusTargetedRequestService<
         MpscTcReceiver,
         TmSender,
@@ -286,50 +268,32 @@ pub struct ModeServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: Ecss
     >,
 }

-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
     for ModeServiceWrapper<TmSender, TcInMemConverter>
 {
-    /// Returns [true] if the packet handling is finished.
-    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
-        match self.service.poll_and_handle_next_tc(time_stamp) {
-            Ok(result) => match result {
-                PusPacketHandlerResult::RequestHandled => {}
-                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
-                    warn!("PUS mode service: partial packet handling success: {e:?}")
-                }
-                PusPacketHandlerResult::CustomSubservice(invalid, _) => {
-                    warn!("PUS mode service: invalid subservice {invalid}");
-                }
-                PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                    warn!("PUS mode service: {subservice} not implemented");
-                }
-                PusPacketHandlerResult::Empty => {
-                    return true;
-                }
-            },
-            Err(error) => {
-                error!("PUS mode service: packet handling error: {error:?}")
-            }
-        }
-        false
-    }
-
-    fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> bool {
-        self.service
-            .poll_and_check_next_reply(time_stamp)
-            .unwrap_or_else(|e| {
-                warn!("PUS action service: Handling reply failed with error {e:?}");
-                false
-            })
-    }
-
-    fn check_for_request_timeouts(&mut self) {
-        self.service.check_for_request_timeouts();
-    }
+    const SERVICE_ID: u8 = CustomPusServiceId::Mode as u8;
+    const SERVICE_STR: &'static str = "mode";
+
+    delegate::delegate! {
+        to self.service {
+            fn poll_and_handle_next_tc(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, PusPacketHandlingError>;
+
+            fn poll_and_handle_next_reply(
+                &mut self,
+                time_stamp: &[u8],
+            ) -> Result<HandlingStatus, EcssTmtcError>;
+
+            fn check_for_request_timeouts(&mut self);
+        }
+    }
 }

 #[cfg(test)]
 mod tests {
-    use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID, TEST_UNIQUE_ID};
+    use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
     use satrs::request::MessageMetadata;
     use satrs::{
         mode::{ModeAndSubmode, ModeReply, ModeRequest},
@@ -351,72 +315,79 @@ mod tests {
     #[test]
     fn mode_converter_read_mode_request() {
-        let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let mut testbench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
         let mut app_data: [u8; 4] = [0; 4];
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
-        let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
+        let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
         let token = testbench.add_tc(&tc);
         let (_active_req, req) = testbench
-            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID)
+            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
             .expect("conversion has failed");
         assert_eq!(req, ModeRequest::ReadMode);
     }

     #[test]
     fn mode_converter_set_mode_request() {
-        let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let mut testbench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
         let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
         let mode_and_submode = ModeAndSubmode::new(2, 1);
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
         mode_and_submode
             .write_to_be_bytes(&mut app_data[4..])
             .unwrap();
-        let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
         let token = testbench.add_tc(&tc);
         let (_active_req, req) = testbench
-            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID)
+            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
             .expect("conversion has failed");
         assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
     }

     #[test]
     fn mode_converter_announce_mode() {
-        let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let mut testbench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
         let mut app_data: [u8; 4] = [0; 4];
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
-        let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
+        let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
         let token = testbench.add_tc(&tc);
         let (_active_req, req) = testbench
-            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID)
+            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
             .expect("conversion has failed");
         assert_eq!(req, ModeRequest::AnnounceMode);
     }

     #[test]
     fn mode_converter_announce_mode_recursively() {
-        let mut testbench = PusConverterTestbench::new(ModeRequestConverter::default());
-        let mut sp_header = SpHeader::tc_unseg(TEST_APID, 0, 0).unwrap();
+        let mut testbench =
+            PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header =
             PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
         let mut app_data: [u8; 4] = [0; 4];
-        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID.to_be_bytes());
-        let tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
+        let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
         let token = testbench.add_tc(&tc);
         let (_active_req, req) = testbench
-            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID)
+            .convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
             .expect("conversion has failed");
         assert_eq!(req, ModeRequest::AnnounceModeRecursive);
     }

     #[test]
     fn reply_handling_unrequested_reply() {
-        let mut testbench = ReplyHandlerTestbench::new(ModeReplyHandler::default());
+        let mut testbench = ReplyHandlerTestbench::new(
+            TEST_COMPONENT_ID_0.id(),
+            ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
+        );
         let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
         let unrequested_reply =
             GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
@@ -428,12 +399,15 @@ mod tests {
     #[test]
     fn reply_handling_reply_timeout() {
-        let mut testbench = ReplyHandlerTestbench::new(ModeReplyHandler::default());
-        let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID, &[]);
+        let mut testbench = ReplyHandlerTestbench::new(
+            TEST_COMPONENT_ID_0.id(),
+            ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
+        );
+        let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
         let result = testbench.handle_request_timeout(&active_request, &[]);
         assert!(result.is_ok());
         testbench.verif_reporter.assert_completion_failure(
-            TEST_COMPONENT_ID.raw(),
+            TEST_COMPONENT_ID_0.raw(),
             req_id,
             None,
             tmtc_err::REQUEST_TIMEOUT.raw() as u64,
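The `ModeServiceWrapper` hunk above replaces hand-written `TargetedPusService` methods with `delegate::delegate!` forwarding. A small sketch of that macro pattern, assuming the standard syntax of the `delegate` crate as it appears in the diff (`Inner`, `Wrapper`, and the method body are stand-ins, not satrs types):

use delegate::delegate;

struct Inner;

impl Inner {
    fn poll_and_handle_next_tc(&mut self, _time_stamp: &[u8]) -> bool {
        false
    }
}

struct Wrapper {
    service: Inner,
}

impl Wrapper {
    delegate! {
        to self.service {
            // Generates a method with the same signature that simply
            // forwards to self.service.poll_and_handle_next_tc(...).
            fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool;
        }
    }
}

fn main() {
    let mut wrapper = Wrapper { service: Inner };
    assert!(!wrapper.poll_and_handle_next_tc(&[]));
}

The design upside over the removed code: the wrapper no longer decides how to log or interpret results, it only forwards, and the shared interpretation logic lives in the trait's default handlers.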


@@ -2,58 +2,71 @@ use std::sync::mpsc;
 use std::time::Duration;

 use crate::pus::create_verification_reporter;
-use log::{error, info, warn};
+use log::info;
 use satrs::pool::{PoolProvider, StaticMemoryPool};
 use satrs::pus::scheduler::{PusScheduler, TcInfo};
 use satrs::pus::scheduler_srv::PusSchedServiceHandler;
 use satrs::pus::verification::VerificationReporter;
 use satrs::pus::{
-    EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
-    EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
-    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
+    EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
+    MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
 };
+use satrs::spacepackets::ecss::PusServiceId;
+use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
+use satrs::ComponentId;
 use satrs_example::config::components::PUS_SCHED_SERVICE;

-use crate::tmtc::PusTcSourceProviderSharedPool;
+use super::{DirectPusService, HandlingStatus};

 pub trait TcReleaser {
-    fn release(&mut self, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
+    fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
 }

-impl TcReleaser for PusTcSourceProviderSharedPool {
-    fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
+impl TcReleaser for PacketSenderWithSharedPool {
+    fn release(
+        &mut self,
+        sender_id: ComponentId,
+        enabled: bool,
+        _info: &TcInfo,
+        tc: &[u8],
+    ) -> bool {
         if enabled {
+            let shared_pool = self.shared_pool.get_mut();
             // Transfer TC from scheduler TC pool to shared TC pool.
-            let released_tc_addr = self
-                .shared_pool
-                .pool
+            let released_tc_addr = shared_pool
+                .0
                 .write()
                 .expect("locking pool failed")
                 .add(tc)
                 .expect("adding TC to shared pool failed");
-            self.tc_source
-                .send(released_tc_addr)
+            self.sender
+                .send(PacketInPool::new(sender_id, released_tc_addr))
                 .expect("sending TC to TC source failed");
         }
         true
     }
 }

-impl TcReleaser for mpsc::Sender<Vec<u8>> {
-    fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
+impl TcReleaser for mpsc::Sender<PacketAsVec> {
+    fn release(
+        &mut self,
+        sender_id: ComponentId,
+        enabled: bool,
+        _info: &TcInfo,
+        tc: &[u8],
+    ) -> bool {
         if enabled {
             // Send released TC to centralized TC source.
-            self.send(tc.to_vec())
+            self.send(PacketAsVec::new(sender_id, tc.to_vec()))
                 .expect("sending TC to TC source failed");
         }
         true
     }
 }

-pub struct SchedulingServiceWrapper<
-    TmSender: EcssTmSenderCore,
-    TcInMemConverter: EcssTcInMemConverter,
-> {
+pub struct SchedulingServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
+{
     pub pus_11_handler: PusSchedServiceHandler<
         MpscTcReceiver,
         TmSender,
@@ -66,12 +79,68 @@ pub struct SchedulingServiceWrapper<
     pub tc_releaser: Box<dyn TcReleaser + Send>,
 }

-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
+    for SchedulingServiceWrapper<TmSender, TcInMemConverter>
+{
+    const SERVICE_ID: u8 = PusServiceId::Verification as u8;
+
+    const SERVICE_STR: &'static str = "verification";
+
+    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
+        let error_handler = |partial_error: &PartialPusHandlingError| {
+            log::warn!(
+                "PUS {}({}) partial error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                partial_error
+            );
+        };
+        let result = self.pus_11_handler.poll_and_handle_next_tc(
+            error_handler,
+            time_stamp,
+            &mut self.sched_tc_pool,
+        );
+        if let Err(e) = result {
+            log::warn!(
+                "PUS {}({}) error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                e
+            );
+            // To avoid permanent loops on continuous errors.
+            return HandlingStatus::Empty;
+        }
+        match result.unwrap() {
+            DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
+            DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
+                log::warn!(
+                    "PUS {}({}) subservice {} not implemented",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    subservice
+                );
+            }
+            DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
+                log::warn!(
+                    "PUS {}({}) subservice {} not implemented",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    subservice
+                );
+            }
+        }
+        HandlingStatus::HandledOne
+    }
+}
+
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
     SchedulingServiceWrapper<TmSender, TcInMemConverter>
 {
     pub fn release_tcs(&mut self) {
+        let id = self.pus_11_handler.service_helper.id();
         let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
-            self.tc_releaser.release(enabled, info, tc)
+            self.tc_releaser.release(id, enabled, info, tc)
         };

         self.pus_11_handler
@@ -91,50 +160,23 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
             info!("{released_tcs} TC(s) released from scheduler");
         }
     }
-
-    pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
-        match self
-            .pus_11_handler
-            .poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool)
-        {
-            Ok(result) => match result {
-                PusPacketHandlerResult::RequestHandled => {}
-                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
-                    warn!("PUS11 partial packet handling success: {e:?}")
-                }
-                PusPacketHandlerResult::CustomSubservice(invalid, _) => {
-                    warn!("PUS11 invalid subservice {invalid}");
-                }
-                PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                    warn!("PUS11: Subservice {subservice} not implemented");
-                }
-                PusPacketHandlerResult::Empty => {
-                    return true;
-                }
-            },
-            Err(error) => {
-                error!("PUS packet handling error: {error:?}")
-            }
-        }
-        false
-    }
 }

 pub fn create_scheduler_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
-    tc_releaser: PusTcSourceProviderSharedPool,
+    tm_sender: PacketSenderWithSharedPool,
+    tc_releaser: PacketSenderWithSharedPool,
     pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
     sched_tc_pool: StaticMemoryPool,
-) -> SchedulingServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> SchedulingServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
         .expect("Creating PUS Scheduler failed");
     let pus_11_handler = PusSchedServiceHandler::new(
         PusServiceHelper::new(
-            PUS_SCHED_SERVICE.raw(),
+            PUS_SCHED_SERVICE.id(),
             pus_sched_rx,
             tm_sender,
-            create_verification_reporter(PUS_SCHED_SERVICE.apid),
-            EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048),
+            create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
+            EcssTcInSharedStoreConverter::new(tc_releaser.shared_packet_store().0.clone(), 2048),
         ),
         scheduler,
     );
@@ -147,8 +189,8 @@ pub fn create_scheduler_service_static(
 }

 pub fn create_scheduler_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
-    tc_source_sender: mpsc::Sender<Vec<u8>>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
+    tc_source_sender: mpsc::Sender<PacketAsVec>,
     pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
     sched_tc_pool: StaticMemoryPool,
 ) -> SchedulingServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
@@ -158,10 +200,10 @@ pub fn create_scheduler_service_dynamic(
         .expect("Creating PUS Scheduler failed");
     let pus_11_handler = PusSchedServiceHandler::new(
         PusServiceHelper::new(
-            PUS_SCHED_SERVICE.raw(),
+            PUS_SCHED_SERVICE.id(),
             pus_sched_rx,
             tm_funnel_tx,
-            create_verification_reporter(PUS_SCHED_SERVICE.apid),
+            create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
             EcssTcInVecConverter::default(),
         ),
         scheduler,
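The reworked `TcReleaser` trait above threads a `sender_id: ComponentId` through every release so the downstream TC source can tell which component released a packet. A runnable sketch of the channel-backed variant, with `PacketAsVec` reduced to a stand-in struct and the `TcInfo` parameter omitted for brevity (neither is the real satrs type here):

use std::sync::mpsc;

type ComponentId = u64;

// Stand-in for satrs::tmtc::PacketAsVec: a packet tagged with its sender.
struct PacketAsVec {
    sender_id: ComponentId,
    packet: Vec<u8>,
}

trait TcReleaser {
    fn release(&mut self, sender_id: ComponentId, enabled: bool, tc: &[u8]) -> bool;
}

impl TcReleaser for mpsc::Sender<PacketAsVec> {
    fn release(&mut self, sender_id: ComponentId, enabled: bool, tc: &[u8]) -> bool {
        if enabled {
            // Forward the released TC to the centralized TC source,
            // including the originating component ID.
            self.send(PacketAsVec {
                sender_id,
                packet: tc.to_vec(),
            })
            .expect("sending TC to TC source failed");
        }
        true
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut releaser: Box<dyn TcReleaser> = Box::new(tx);
    releaser.release(42, true, &[0x17, 0x01]);
    let released = rx.recv().unwrap();
    assert_eq!(released.sender_id, 42);
    assert_eq!(released.packet, vec![0x17, 0x01]);
}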


@@ -1,17 +1,20 @@
 use crate::pus::mode::ModeServiceWrapper;
 use derive_new::new;
 use satrs::{
-    pus::{EcssTcInMemConverter, EcssTmSenderCore},
+    pus::{EcssTcInMemConverter, EcssTmSender},
     spacepackets::time::{cds, TimeWriter},
 };

 use super::{
     action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
-    scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, TargetedPusService,
+    scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, DirectPusService,
+    HandlingStatus, TargetedPusService,
 };

+// TODO: For better extensibility, we could create 2 vectors: One for direct PUS services and one
+// for targeted services..
 #[derive(new)]
-pub struct PusStack<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
+pub struct PusStack<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
     test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
     hk_srv_wrapper: HkServiceWrapper<TmSender, TcInMemConverter>,
     event_srv: EventServiceWrapper<TmSender, TcInMemConverter>,
@@ -20,45 +23,35 @@ pub struct PusStack<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemCon
     mode_srv: ModeServiceWrapper<TmSender, TcInMemConverter>,
 }

-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
     PusStack<TmSender, TcInMemConverter>
 {
     pub fn periodic_operation(&mut self) {
         // Release all telecommands which reached their release time before calling the service
         // handlers.
         self.schedule_srv.release_tcs();
-        let time_stamp = cds::CdsTime::now_with_u16_days()
+        let timestamp = cds::CdsTime::now_with_u16_days()
             .expect("time stamp generation error")
             .to_vec()
             .unwrap();
+        let mut loop_count = 0_u32;
+        // Hot loop which will run continuously until all request and reply handling is done.
         loop {
             let mut nothing_to_do = true;
-            let mut is_srv_finished =
-                |tc_handling_done: bool, reply_handling_done: Option<bool>| {
-                    if !tc_handling_done
-                        || (reply_handling_done.is_some() && !reply_handling_done.unwrap())
-                    {
-                        nothing_to_do = false;
-                    }
-                };
-            is_srv_finished(self.test_srv.poll_and_handle_next_packet(&time_stamp), None);
-            is_srv_finished(self.schedule_srv.poll_and_handle_next_tc(&time_stamp), None);
-            is_srv_finished(self.event_srv.poll_and_handle_next_tc(&time_stamp), None);
-            is_srv_finished(
-                self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
-                Some(
-                    self.action_srv_wrapper
-                        .poll_and_handle_next_reply(&time_stamp),
-                ),
-            );
-            is_srv_finished(
-                self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
-                Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)),
-            );
-            is_srv_finished(
-                self.mode_srv.poll_and_handle_next_tc(&time_stamp),
-                Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)),
-            );
+            Self::direct_service_checker(&mut self.test_srv, &timestamp, &mut nothing_to_do);
+            Self::direct_service_checker(&mut self.schedule_srv, &timestamp, &mut nothing_to_do);
+            Self::direct_service_checker(&mut self.event_srv, &timestamp, &mut nothing_to_do);
+            Self::targeted_service_checker(
+                &mut self.action_srv_wrapper,
+                &timestamp,
+                &mut nothing_to_do,
+            );
+            Self::targeted_service_checker(
+                &mut self.hk_srv_wrapper,
+                &timestamp,
+                &mut nothing_to_do,
+            );
+            Self::targeted_service_checker(&mut self.mode_srv, &timestamp, &mut nothing_to_do);
             if nothing_to_do {
                 // Timeout checking is only done once.
                 self.action_srv_wrapper.check_for_request_timeouts();
@@ -66,6 +59,37 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
                 self.mode_srv.check_for_request_timeouts();
                 break;
             }
+            // Safety mechanism to avoid infinite loops.
+            loop_count += 1;
+            if loop_count >= 500 {
+                log::warn!("reached PUS stack loop count 500, breaking");
+                break;
+            }
         }
     }
+
+    pub fn direct_service_checker<S: DirectPusService>(
+        service: &mut S,
+        timestamp: &[u8],
+        nothing_to_do: &mut bool,
+    ) {
+        let handling_status = service.poll_and_handle_next_tc(timestamp);
+        if handling_status == HandlingStatus::HandledOne {
+            *nothing_to_do = false;
+        }
+    }
+
+    pub fn targeted_service_checker<S: TargetedPusService>(
+        service: &mut S,
+        timestamp: &[u8],
+        nothing_to_do: &mut bool,
+    ) {
+        let request_handling = service.poll_and_handle_next_tc_default_handler(timestamp);
+        let reply_handling = service.poll_and_handle_next_reply_default_handler(timestamp);
+        if request_handling == HandlingStatus::HandledOne
+            || reply_handling == HandlingStatus::HandledOne
+        {
+            *nothing_to_do = false;
+        }
+    }
 }
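The new `periodic_operation` above polls every service in a hot loop until nothing reports progress, with a hard cap of 500 iterations as a safety net (the cap value is taken from the code above). A condensed, runnable sketch of that loop shape for a single polled service; `HandlingStatus` mirrors the enum used in the diff, everything else is a stand-in:

#[derive(Debug, PartialEq, Eq)]
enum HandlingStatus {
    HandledOne,
    Empty,
}

fn run_polling_loop(mut poll: impl FnMut() -> HandlingStatus) {
    let mut loop_count = 0_u32;
    loop {
        let mut nothing_to_do = true;
        // With several services, each one would be polled here and any
        // progress clears the flag.
        if poll() == HandlingStatus::HandledOne {
            nothing_to_do = false;
        }
        if nothing_to_do {
            break;
        }
        // Safety mechanism to avoid infinite loops on a service that
        // always reports progress.
        loop_count += 1;
        if loop_count >= 500 {
            eprintln!("reached polling loop count 500, breaking");
            break;
        }
    }
}

fn main() {
    let mut remaining = 3;
    run_polling_loop(|| {
        if remaining > 0 {
            remaining -= 1;
            HandlingStatus::HandledOne
        } else {
            HandlingStatus::Empty
        }
    });
}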


@@ -1,93 +1,113 @@
 use crate::pus::create_verification_reporter;
-use log::{info, warn};
-use satrs::params::Params;
+use log::info;
+use satrs::event_man::{EventMessage, EventMessageU32};
 use satrs::pool::SharedStaticMemoryPool;
 use satrs::pus::test::PusService17TestHandler;
 use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
 use satrs::pus::{
-    EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSenderCore, MpscTcReceiver,
-    MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusServiceHelper,
-    PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
+    DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter,
+    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusServiceHelper,
 };
+use satrs::pus::{EcssTcInSharedStoreConverter, PartialPusHandlingError};
 use satrs::spacepackets::ecss::tc::PusTcReader;
-use satrs::spacepackets::ecss::PusPacket;
-use satrs::spacepackets::time::cds::CdsTime;
-use satrs::spacepackets::time::TimeWriter;
-use satrs::{events::EventU32, pus::EcssTcInSharedStoreConverter};
+use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
+use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use satrs_example::config::components::PUS_TEST_SERVICE;
 use satrs_example::config::{tmtc_err, TEST_EVENT};
 use std::sync::mpsc;

+use super::{DirectPusService, HandlingStatus};

 pub fn create_test_service_static(
-    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
+    tm_sender: PacketSenderWithSharedPool,
     tc_pool: SharedStaticMemoryPool,
-    event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
+    event_sender: mpsc::SyncSender<EventMessageU32>,
     pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
-) -> TestCustomServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
+) -> TestCustomServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
     let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
-        PUS_TEST_SERVICE.raw(),
+        PUS_TEST_SERVICE.id(),
         pus_test_rx,
         tm_sender,
-        create_verification_reporter(PUS_TEST_SERVICE.apid),
+        create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
         EcssTcInSharedStoreConverter::new(tc_pool, 2048),
     ));
     TestCustomServiceWrapper {
         handler: pus17_handler,
-        test_srv_event_sender: event_sender,
+        event_tx: event_sender,
     }
 }

 pub fn create_test_service_dynamic(
-    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
-    event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
+    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
+    event_sender: mpsc::SyncSender<EventMessageU32>,
     pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
 ) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
     let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
-        PUS_TEST_SERVICE.raw(),
+        PUS_TEST_SERVICE.id(),
         pus_test_rx,
         tm_funnel_tx,
-        create_verification_reporter(PUS_TEST_SERVICE.apid),
+        create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
         EcssTcInVecConverter::default(),
     ));
     TestCustomServiceWrapper {
         handler: pus17_handler,
-        test_srv_event_sender: event_sender,
+        event_tx: event_sender,
     }
 }

-pub struct TestCustomServiceWrapper<
-    TmSender: EcssTmSenderCore,
-    TcInMemConverter: EcssTcInMemConverter,
-> {
+pub struct TestCustomServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
+{
     pub handler:
         PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
-    pub test_srv_event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
+    pub event_tx: mpsc::SyncSender<EventMessageU32>,
 }

-impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
-    TestCustomServiceWrapper<TmSender, TcInMemConverter>
+impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
+    for TestCustomServiceWrapper<TmSender, TcInMemConverter>
 {
-    pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> bool {
-        let res = self.handler.poll_and_handle_next_tc(time_stamp);
-        if res.is_err() {
-            warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
-            return true;
-        }
+    const SERVICE_ID: u8 = PusServiceId::Test as u8;
+
+    const SERVICE_STR: &'static str = "test";
+
+    fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus {
+        let error_handler = |partial_error: &PartialPusHandlingError| {
+            log::warn!(
+                "PUS {}({}) partial error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                partial_error
+            );
+        };
+        let res = self
+            .handler
+            .poll_and_handle_next_tc(error_handler, timestamp);
+        if let Err(e) = res {
+            log::warn!(
+                "PUS {}({}) error: {:?}",
+                Self::SERVICE_ID,
+                Self::SERVICE_STR,
+                e
+            );
+            // To avoid permanent loops on continuous errors.
+            return HandlingStatus::Empty;
+        }
         match res.unwrap() {
-            PusPacketHandlerResult::RequestHandled => {
-                info!("Received PUS ping command TC[17,1]");
-                info!("Sent ping reply PUS TM[17,2]");
+            DirectPusPacketHandlerResult::Handled(handling_status) => {
+                if handling_status == HandlingStatus::HandledOne {
+                    info!("Received PUS ping command TC[17,1]");
+                    info!("Sent ping reply PUS TM[17,2]");
+                }
+                return handling_status;
             }
-            PusPacketHandlerResult::RequestHandledPartialSuccess(partial_err) => {
-                warn!(
-                    "Handled PUS ping command with partial success: {:?}",
-                    partial_err
+            DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
+                log::warn!(
+                    "PUS {}({}) subservice {} not implemented",
+                    Self::SERVICE_ID,
+                    Self::SERVICE_STR,
+                    subservice
                 );
             }
-            PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
-                warn!("PUS17: Subservice {subservice} not implemented")
-            }
-            PusPacketHandlerResult::CustomSubservice(subservice, token) => {
+            DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
                 let (tc, _) = PusTcReader::new(
                     self.handler
                         .service_helper
@@ -95,46 +115,44 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
                         .tc_slice_raw(),
                 )
                 .unwrap();
-                let time_stamper = CdsTime::now_with_u16_days().unwrap();
-                let mut stamp_buf: [u8; 7] = [0; 7];
-                time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
                 if subservice == 128 {
-                    info!("Generating test event");
-                    self.test_srv_event_sender
-                        .send((TEST_EVENT.into(), None))
+                    info!("generating test event");
+                    self.event_tx
+                        .send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
                         .expect("Sending test event failed");
-                    let start_token = self
-                        .handler
-                        .service_helper
-                        .verif_reporter()
-                        .start_success(
-                            self.handler.service_helper.common.id,
-                            self.handler.service_helper.tm_sender(),
-                            token,
-                            &stamp_buf,
-                        )
-                        .expect("Error sending start success");
-                    self.handler
-                        .service_helper
-                        .verif_reporter()
-                        .completion_success(
-                            self.handler.service_helper.id(),
-                            self.handler.service_helper.tm_sender(),
-                            start_token,
-                            &stamp_buf,
-                        )
-                        .expect("Error sending completion success");
+                    match self.handler.service_helper.verif_reporter().start_success(
+                        self.handler.service_helper.tm_sender(),
+                        token,
+                        timestamp,
+                    ) {
+                        Ok(started_token) => {
+                            if let Err(e) = self
+                                .handler
+                                .service_helper
+                                .verif_reporter()
+                                .completion_success(
+                                    self.handler.service_helper.tm_sender(),
+                                    started_token,
+                                    timestamp,
+                                )
+                            {
+                                error_handler(&PartialPusHandlingError::Verification(e));
+                            }
+                        }
+                        Err(e) => {
+                            error_handler(&PartialPusHandlingError::Verification(e));
+                        }
+                    }
                 } else {
                     let fail_data = [tc.subservice()];
                     self.handler
                         .service_helper
                         .verif_reporter()
                         .start_failure(
-                            self.handler.service_helper.id(),
                             self.handler.service_helper.tm_sender(),
                             token,
                             FailParams::new(
-                                &stamp_buf,
+                                timestamp,
                                 &tmtc_err::INVALID_PUS_SUBSERVICE,
                                 &fail_data,
                             ),
@@ -142,10 +160,7 @@ impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
                         .expect("Sending start failure verification failed");
                 }
             }
-            PusPacketHandlerResult::Empty => {
-                return true;
-            }
         }
-        false
+        HandlingStatus::HandledOne
     }
 }
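Both `DirectPusService` implementations in this changeset (scheduler and test service) follow the same polling skeleton: a closure reports partial errors, a hard error returns `Empty` so the caller's hot loop cannot spin forever on a permanently failing service, and unknown subservices are logged. A stand-alone sketch of that skeleton; `PollResult` and `PollError` are illustrative stand-ins, not satrs names:

#[derive(Debug, PartialEq, Eq)]
enum HandlingStatus {
    HandledOne,
    Empty,
}

#[derive(Debug)]
enum PollResult {
    Handled(HandlingStatus),
    CustomSubservice(u8),
}

#[derive(Debug)]
struct PollError;

fn poll_and_handle_next_tc(
    service_str: &str,
    poll: impl FnOnce() -> Result<PollResult, PollError>,
) -> HandlingStatus {
    match poll() {
        // Hard error: report it and claim the queue is empty so the
        // caller stops polling this service for the current cycle.
        Err(e) => {
            eprintln!("PUS ({service_str}) error: {e:?}");
            HandlingStatus::Empty
        }
        Ok(PollResult::Handled(status)) => status,
        Ok(PollResult::CustomSubservice(subservice)) => {
            eprintln!("PUS ({service_str}) subservice {subservice} not implemented");
            HandlingStatus::HandledOne
        }
    }
}

fn main() {
    let status = poll_and_handle_next_tc("test", || Ok(PollResult::Handled(HandlingStatus::Empty)));
    assert_eq!(status, HandlingStatus::Empty);
    let status = poll_and_handle_next_tc("test", || Ok(PollResult::CustomSubservice(128)));
    assert_eq!(status, HandlingStatus::HandledOne);
    let status = poll_and_handle_next_tc("test", || Err(PollError));
    assert_eq!(status, HandlingStatus::Empty);
}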


@@ -1,45 +0,0 @@
/// Generic error type for sending something via a message queue.
#[derive(Debug, Copy, Clone)]
pub enum GenericSendError {
RxDisconnected,
QueueFull(Option<u32>),
}
impl Display for GenericSendError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
GenericSendError::RxDisconnected => {
write!(f, "rx side has disconnected")
}
GenericSendError::QueueFull(max_cap) => {
write!(f, "queue with max capacity of {max_cap:?} is full")
}
}
}
}
#[cfg(feature = "std")]
impl Error for GenericSendError {}
/// Generic error type for sending something via a message queue.
#[derive(Debug, Copy, Clone)]
pub enum GenericRecvError {
Empty,
TxDisconnected,
}
impl Display for GenericRecvError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::TxDisconnected => {
write!(f, "tx side has disconnected")
}
Self::Empty => {
write!(f, "nothing to receive")
}
}
}
}
#[cfg(feature = "std")]
impl Error for GenericRecvError {}


@@ -8,9 +8,9 @@ use satrs::mode::ModeRequest;
 use satrs::pus::verification::{
     FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
 };
-use satrs::pus::{ActiveRequestProvider, EcssTmSenderCore, GenericRoutingError, PusRequestRouter};
+use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
 use satrs::queue::GenericSendError;
-use satrs::request::{GenericMessage, MessageMetadata};
+use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
 use satrs::spacepackets::ecss::tc::PusTcReader;
 use satrs::spacepackets::ecss::PusPacket;
 use satrs::ComponentId;
@@ -47,7 +47,7 @@ impl GenericRequestRouter {
         active_request: &impl ActiveRequestProvider,
         tc: &PusTcReader,
         error: GenericRoutingError,
-        tm_sender: &(impl EcssTmSenderCore + ?Sized),
+        tm_sender: &(impl EcssTmSender + ?Sized),
         verif_reporter: &impl VerificationReportingProvider,
         time_stamp: &[u8],
     ) {
@@ -61,11 +61,13 @@ impl GenericRequestRouter {
             .expect("token is not in accepted state");
         match error {
             GenericRoutingError::UnknownTargetId(id) => {
+                let apid_target_id = UniqueApidTargetId::from(id);
+                warn!("Target APID for request: {}", apid_target_id.apid);
+                warn!("Target Unique ID for request: {}", apid_target_id.unique_id);
                 let mut fail_data: [u8; 8] = [0; 8];
                 fail_data.copy_from_slice(&id.to_be_bytes());
                 verif_reporter
                     .completion_failure(
-                        self.id,
                         tm_sender,
                         accepted_token,
                         FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data),
@@ -77,7 +79,6 @@ impl GenericRequestRouter {
                 fail_data.copy_from_slice(&active_request.target_id().to_be_bytes());
                 verif_reporter
                     .completion_failure(
-                        self.id,
                         tm_sender,
                         accepted_token,
                         FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data),
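The new diagnostics above decompose the failed `ComponentId` into its APID and unique ID via `UniqueApidTargetId::from(id)`. A hedged sketch of such a decomposition; the 32/32 bit split shown here is an assumption for illustration only, the authoritative layout lives in `satrs::request`:

// Stand-in for satrs::request::UniqueApidTargetId, assuming the APID
// sits in the upper half of the u64 and the unique ID in the lower half.
struct UniqueApidTargetId {
    apid: u16,
    unique_id: u32,
}

impl From<u64> for UniqueApidTargetId {
    fn from(id: u64) -> Self {
        Self {
            apid: (id >> 32) as u16,
            unique_id: (id & 0xffff_ffff) as u32,
        }
    }
}

fn main() {
    let id = UniqueApidTargetId::from((0x123_u64 << 32) | 7);
    assert_eq!(id.apid, 0x123);
    assert_eq!(id.unique_id, 7);
}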


@@ -1,212 +0,0 @@
use log::warn;
use satrs::pus::{
EcssTcAndToken, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, ReceivesEcssPusTc,
};
use satrs::spacepackets::SpHeader;
use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError};
use thiserror::Error;
use crate::pus::PusReceiver;
use satrs::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::tmtc::ReceivesCcsdsTc;
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum MpscStoreAndSendError {
#[error("Store error: {0}")]
Store(#[from] StoreError),
#[error("TC send error: {0}")]
TcSend(#[from] SendError<EcssTcAndToken>),
#[error("TMTC send error: {0}")]
TmTcSend(#[from] SendError<StoreAddr>),
}
#[derive(Clone)]
pub struct SharedTcPool {
pub pool: SharedStaticMemoryPool,
}
impl SharedTcPool {
pub fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError> {
let mut pg = self.pool.write().expect("error locking TC store");
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
})?;
Ok(addr)
}
}
#[derive(Clone)]
pub struct PusTcSourceProviderSharedPool {
pub tc_source: SyncSender<StoreAddr>,
pub shared_pool: SharedTcPool,
}
impl PusTcSourceProviderSharedPool {
#[allow(dead_code)]
pub fn clone_backing_pool(&self) -> SharedStaticMemoryPool {
self.shared_pool.pool.clone()
}
}
impl ReceivesEcssPusTc for PusTcSourceProviderSharedPool {
type Error = MpscStoreAndSendError;
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
let addr = self.shared_pool.add_pus_tc(pus_tc)?;
self.tc_source.send(addr)?;
Ok(())
}
}
impl ReceivesCcsdsTc for PusTcSourceProviderSharedPool {
type Error = MpscStoreAndSendError;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut pool = self.shared_pool.pool.write().expect("locking pool failed");
let addr = pool.add(tc_raw)?;
drop(pool);
self.tc_source.send(addr)?;
Ok(())
}
}
// Newtype, can not implement necessary traits on MPSC sender directly because of orphan rules.
#[derive(Clone)]
pub struct PusTcSourceProviderDynamic(pub Sender<Vec<u8>>);
impl ReceivesEcssPusTc for PusTcSourceProviderDynamic {
type Error = SendError<Vec<u8>>;
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
self.0.send(pus_tc.raw_data().to_vec())?;
Ok(())
}
}
impl ReceivesCcsdsTc for PusTcSourceProviderDynamic {
type Error = mpsc::SendError<Vec<u8>>;
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.0.send(tc_raw.to_vec())?;
Ok(())
}
}
// TC source components where static pools are the backing memory of the received telecommands.
pub struct TcSourceTaskStatic {
shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>,
tc_buf: [u8; 4096],
pus_receiver: PusReceiver<MpscTmInSharedPoolSenderBounded>,
}
impl TcSourceTaskStatic {
pub fn new(
shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>,
pus_receiver: PusReceiver<MpscTmInSharedPoolSenderBounded>,
) -> Self {
Self {
shared_tc_pool,
tc_receiver,
tc_buf: [0; 4096],
pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> bool {
match self.tc_receiver.try_recv() {
Ok(addr) => {
let pool = self
.shared_tc_pool
.pool
.read()
.expect("locking tc pool failed");
pool.read(&addr, &mut self.tc_buf)
.expect("reading pool failed");
drop(pool);
match PusTcReader::new(&self.tc_buf) {
Ok((pus_tc, _)) => {
self.pus_receiver
.handle_tc_packet(
satrs::pus::TcInMemory::StoreAddr(addr),
pus_tc.service(),
&pus_tc,
)
.ok();
true
}
Err(e) => {
warn!("error creating PUS TC from raw data: {e}");
warn!("raw data: {:x?}", self.tc_buf);
true
}
}
}
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
warn!("tmtc thread: sender disconnected");
false
}
},
}
}
}
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> bool {
match self.tc_receiver.try_recv() {
Ok(tc) => match PusTcReader::new(&tc) {
Ok((pus_tc, _)) => {
self.pus_receiver
.handle_tc_packet(
satrs::pus::TcInMemory::Vec(tc.clone()),
pus_tc.service(),
&pus_tc,
)
.ok();
true
}
Err(e) => {
warn!("error creating PUS TC from raw data: {e}");
warn!("raw data: {:x?}", tc);
true
}
},
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
warn!("tmtc thread: sender disconnected");
false
}
},
}
}
}


@@ -0,0 +1,2 @@
pub mod tc_source;
pub mod tm_sink;


@@ -0,0 +1,107 @@
use satrs::{
pool::PoolProvider,
pus::HandlingStatus,
tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool, SharedPacketPool},
};
use std::sync::mpsc::{self, TryRecvError};
use satrs::pus::MpscTmAsVecSender;
use crate::pus::PusTcDistributor;
// TC source components where static pools are the backing memory of the received telecommands.
pub struct TcSourceTaskStatic {
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
tc_buf: [u8; 4096],
pus_distributor: PusTcDistributor<PacketSenderWithSharedPool>,
}
impl TcSourceTaskStatic {
pub fn new(
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
pus_receiver: PusTcDistributor<PacketSenderWithSharedPool>,
) -> Self {
Self {
shared_tc_pool,
tc_receiver,
tc_buf: [0; 4096],
pus_distributor: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect ECSS PUS packets.
// If packets like CFDP are expected, we might have to check the APID first.
match self.tc_receiver.try_recv() {
Ok(packet_in_pool) => {
let pool = self
.shared_tc_pool
.0
.read()
.expect("locking tc pool failed");
pool.read(&packet_in_pool.store_addr, &mut self.tc_buf)
.expect("reading pool failed");
drop(pool);
self.pus_distributor
.handle_tc_packet_in_store(packet_in_pool, &self.tc_buf)
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_distributor: PusTcDistributor<MpscTmAsVecSender>,
}
impl TcSourceTaskDynamic {
pub fn new(
tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
) -> Self {
Self {
tc_receiver,
pus_distributor: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect ECSS PUS packets.
// If packets like CFDP are expected, we might have to check the APID first.
match self.tc_receiver.try_recv() {
Ok(packet_as_vec) => {
self.pus_distributor
.handle_tc_packet_vec(packet_as_vec)
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
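One plausible way to drive these polling tasks from a dedicated thread is a drain loop over poll_tc: keep handling packets while HandlingStatus::HandledOne is returned and yield once the receiver reports Empty. This is a minimal sketch, not code from this repository; the loop shape, the helper name and the 40 ms poll period are illustrative assumptions.

use std::time::Duration;
use satrs::pus::HandlingStatus;

// Hypothetical driver: drain all pending TCs, then sleep until the next poll cycle.
fn run_tc_source_task(mut task: TcSourceTaskDynamic) {
    loop {
        // Handle every queued telecommand before going back to sleep.
        while matches!(task.poll_tc(), HandlingStatus::HandledOne) {}
        std::thread::sleep(Duration::from_millis(40));
    }
}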


@@ -4,7 +4,7 @@ use std::{
 };
 use log::info;
-use satrs::pus::{PusTmAsVec, PusTmInPool};
+use satrs::tmtc::{PacketAsVec, PacketInPool, SharedPacketPool};
 use satrs::{
     pool::PoolProvider,
     seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
@@ -13,10 +13,9 @@ use satrs::{
         time::cds::MIN_CDS_FIELD_LEN,
         CcsdsPacket,
     },
-    tmtc::tm_helper::SharedTmPool,
 };
-use crate::tcp::SyncTcpTmSource;
+use crate::interface::tcp::SyncTcpTmSource;
 #[derive(Default)]
 pub struct CcsdsSeqCounterMap {
@@ -71,23 +70,28 @@ impl TmFunnelCommon {
     }
     fn packet_printout(tm: &PusTmZeroCopyWriter) {
-        info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
+        info!(
+            "Sending PUS TM[{},{}] with APID {}",
+            tm.service(),
+            tm.subservice(),
+            tm.apid()
+        );
     }
 }
-pub struct TmFunnelStatic {
+pub struct TmSinkStatic {
     common: TmFunnelCommon,
-    shared_tm_store: SharedTmPool,
-    tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
-    tm_server_tx: mpsc::SyncSender<PusTmInPool>,
+    shared_tm_store: SharedPacketPool,
+    tm_funnel_rx: mpsc::Receiver<PacketInPool>,
+    tm_server_tx: mpsc::SyncSender<PacketInPool>,
 }
-impl TmFunnelStatic {
+impl TmSinkStatic {
     pub fn new(
-        shared_tm_store: SharedTmPool,
+        shared_tm_store: SharedPacketPool,
         sync_tm_tcp_source: SyncTcpTmSource,
-        tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
-        tm_server_tx: mpsc::SyncSender<PusTmInPool>,
+        tm_funnel_rx: mpsc::Receiver<PacketInPool>,
+        tm_server_tx: mpsc::SyncSender<PacketInPool>,
     ) -> Self {
         Self {
             common: TmFunnelCommon::new(sync_tm_tcp_source),
@@ -101,7 +105,7 @@ impl TmFunnelStatic {
         if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() {
             // Read the TM, set sequence counter and message counter, and finally update
             // the CRC.
-            let shared_pool = self.shared_tm_store.clone_backing_pool();
+            let shared_pool = self.shared_tm_store.0.clone();
             let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
             let mut tm_copy = Vec::new();
             pool_guard
@@ -122,17 +126,17 @@ impl TmFunnelStatic {
     }
 }
-pub struct TmFunnelDynamic {
+pub struct TmSinkDynamic {
     common: TmFunnelCommon,
-    tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
-    tm_server_tx: mpsc::Sender<PusTmAsVec>,
+    tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
+    tm_server_tx: mpsc::Sender<PacketAsVec>,
 }
-impl TmFunnelDynamic {
+impl TmSinkDynamic {
     pub fn new(
         sync_tm_tcp_source: SyncTcpTmSource,
-        tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
-        tm_server_tx: mpsc::Sender<PusTmAsVec>,
+        tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
+        tm_server_tx: mpsc::Sender<PacketAsVec>,
     ) -> Self {
         Self {
             common: TmFunnelCommon::new(sync_tm_tcp_source),


@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 # [unreleased]
+# [v0.1.2] 2024-04-17
+Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
 # [v0.1.1] 2024-02-17
 - Bumped `spacepackets` to v0.10.0


@@ -1,6 +1,6 @@
 [package]
 name = "satrs-mib"
-version = "0.1.1"
+version = "0.1.2"
 edition = "2021"
 rust-version = "1.61"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
@@ -23,13 +23,12 @@ version = "1"
 optional = true
 [dependencies.satrs-shared]
-path = "../satrs-shared"
-version = "0.1.3"
+version = ">=0.1.3, <0.2"
 features = ["serde"]
 [dependencies.satrs-mib-codegen]
 path = "codegen"
-version = "0.1.1"
+version = "0.1.2"
 [dependencies.serde]
 version = "1"


@@ -1,6 +1,6 @@
 [package]
 name = "satrs-mib-codegen"
-version = "0.1.1"
+version = "0.1.2"
 edition = "2021"
 description = "satrs-mib proc macro implementation"
 homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
@@ -28,8 +28,7 @@ features = ["full"]
 trybuild = { version = "1", features = ["diff"] }
 [dev-dependencies.satrs-shared]
-version = "0.1.3"
-path = "../../satrs-shared"
+version = ">=0.1.3, <0.2"
 [dev-dependencies.satrs-mib]
 path = ".."


@@ -10,9 +10,14 @@ serde = { version = "1", features = ["derive"] }
 serde_json = "1"
 log = "0.4"
 thiserror = "1"
+fern = "0.5"
+humantime = "2"
 [dependencies.asynchronix]
 version = "0.2.1"
+git = "https://github.com/asynchronics/asynchronix.git"
+branch = "main"
+features = ["serde"]
 [dependencies.satrs]
 path = "../satrs"


@@ -189,11 +189,11 @@ pub mod tests {
     #[test]
     fn test_basic_mgm_request() {
         let mut sim_testbench = SimTestbench::new();
-        let request = SimRequest::new(MgmRequest::RequestSensorData);
+        let request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply = sim_testbench.try_receive_next_reply();
         assert!(sim_reply.is_some());
@@ -212,11 +212,11 @@ pub mod tests {
         let mut sim_testbench = SimTestbench::new();
         switch_device_on(&mut sim_testbench, PcduSwitch::Mgm);
-        let mut request = SimRequest::new(MgmRequest::RequestSensorData);
+        let mut request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let mut sim_reply_res = sim_testbench.try_receive_next_reply();
         assert!(sim_reply_res.is_some());
@@ -226,11 +226,11 @@ pub mod tests {
             .expect("failed to deserialize MGM sensor values");
         sim_testbench.step_by(Duration::from_millis(50));
-        request = SimRequest::new(MgmRequest::RequestSensorData);
+        request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         sim_reply_res = sim_testbench.try_receive_next_reply();
         assert!(sim_reply_res.is_some());
@@ -245,11 +245,11 @@ pub mod tests {
     #[test]
     fn test_basic_mgt_request_is_off() {
         let mut sim_testbench = SimTestbench::new();
-        let request = SimRequest::new(MgtRequest::RequestHk);
+        let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply_res = sim_testbench.try_receive_next_reply();
         assert!(sim_reply_res.is_none());
@@ -259,12 +259,12 @@ pub mod tests {
     fn test_basic_mgt_request_is_on() {
         let mut sim_testbench = SimTestbench::new();
         switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
-        let request = SimRequest::new(MgtRequest::RequestHk);
+        let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply_res = sim_testbench.try_receive_next_reply();
         assert!(sim_reply_res.is_some());
@@ -281,11 +281,11 @@ pub mod tests {
     }
     fn check_mgt_hk(sim_testbench: &mut SimTestbench, expected_hk_set: MgtHkSet) {
-        let request = SimRequest::new(MgtRequest::RequestHk);
+        let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply_res = sim_testbench.try_receive_next_reply();
         assert!(sim_reply_res.is_some());
@@ -309,14 +309,14 @@ pub mod tests {
             y: 200,
             z: 1000,
         };
-        let request = SimRequest::new(MgtRequest::ApplyTorque {
+        let request = SimRequest::new_with_epoch_time(MgtRequest::ApplyTorque {
             duration: Duration::from_millis(100),
             dipole: commanded_dipole,
         });
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step_by(Duration::from_millis(5));
         check_mgt_hk(


@@ -49,25 +49,27 @@ impl SimController {
     }
     pub fn run(&mut self, start_time: MonotonicTime, udp_polling_interval_ms: u64) {
-        let mut t = start_time + Duration::from_millis(udp_polling_interval_ms);
+        let mut t = start_time;
+        self.sys_clock.synchronize(t);
         loop {
+            let t_old = t;
             // Check for UDP requests every millisecond. Shift the simulator ahead here to prevent
             // replies lying in the past.
             t += Duration::from_millis(udp_polling_interval_ms);
+            self.sys_clock.synchronize(t);
+            self.handle_sim_requests(t_old);
             self.simulation
                 .step_until(t)
                 .expect("simulation step failed");
-            self.handle_sim_requests();
-            self.sys_clock.synchronize(t);
         }
     }
-    pub fn handle_sim_requests(&mut self) {
+    pub fn handle_sim_requests(&mut self, old_timestamp: MonotonicTime) {
         loop {
             match self.request_receiver.try_recv() {
                 Ok(request) => {
+                    if request.timestamp < old_timestamp {
+                        log::warn!("stale data with timestamp {:?} received", request.timestamp);
+                    }
                     if let Err(e) = match request.target() {
                         SimTarget::SimCtrl => self.handle_ctrl_request(&request),
                         SimTarget::Mgm => self.handle_mgm_request(&request),
@@ -172,11 +174,11 @@ mod tests {
     #[test]
     fn test_basic_ping() {
         let mut sim_testbench = SimTestbench::new();
-        let request = SimRequest::new(SimCtrlRequest::Ping);
+        let request = SimRequest::new_with_epoch_time(SimCtrlRequest::Ping);
         sim_testbench
             .send_request(request)
             .expect("sending sim ctrl request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply = sim_testbench.try_receive_next_reply();
         assert!(sim_reply.is_some());


@@ -86,14 +86,14 @@ pub(crate) mod tests {
         switch: PcduSwitch,
         target: SwitchStateBinary,
     ) {
-        let request = SimRequest::new(PcduRequest::SwitchDevice {
+        let request = SimRequest::new_with_epoch_time(PcduRequest::SwitchDevice {
             switch,
             state: target,
         });
         sim_testbench
             .send_request(request)
             .expect("sending MGM switch request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
     }
@@ -113,11 +113,11 @@ pub(crate) mod tests {
     }
     fn check_switch_state(sim_testbench: &mut SimTestbench, expected_switch_map: &SwitchMap) {
-        let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
+        let request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step();
         let sim_reply = sim_testbench.try_receive_next_reply();
         assert!(sim_reply.is_some());
@@ -143,11 +143,11 @@ pub(crate) mod tests {
     #[test]
     fn test_pcdu_switcher_request() {
         let mut sim_testbench = SimTestbench::new();
-        let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
+        let request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
         sim_testbench
             .send_request(request)
             .expect("sending MGM request failed");
-        sim_testbench.handle_sim_requests();
+        sim_testbench.handle_sim_requests_time_agnostic();
         sim_testbench.step_by(Duration::from_millis(1));
         let sim_reply = sim_testbench.try_receive_next_reply();


@@ -1,5 +1,8 @@
+use asynchronix::time::MonotonicTime;
 use serde::{de::DeserializeOwned, Deserialize, Serialize};
+pub const SIM_CTRL_UDP_PORT: u16 = 7303;
 #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub enum SimTarget {
     SimCtrl,
@@ -19,6 +22,7 @@ pub struct SimMessage {
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
 pub struct SimRequest {
     inner: SimMessage,
+    pub timestamp: MonotonicTime,
 }
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
@@ -53,12 +57,22 @@ pub trait SimMessageProvider: Serialize + DeserializeOwned + Clone + Sized {
 }
 impl SimRequest {
-    pub fn new<T: SerializableSimMsgPayload<SimRequest>>(serializable_request: T) -> Self {
+    pub fn new_with_epoch_time<T: SerializableSimMsgPayload<SimRequest>>(
+        serializable_request: T,
+    ) -> Self {
+        Self::new(serializable_request, MonotonicTime::EPOCH)
+    }
+    pub fn new<T: SerializableSimMsgPayload<SimRequest>>(
+        serializable_request: T,
+        timestamp: MonotonicTime,
+    ) -> Self {
         Self {
             inner: SimMessage {
                 target: T::TARGET,
                 payload: serde_json::to_string(&serializable_request).unwrap(),
             },
+            timestamp,
         }
     }
 }
@@ -363,7 +377,7 @@ pub mod tests {
     #[test]
     fn test_basic_request() {
-        let sim_request = SimRequest::new(DummyRequest::Ping);
+        let sim_request = SimRequest::new_with_epoch_time(DummyRequest::Ping);
         assert_eq!(sim_request.target(), SimTarget::SimCtrl);
         assert_eq!(sim_request.msg_type(), SimMessageType::Request);
         let dummy_request =
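The timestamp migration in a nutshell: SimRequest::new now takes the time explicitly, while new_with_epoch_time keeps the old one-argument ergonomics for time-agnostic callers. A short sketch under assumptions (the SimCtrlRequest import path is illustrative; the constructor signatures match the diff above):

use asynchronix::time::MonotonicTime;
use satrs_minisim::{SimCtrlRequest, SimRequest};

// Build a request tagged with the current simulation time.
fn make_ping(now: MonotonicTime) -> SimRequest {
    SimRequest::new(SimCtrlRequest::Ping, now)
}

// Tests and other time-agnostic callers can fall back to the epoch.
fn make_time_agnostic_ping() -> SimRequest {
    SimRequest::new_with_epoch_time(SimCtrlRequest::Ping)
}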


@@ -3,7 +3,7 @@ use asynchronix::simulation::{Mailbox, SimInit};
 use asynchronix::time::{MonotonicTime, SystemClock};
 use controller::SimController;
 use eps::PcduModel;
-use satrs_minisim::{SimReply, SimRequest};
+use satrs_minisim::{SimReply, SimRequest, SIM_CTRL_UDP_PORT};
 use std::sync::mpsc;
 use std::thread;
 use std::time::{Duration, SystemTime};
@@ -83,14 +83,38 @@ fn main() {
     let t0 = MonotonicTime::EPOCH;
     let mut sim_ctrl =
         create_sim_controller(ThreadingModel::Default, t0, reply_sender, request_receiver);
+    // Configure logger at runtime
+    fern::Dispatch::new()
+        // Perform allocation-free log formatting
+        .format(|out, message, record| {
+            out.finish(format_args!(
+                "[{} {} {}] {}",
+                humantime::format_rfc3339(std::time::SystemTime::now()),
+                record.level(),
+                record.target(),
+                message
+            ))
+        })
+        // Add blanket level filter -
+        .level(log::LevelFilter::Debug)
+        // - and per-module overrides
+        // Output to stdout, files, and other Dispatch configurations
+        .chain(std::io::stdout())
+        .chain(fern::log_file("output.log").expect("could not open log output file"))
+        // Apply globally
+        .apply()
+        .expect("could not apply logger configuration");
+    log::info!("starting simulation thread");
     // This thread schedules the simulator.
     let sim_thread = thread::spawn(move || {
         sim_ctrl.run(t0, 1);
     });
-    let mut udp_server = SimUdpServer::new(0, request_sender, reply_receiver, 200, None)
-        .expect("could not create UDP request server");
+    let mut udp_server =
+        SimUdpServer::new(SIM_CTRL_UDP_PORT, request_sender, reply_receiver, 200, None)
+            .expect("could not create UDP request server");
+    log::info!("starting UDP server on port {}", SIM_CTRL_UDP_PORT);
     // This thread manages the simulator UDP server.
     let udp_tc_thread = thread::spawn(move || {
         udp_server.run();


@@ -26,10 +26,13 @@ impl SimTestbench {
             request_sender,
         }
     }
+    pub fn handle_sim_requests_time_agnostic(&mut self) {
+        self.handle_sim_requests(MonotonicTime::EPOCH);
+    }
     delegate! {
         to self.sim_controller {
-            pub fn handle_sim_requests(&mut self);
+            pub fn handle_sim_requests(&mut self, old_timestamp: MonotonicTime);
         }
         to self.sim_controller.simulation {
             pub fn step(&mut self);


@@ -270,7 +270,7 @@ mod tests {
             UdpTestbench::new(true, Some(SERVER_WAIT_TIME_MS), 10)
                 .expect("could not create testbench");
         let server_thread = std::thread::spawn(move || udp_server.run());
-        let sim_request = SimRequest::new(PcduRequest::RequestSwitchInfo);
+        let sim_request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
         udp_testbench
             .send_request(&sim_request)
             .expect("sending request failed");
@@ -292,7 +292,7 @@
                 .expect("could not create testbench");
         let server_thread = std::thread::spawn(move || udp_server.run());
         udp_testbench
-            .send_request(&SimRequest::new(SimCtrlRequest::Ping))
+            .send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
             .expect("sending request failed");
         let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
@@ -316,7 +316,7 @@
         // Send a ping so that the server knows the address of the client.
         // Do not check that the request arrives on the receiver side, is done by other test.
         udp_testbench
-            .send_request(&SimRequest::new(SimCtrlRequest::Ping))
+            .send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
             .expect("sending request failed");
         // Send a reply to the server, ensure it gets forwarded to the client.
@@ -347,7 +347,7 @@
         // Connect by sending a ping.
         udp_testbench
-            .send_request(&SimRequest::new(SimCtrlRequest::Ping))
+            .send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
             .expect("sending request failed");
         std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
@@ -376,7 +376,7 @@
         // Connect by sending a ping.
         udp_testbench
-            .send_request(&SimRequest::new(SimCtrlRequest::Ping))
+            .send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
             .expect("sending request failed");
         std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));


@@ -8,6 +8,19 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 # [unreleased]
+# [v0.1.4] 2024-04-24
+## Added
+- `ResultU16::from_be_bytes`
+- `From<u16>` impl for `ResultU16`.
+- Optional `defmt` support: `defmt::Format` impl on `ResultU16` if the `defmt` feature is
+  activated.
+# [v0.1.3] 2024-04-16
+Allow `spacepackets` range starting with v0.10 and v0.11.
 # [v0.1.2] 2024-02-17
 - Bumped `spacepackets` to v0.10.0 for `UnsignedEnum` trait change.


@@ -1,7 +1,7 @@
 [package]
 name = "satrs-shared"
 description = "Components shared by multiple sat-rs crates"
-version = "0.1.3"
+version = "0.1.4"
 edition = "2021"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
 homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -17,14 +17,17 @@ version = "1"
 default-features = false
 optional = true
+[dependencies.defmt]
+version = "0.3"
+optional = true
 [dependencies.spacepackets]
-git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
-version = "0.11.0-rc.0"
-branch = "main"
+version = ">0.9, <=0.11"
 default-features = false
 [features]
 serde = ["dep:serde", "spacepackets/serde"]
+spacepackets = ["dep:defmt", "spacepackets/defmt"]
 [package.metadata.docs.rs]
-rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
+rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]


@@ -1,3 +1,4 @@
 //! This crate contains modules shared among other sat-rs framework crates.
 #![no_std]
+#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
 pub mod res_code;


@@ -7,6 +7,7 @@ use spacepackets::ByteConversionError;
 /// Simple [u16] based result code type which also allows to group related resultcodes.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub struct ResultU16 {
     group_id: u8,
     unique_id: u8,
@@ -19,15 +20,28 @@ impl ResultU16 {
             unique_id,
         }
     }
     pub fn raw(&self) -> u16 {
         ((self.group_id as u16) << 8) | self.unique_id as u16
     }
     pub fn group_id(&self) -> u8 {
         self.group_id
     }
     pub fn unique_id(&self) -> u8 {
         self.unique_id
     }
+    pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
+        Self::from(u16::from_be_bytes(bytes))
+    }
+}
+impl From<u16> for ResultU16 {
+    fn from(value: u16) -> Self {
+        Self::new(((value >> 8) & 0xff) as u8, (value & 0xff) as u8)
+    }
 }
 impl From<ResultU16> for EcssEnumU16 {
@@ -84,5 +98,14 @@ mod tests {
         assert_eq!(written, 2);
         assert_eq!(buf[0], 1);
         assert_eq!(buf[1], 1);
+        let read_back = ResultU16::from_be_bytes(buf);
+        assert_eq!(read_back, result_code);
+    }
+    #[test]
+    fn test_from_u16() {
+        let result_code = ResultU16::new(1, 1);
+        let result_code_2 = ResultU16::from(result_code.raw());
+        assert_eq!(result_code, result_code_2);
     }
 }
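Taken together, the new conversions give a cheap round trip between ResultU16, its raw u16 form, and big-endian bytes. A small sketch of the invariants the added tests rely on (not part of the diff itself):

use satrs_shared::res_code::ResultU16;

fn result_u16_round_trip() {
    let code = ResultU16::new(2, 3);
    // Group ID occupies the high byte, unique ID the low byte.
    assert_eq!(code.raw(), 0x0203);
    assert_eq!(ResultU16::from(code.raw()), code);
    assert_eq!(ResultU16::from_be_bytes(code.raw().to_be_bytes()), code);
}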


@@ -8,8 +8,104 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 # [unreleased]
+# [v0.2.1] 2024-05-19
 ## Changed
+- The HAL TCP server `ServerConfig::new` method now sets the `reuse_port` and `reuse_addr`
+  fields to `true`.
+## Fixed
+- Possibly subtly broken v0.2.0 build artifact.
+# [v0.2.0] 2024-05-02
+## Changed
+- Various improvements for the PUS stack components.
+## Added
+- Added `HandlingStatus` enumeration.
+# [v0.2.0-rc.5] 2024-04-24
+## Added
+- Optional `defmt::Format` support for the event types, if the `defmt` feature is activated.
+## Changed
+- Removed `MpscEventReceiver`, the `EventReceiveProvider` trait is implemented directly
+  on `mpsc::Receiver<EventMessage<Event>>`
+- Renamed `PusEventDispatcher` to `PusEventTmCreatorWithMap`.
+- Renamed `DefaultPusEventU32Dispatcher` to `DefaultPusEventU32EventCreator`.
+- Renamed `PusEventMgmtBackendProvider` to `PusEventReportingMap`.
+- Renamed Event `const_new` methods to `new` and the former `new` methods to `new_checked`
+# [v0.2.0-rc.4] 2024-04-23
+## Changed
+- The `parse_for_ccsds_space_packets` method now expects a non-mutable slice and does not copy
+  broken tail packets anymore. It also does not expect a mutable `next_write_idx` argument anymore.
+  Instead, a `ParseResult` structure is returned which contains the `packets_found` and an
+  optional `incomplete_tail_start` value.
+## Fixed
+- `parse_for_ccsds_space_packets` did not detect CCSDS space packets at the buffer end with the
+  smallest possible size of 7 bytes.
+- TCP server component now re-registers the internal `mio::Poll` object if the client reset
+  the connection unexpectedly. Not doing so prevented the server from functioning properly
+  after a re-connect.
+# [v0.2.0-rc.3] 2024-04-17
+docs-rs hotfix 2
+# [v0.2.0-rc.2] 2024-04-17
+docs-rs hotfix
+# [v0.2.0-rc.1] 2024-04-17
+- `spacepackets` v0.11
+## Added
+- Added `params::WritableToBeBytes::to_vec`.
+- New `ComponentId` (`u64` typedef for now) which replaces former `TargetId` as a generic
+  way to identify components.
+- Various abstractions and objects for targeted requests. This includes mode request/reply
+  types for actions, HK and modes.
+- `VerificationReportingProvider::owner_id` method.
+- Introduced generic `EventMessage` which is generic over the event type and the additional
+  parameter type. This message also contains the sender ID which can be useful for debugging
+  or application layer / FDIR logic.
+- Stop signal handling for the TCP servers.
+- TCP server now uses the `mio` crate to allow non-blocking operation. The server can now handle
+  multiple connections at once, and the context information about handled transfers is
+  passed via a callback which is inserted as a generic as well.
+## Changed
+- Renamed `ReceivesTcCore` to `PacketSenderRaw` to better show its primary purpose. It now contains
+  a `send_raw_tc` method which is not mutable anymore.
+- Renamed `TmPacketSourceCore` to `TmPacketSource`.
+- Renamed `EcssTmSenderCore` to `EcssTmSender`.
+- Renamed `StoreAddr` to `PoolAddr`.
+- Renamed `StoreError` to `PoolError`.
+- TCP server generics order. The error generics come last now.
+- `encoding::ccsds::PacketIdValidator` renamed to `ValidatorU16Id`, which lives in the crate root.
+  It can be used for both CCSDS packet ID and CCSDS APID validation.
+- `EventManager::try_event_handling` now expects a mutable error handling closure instead of
+  returning the occurred errors.
+- Renamed `EventManagerBase` to `EventReportCreator`
+- Renamed `VerificationReporterCore` to `VerificationReportCreator`.
+- Removed `VerificationReporterCore`. The high-level API exposed by `VerificationReporter` and
+  the low level API exposed by `VerificationReportCreator` should be sufficient for all use-cases.
 - Refactored `EventManager` to heavily use generics instead of trait objects.
 - `SendEventProvider` -> `EventSendProvider`. `id` trait method renamed to `channel_id`.
 - `ListenerTable` -> `ListenerMapProvider`
@@ -28,11 +124,30 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 - Replace `TargetId` by `ComponentId`.
 - Replace most usages of `ChannelId` by `ComponentId`. A dedicated channel ID has limited usage
   due to the nature of typed channels in Rust.
+- `CheckTimer` renamed to `CountdownProvider`.
+- Renamed `TargetId` to `ComponentId`.
+- Replaced most `ChannelId` occurrences with `ComponentId`. For typed channels, there is generally
+  no need for dedicated channel IDs.
+- Changed `params::WritableToBeBytes::raw_len` to `written_len` for consistency.
+- `EventReporter` caches component ID.
+- Renamed `PusService11SchedHandler` to `PusSchedServiceHandler`.
+- Fixed general naming of PUS handlers from `handle_one_tc` to `poll_and_handle_next_tc`.
+- Reworked verification module: The sender (`impl EcssTmSenderCore`)
+  now needs to be passed explicitly to the `VerificationReportingProvider` abstraction. This
+  allows easier sharing of the TM sender component.
 ## Fixed
 - Update deprecated API for `PusScheduler::insert_wrapped_tc_cds_short`
   and `PusScheduler::insert_wrapped_tc_cds_long`.
+- `EventReporter` uses interior mutability pattern to allow non-mutable API.
+## Removed
+- Removed `objects` module.
+- Removed CCSDS and PUS distributor modules. Their worth is questionable in an architecture
+  where routing traits are sufficient and the core logic to demultiplex and distribute packets
+  is simple enough to be application code.
 # [v0.2.0-rc.0] 2024-02-21


@@ -1,8 +1,8 @@
 [package]
 name = "satrs"
-version = "0.2.0-rc.0"
+version = "0.2.1"
 edition = "2021"
-rust-version = "1.61"
+rust-version = "1.71.1"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
 description = "A framework to build software for remote systems"
 homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -15,17 +15,31 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
 [dependencies]
 delegate = ">0.7, <=0.10"
 paste = "1"
+derive-new = "0.6"
 smallvec = "1"
 crc = "3"
 [dependencies.satrs-shared]
-version = "0.1.3"
-path = "../satrs-shared"
+version = ">=0.1.3, <0.2"
 [dependencies.num_enum]
 version = ">0.5, <=0.7"
 default-features = false
+[dependencies.spacepackets]
+version = "0.11"
+default-features = false
+[dependencies.cobs]
+git = "https://github.com/robamu/cobs.rs.git"
+version = "0.2.3"
+branch = "all_features"
+default-features = false
+[dependencies.num-traits]
+version = "0.2"
+default-features = false
 [dependencies.dyn-clone]
 version = "1"
 optional = true
@@ -38,10 +52,6 @@ optional = true
 version = "0.7"
 optional = true
-[dependencies.num-traits]
-version = "0.2"
-default-features = false
 [dependencies.downcast-rs]
 version = "1.2"
 default-features = false
@@ -70,22 +80,19 @@ version = "0.5.4"
 features = ["all"]
 optional = true
-[dependencies.spacepackets]
-git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
-version = "0.11.0-rc.0"
-branch = "main"
-default-features = false
-[dependencies.cobs]
-git = "https://github.com/robamu/cobs.rs.git"
-version = "0.2.3"
-branch = "all_features"
-default-features = false
+[dependencies.mio]
+version = "0.8"
+features = ["os-poll", "net"]
+optional = true
+[dependencies.defmt]
+version = "0.3"
+optional = true
 [dev-dependencies]
 serde = "1"
 zerocopy = "0.7"
-once_cell = "1.13"
+once_cell = "1"
 serde_json = "1"
 rand = "0.8"
 tempfile = "3"
@@ -105,7 +112,8 @@ std = [
     "spacepackets/std",
     "num_enum/std",
     "thiserror",
-    "socket2"
+    "socket2",
+    "mio"
 ]
 alloc = [
     "serde/alloc",
@@ -117,9 +125,10 @@ alloc = [
 serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
 crossbeam = ["crossbeam-channel"]
 heapless = ["dep:heapless"]
+defmt = ["dep:defmt", "spacepackets/defmt"]
 test_util = []
 doc-images = []
 [package.metadata.docs.rs]
 all-features = true
-rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
+rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]


@@ -4,11 +4,11 @@ Checklist for new releases
 # Pre-Release
 1. Make sure any new modules are documented sufficiently enough and check docs with
-   `cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
+   `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
 2. Bump version specifier in `Cargo.toml`.
 3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
    `unreleased` section.
-4. Run `cargo test --all-features`.
+4. Run `cargo test --all-features` or `cargo nextest r --all-features` and `cargo test --doc`.
 5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
 6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
    targets.


@@ -1,7 +1,6 @@
-use crate::{params::Params, pool::StoreAddr};
+use crate::{params::Params, pool::PoolAddr};
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub use alloc_mod::*;
 pub type ActionId = u32;
@@ -22,9 +21,8 @@ impl ActionRequest {
 #[derive(Clone, Eq, PartialEq, Debug)]
 pub enum ActionRequestVariant {
     NoData,
-    StoreData(StoreAddr),
+    StoreData(PoolAddr),
     #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     VecData(alloc::vec::Vec<u8>),
 }
@@ -44,28 +42,21 @@ pub enum ActionReplyVariant {
 }
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod alloc_mod {
     use super::*;
-    #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     #[derive(Debug, Eq, PartialEq, Clone)]
     pub struct ActionRequestStringId {
         pub action_id: alloc::string::String,
         pub variant: ActionRequestVariant,
     }
-    #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     impl ActionRequestStringId {
         pub fn new(action_id: alloc::string::String, variant: ActionRequestVariant) -> Self {
             Self { action_id, variant }
         }
     }
-    #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     #[derive(Debug, PartialEq, Clone)]
     pub struct ActionReplyStringId {
         pub action_id: alloc::string::String,


@@ -1,163 +1,163 @@
-#[cfg(feature = "alloc")]
-use alloc::vec::Vec;
-#[cfg(feature = "alloc")]
-use hashbrown::HashSet;
-use spacepackets::PacketId;
-use crate::tmtc::ReceivesTcCore;
-pub trait PacketIdLookup {
-    fn validate(&self, packet_id: u16) -> bool;
-}
-#[cfg(feature = "alloc")]
-impl PacketIdLookup for Vec<u16> {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.contains(&packet_id)
-    }
-}
-#[cfg(feature = "alloc")]
-impl PacketIdLookup for HashSet<u16> {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.contains(&packet_id)
-    }
-}
-impl PacketIdLookup for [u16] {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.binary_search(&packet_id).is_ok()
-    }
-}
-impl PacketIdLookup for &[u16] {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.binary_search(&packet_id).is_ok()
-    }
-}
-#[cfg(feature = "alloc")]
-impl PacketIdLookup for Vec<PacketId> {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.contains(&PacketId::from(packet_id))
-    }
-}
-#[cfg(feature = "alloc")]
-impl PacketIdLookup for HashSet<PacketId> {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.contains(&PacketId::from(packet_id))
-    }
-}
-impl PacketIdLookup for [PacketId] {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.binary_search(&PacketId::from(packet_id)).is_ok()
-    }
-}
-impl PacketIdLookup for &[PacketId] {
-    fn validate(&self, packet_id: u16) -> bool {
-        self.binary_search(&PacketId::from(packet_id)).is_ok()
-    }
-}
+use spacepackets::{CcsdsPacket, SpHeader};
+use crate::{tmtc::PacketSenderRaw, ComponentId};
+#[derive(Debug, Copy, Clone, PartialEq, Eq)]
+pub enum SpValidity {
+    Valid,
+    /// The space packet can be assumed to have a valid format, but the packet should
+    /// be skipped.
+    Skip,
+    /// The space packet or space packet header has an invalid format, for example a CRC check
+    /// failed. In that case, the parser loses the packet synchronization and needs to check for
+    /// the start of a new space packet header again. The space packet header
+    /// [spacepackets::PacketId] can be used as a synchronization marker to detect the start
+    /// of a possible valid packet again.
+    Invalid,
+}
+/// Simple trait to allow user code to check the validity of a space packet.
+pub trait SpacePacketValidator {
+    fn validate(&self, sp_header: &SpHeader, raw_buf: &[u8]) -> SpValidity;
+}
+#[derive(Default, Debug, PartialEq, Eq)]
+pub struct ParseResult {
+    pub packets_found: u32,
+    /// If an incomplete space packet was found, its start index is indicated by this value.
+    pub incomplete_tail_start: Option<usize>,
+}
 /// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
-/// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then
-/// uses the length field of the packet to extract CCSDS packets.
+/// [spacepackets::SpHeader] of the CCSDS packets and a user provided [SpacePacketValidator]
+/// to check whether a received space packet is relevant for processing.
 ///
 /// This function is also able to deal with broken tail packets at the end as long as the parser
 /// can read the full 7 bytes which constitute a space packet header plus one byte minimal size.
 /// If broken tail packets are detected, they are moved to the front of the buffer, and the write
 /// index for future write operations will be written to the `next_write_idx` argument.
 ///
-/// The parser will write all packets which were decoded successfully to the given `tc_receiver`
-/// and return the number of packets found. If the [ReceivesTcCore::pass_tc] call fails, the
-/// error will be returned.
-pub fn parse_buffer_for_ccsds_space_packets<E>(
-    buf: &mut [u8],
-    packet_id_lookup: &(impl PacketIdLookup + ?Sized),
-    tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized),
-    next_write_idx: &mut usize,
-) -> Result<u32, E> {
-    *next_write_idx = 0;
-    let mut packets_found = 0;
+/// The parser will behave differently based on the [SpValidity] returned from the user provided
+/// [SpacePacketValidator]:
+///
+/// 1. [SpValidity::Valid]: The parser will forward all packets to the given `packet_sender` and
+///    return the number of packets found. If the [PacketSenderRaw::send_packet] call fails, the
+///    error will be returned.
+/// 2. [SpValidity::Invalid]: The parser assumes that the synchronization is lost and tries to
+///    find the start of a new space packet header by scanning all the following bytes.
+/// 3. [SpValidity::Skip]: The parser skips the packet using the packet length determined from the
+///    space packet header.
+pub fn parse_buffer_for_ccsds_space_packets<SendError>(
+    buf: &[u8],
+    packet_validator: &(impl SpacePacketValidator + ?Sized),
+    sender_id: ComponentId,
+    packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
+) -> Result<ParseResult, SendError> {
+    let mut parse_result = ParseResult::default();
     let mut current_idx = 0;
     let buf_len = buf.len();
     loop {
-        if current_idx + 7 >= buf.len() {
+        if current_idx + 7 > buf.len() {
             break;
         }
-        let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap());
-        if packet_id_lookup.validate(packet_id) {
-            let length_field =
-                u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
-            let packet_size = length_field + 7;
-            if (current_idx + packet_size as usize) <= buf_len {
-                tc_receiver.pass_tc(&buf[current_idx..current_idx + packet_size as usize])?;
-                packets_found += 1;
-            } else {
-                // Move packet to start of buffer if applicable.
-                if current_idx > 0 {
-                    buf.copy_within(current_idx.., 0);
-                    *next_write_idx = buf.len() - current_idx;
-                }
-            }
-            current_idx += packet_size as usize;
-            continue;
-        }
-        current_idx += 1;
+        let sp_header = SpHeader::from_be_bytes(&buf[current_idx..]).unwrap().0;
+        match packet_validator.validate(&sp_header, &buf[current_idx..]) {
+            SpValidity::Valid => {
+                let packet_size = sp_header.total_len();
+                if (current_idx + packet_size) <= buf_len {
+                    packet_sender
+                        .send_packet(sender_id, &buf[current_idx..current_idx + packet_size])?;
+                    parse_result.packets_found += 1;
+                } else {
+                    // Move packet to start of buffer if applicable.
+                    parse_result.incomplete_tail_start = Some(current_idx);
+                }
+                current_idx += packet_size;
+                continue;
+            }
+            SpValidity::Skip => {
+                current_idx += sp_header.total_len();
+            }
+            // We might have lost sync. Try to find the start of a new space packet header.
+            SpValidity::Invalid => {
+                current_idx += 1;
+            }
+        }
     }
-    Ok(packets_found)
+    Ok(parse_result)
 }
 #[cfg(test)]
 mod tests {
     use spacepackets::{
         ecss::{tc::PusTcCreator, WritablePusPacket},
-        PacketId, SpHeader,
+        CcsdsPacket, PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader,
     };
-    use crate::encoding::tests::TcCacher;
-    use super::parse_buffer_for_ccsds_space_packets;
+    use crate::{encoding::tests::TcCacher, ComponentId};
+    use super::{parse_buffer_for_ccsds_space_packets, SpValidity, SpacePacketValidator};
+    const PARSER_ID: ComponentId = 0x05;
     const TEST_APID_0: u16 = 0x02;
     const TEST_APID_1: u16 = 0x10;
-    const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
-    const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
+    const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
+    const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
+    #[derive(Default)]
+    struct SimpleVerificator {
+        pub enable_second_id: bool,
+    }
+    impl SimpleVerificator {
+        pub fn new_with_second_id() -> Self {
+            Self {
+                enable_second_id: true,
+            }
+        }
+    }
+    impl SpacePacketValidator for SimpleVerificator {
+        fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> super::SpValidity {
+            if sp_header.packet_id() == TEST_PACKET_ID_0
+                || (self.enable_second_id && sp_header.packet_id() == TEST_PACKET_ID_1)
+            {
+                return SpValidity::Valid;
+            }
+            SpValidity::Skip
+        }
+    }
     #[test]
     fn test_basic() {
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
+        let sph = SpHeader::new_from_apid(TEST_APID_0);
+        let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
         let mut buffer: [u8; 32] = [0; 32];
         let packet_len = ping_tc
             .write_to_bytes(&mut buffer)
             .expect("writing packet failed");
-        let valid_packet_ids = [TEST_PACKET_ID_0];
-        let mut tc_cacher = TcCacher::default();
-        let mut next_write_idx = 0;
+        let tc_cacher = TcCacher::default();
         let parse_result = parse_buffer_for_ccsds_space_packets(
-            &mut buffer,
-            valid_packet_ids.as_slice(),
-            &mut tc_cacher,
-            &mut next_write_idx,
+            &buffer,
+            &SimpleVerificator::default(),
+            PARSER_ID,
+            &tc_cacher,
         );
         assert!(parse_result.is_ok());
-        let parsed_packets = parse_result.unwrap();
-        assert_eq!(parsed_packets, 1);
-        assert_eq!(tc_cacher.tc_queue.len(), 1);
-        assert_eq!(
-            tc_cacher.tc_queue.pop_front().unwrap(),
-            buffer[..packet_len]
-        );
+        let parse_result = parse_result.unwrap();
+        assert_eq!(parse_result.packets_found, 1);
+        let mut queue = tc_cacher.tc_queue.borrow_mut();
+        assert_eq!(queue.len(), 1);
+        let packet_with_sender = queue.pop_front().unwrap();
+        assert_eq!(packet_with_sender.packet, buffer[..packet_len]);
+        assert_eq!(packet_with_sender.sender_id, PARSER_ID);
     }
     #[test]
     fn test_multi_packet() {
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
-        let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
+        let sph = SpHeader::new_from_apid(TEST_APID_0);
+        let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
+        let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
         let mut buffer: [u8; 32] = [0; 32];
         let packet_len_ping = ping_tc
             .write_to_bytes(&mut buffer)
@@ -165,35 +165,35 @@ mod tests {
         let packet_len_action = action_tc
             .write_to_bytes(&mut buffer[packet_len_ping..])
             .expect("writing packet failed");
-        let valid_packet_ids = [TEST_PACKET_ID_0];
-        let mut tc_cacher = TcCacher::default();
-        let mut next_write_idx = 0;
+        let tc_cacher = TcCacher::default();
         let parse_result = parse_buffer_for_ccsds_space_packets(
-            &mut buffer,
-            valid_packet_ids.as_slice(),
-            &mut tc_cacher,
-            &mut next_write_idx,
+            &buffer,
+            &SimpleVerificator::default(),
+            PARSER_ID,
+            &tc_cacher,
         );
         assert!(parse_result.is_ok());
-        let parsed_packets = parse_result.unwrap();
-        assert_eq!(parsed_packets, 2);
-        assert_eq!(tc_cacher.tc_queue.len(), 2);
+        let parse_result = parse_result.unwrap();
+        assert_eq!(parse_result.packets_found, 2);
+        let mut queue = tc_cacher.tc_queue.borrow_mut();
+        assert_eq!(queue.len(), 2);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
+        assert_eq!(packet_with_addr.sender_id, PARSER_ID);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.sender_id, PARSER_ID);
         assert_eq!(
-            tc_cacher.tc_queue.pop_front().unwrap(),
-            buffer[..packet_len_ping]
-        );
-        assert_eq!(
-            tc_cacher.tc_queue.pop_front().unwrap(),
+            packet_with_addr.packet,
             buffer[packet_len_ping..packet_len_ping + packet_len_action]
         );
     }
     #[test]
     fn test_multi_apid() {
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
-        sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
-        let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
+        let sph = SpHeader::new_from_apid(TEST_APID_0);
+        let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
+        let sph = SpHeader::new_from_apid(TEST_APID_1);
+        let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
         let mut buffer: [u8; 32] = [0; 32];
         let packet_len_ping = ping_tc
             .write_to_bytes(&mut buffer)
@@ -201,35 +201,30 @@ mod tests {
         let packet_len_action = action_tc
             .write_to_bytes(&mut buffer[packet_len_ping..])
             .expect("writing packet failed");
-        let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
-        let mut tc_cacher = TcCacher::default();
-        let mut next_write_idx = 0;
-        let parse_result = parse_buffer_for_ccsds_space_packets(
-            &mut buffer,
-            valid_packet_ids.as_slice(),
-            &mut tc_cacher,
-            &mut next_write_idx,
-        );
+        let tc_cacher = TcCacher::default();
+        let verificator = SimpleVerificator::new_with_second_id();
+        let parse_result =
+            parse_buffer_for_ccsds_space_packets(&buffer, &verificator, PARSER_ID, &tc_cacher);
         assert!(parse_result.is_ok());
-        let parsed_packets = parse_result.unwrap();
-        assert_eq!(parsed_packets, 2);
-        assert_eq!(tc_cacher.tc_queue.len(), 2);
+        let parse_result = parse_result.unwrap();
+        assert_eq!(parse_result.packets_found, 2);
+        let mut queue = tc_cacher.tc_queue.borrow_mut();
+        assert_eq!(queue.len(), 2);
+        let packet_with_addr = queue.pop_front().unwrap();
+        assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
+        let packet_with_addr = queue.pop_front().unwrap();
         assert_eq!(
-            tc_cacher.tc_queue.pop_front().unwrap(),
-            buffer[..packet_len_ping]
-        );
-        assert_eq!(
-            tc_cacher.tc_queue.pop_front().unwrap(),
+            packet_with_addr.packet,
             buffer[packet_len_ping..packet_len_ping + packet_len_action]
         );
     }
     #[test]
     fn test_split_packet_multi() {
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
-        sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
-        let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
+        let ping_tc =
+            PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
+        let action_tc =
+            PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
         let mut buffer: [u8; 32] = [0; 32];
         let packet_len_ping = ping_tc
            .write_to_bytes(&mut buffer)
@@ -237,45 +232,68 @@ mod tests {
         let packet_len_action = action_tc
.write_to_bytes(&mut buffer[packet_len_ping..]) .write_to_bytes(&mut buffer[packet_len_ping..])
.expect("writing packet failed"); .expect("writing packet failed");
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1]; let tc_cacher = TcCacher::default();
let mut tc_cacher = TcCacher::default(); let verificator = SimpleVerificator::new_with_second_id();
let mut next_write_idx = 0;
let parse_result = parse_buffer_for_ccsds_space_packets( let parse_result = parse_buffer_for_ccsds_space_packets(
&mut buffer[..packet_len_ping + packet_len_action - 4], &buffer[..packet_len_ping + packet_len_action - 4],
valid_packet_ids.as_slice(), &verificator,
&mut tc_cacher, PARSER_ID,
&mut next_write_idx, &tc_cacher,
); );
assert!(parse_result.is_ok()); assert!(parse_result.is_ok());
let parsed_packets = parse_result.unwrap(); let parse_result = parse_result.unwrap();
assert_eq!(parsed_packets, 1); assert_eq!(parse_result.packets_found, 1);
assert_eq!(tc_cacher.tc_queue.len(), 1); assert!(parse_result.incomplete_tail_start.is_some());
let incomplete_tail_idx = parse_result.incomplete_tail_start.unwrap();
assert_eq!(incomplete_tail_idx, packet_len_ping);
let queue = tc_cacher.tc_queue.borrow();
assert_eq!(queue.len(), 1);
// The broken packet was moved to the start, so the next write index should be after the // The broken packet was moved to the start, so the next write index should be after the
// last segment missing 4 bytes. // last segment missing 4 bytes.
assert_eq!(next_write_idx, packet_len_action - 4);
} }
#[test] #[test]
fn test_one_split_packet() { fn test_one_split_packet() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)
.expect("writing packet failed"); .expect("writing packet failed");
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1]; let tc_cacher = TcCacher::default();
let mut tc_cacher = TcCacher::default();
let mut next_write_idx = 0; let verificator = SimpleVerificator::new_with_second_id();
let parse_result = parse_buffer_for_ccsds_space_packets( let parse_result = parse_buffer_for_ccsds_space_packets(
&mut buffer[..packet_len_ping - 4], &buffer[..packet_len_ping - 4],
valid_packet_ids.as_slice(), &verificator,
&mut tc_cacher, PARSER_ID,
&mut next_write_idx, &tc_cacher,
); );
assert_eq!(next_write_idx, 0);
assert!(parse_result.is_ok()); assert!(parse_result.is_ok());
let parsed_packets = parse_result.unwrap(); let parse_result = parse_result.unwrap();
assert_eq!(parsed_packets, 0); assert_eq!(parse_result.packets_found, 0);
assert_eq!(tc_cacher.tc_queue.len(), 0); let queue = tc_cacher.tc_queue.borrow();
assert_eq!(queue.len(), 0);
}
#[test]
fn test_smallest_packet() {
let ccsds_header_only = SpHeader::new(
PacketId::new(PacketType::Tc, true, TEST_APID_0),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
0,
);
let mut buf: [u8; 7] = [0; 7];
ccsds_header_only
.write_to_be_bytes(&mut buf)
.expect("writing failed");
let verificator = SimpleVerificator::default();
let tc_cacher = TcCacher::default();
let parse_result =
parse_buffer_for_ccsds_space_packets(&buf, &verificator, PARSER_ID, &tc_cacher);
assert!(parse_result.is_ok());
let parse_result = parse_result.unwrap();
assert_eq!(parse_result.packets_found, 1);
} }
} }
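For readers tracking the API change: a minimal usage sketch of the new calling convention exercised by these tests. The module paths, the validator type, and the parser ID constant are illustrative assumptions, not taken from the diff above.

    use satrs::encoding::ccsds::{parse_buffer_for_ccsds_space_packets, SpacePacketValidator};
    use satrs::encoding::SpValidity;
    use satrs::tmtc::PacketSenderRaw;
    use satrs::ComponentId;
    use spacepackets::{PacketId, SpHeader};

    const PARSER_ID: ComponentId = 0x05;

    struct SingleIdValidator {
        valid_id: PacketId,
    }

    impl SpacePacketValidator for SingleIdValidator {
        fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> SpValidity {
            if sp_header.packet_id() == self.valid_id {
                return SpValidity::Valid;
            }
            // Skip: ignore this candidate and keep scanning the buffer.
            SpValidity::Skip
        }
    }

    fn parse_chunk(
        buf: &[u8],
        validator: &SingleIdValidator,
        sender: &impl PacketSenderRaw<Error = ()>,
    ) {
        let result = parse_buffer_for_ccsds_space_packets(buf, validator, PARSER_ID, sender)
            .expect("packet sender failed");
        // Bytes from incomplete_tail_start onwards belong to a split packet and
        // should be carried over into the next read buffer.
        println!(
            "{} packet(s) found, split tail at: {:?}",
            result.packets_found, result.incomplete_tail_start
        );
    }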

View File

@@ -1,4 +1,4 @@
-use crate::tmtc::ReceivesTcCore;
+use crate::{tmtc::PacketSenderRaw, ComponentId};
 use cobs::{decode_in_place, encode, max_encoding_length};

 /// This function encodes the given packet with COBS and also wraps the encoded packet with
@@ -55,11 +55,12 @@ pub fn encode_packet_with_cobs(
 /// future write operations will be written to the `next_write_idx` argument.
 ///
 /// The parser will write all packets which were decoded successfully to the given `tc_receiver`.
-pub fn parse_buffer_for_cobs_encoded_packets<E>(
+pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
     buf: &mut [u8],
-    tc_receiver: &mut dyn ReceivesTcCore<Error = E>,
+    sender_id: ComponentId,
+    packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
     next_write_idx: &mut usize,
-) -> Result<u32, E> {
+) -> Result<u32, SendError> {
     let mut start_index_packet = 0;
     let mut start_found = false;
     let mut last_byte = false;
@@ -78,8 +79,10 @@ pub fn parse_buffer_for_cobs_encoded_packets<E>(
                 let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
                 if let Ok(packet_len) = decode_result {
                     packets_found += 1;
-                    tc_receiver
-                        .pass_tc(&buf[start_index_packet..start_index_packet + packet_len])?;
+                    packet_sender.send_packet(
+                        sender_id,
+                        &buf[start_index_packet..start_index_packet + packet_len],
+                    )?;
                 }
                 start_found = false;
             } else {
@@ -100,32 +103,39 @@ pub fn parse_buffer_for_cobs_encoded_packets<E>(
 pub(crate) mod tests {
     use cobs::encode;

-    use crate::encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET};
+    use crate::{
+        encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET},
+        ComponentId,
+    };

     use super::parse_buffer_for_cobs_encoded_packets;

+    const PARSER_ID: ComponentId = 0x05;
+
     #[test]
     fn test_parsing_simple_packet() {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut encoded_buf: [u8; 16] = [0; 16];
         let mut current_idx = 0;
         encode_simple_packet(&mut encoded_buf, &mut current_idx);
         let mut next_read_idx = 0;
         let packets = parse_buffer_for_cobs_encoded_packets(
             &mut encoded_buf[0..current_idx],
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_read_idx,
         )
         .unwrap();
         assert_eq!(packets, 1);
-        assert_eq!(test_sender.tc_queue.len(), 1);
-        let packet = &test_sender.tc_queue[0];
-        assert_eq!(packet, &SIMPLE_PACKET);
+        let queue = test_sender.tc_queue.borrow();
+        assert_eq!(queue.len(), 1);
+        let packet = &queue[0];
+        assert_eq!(packet.packet, &SIMPLE_PACKET);
     }

     #[test]
     fn test_parsing_consecutive_packets() {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut encoded_buf: [u8; 16] = [0; 16];
         let mut current_idx = 0;
         encode_simple_packet(&mut encoded_buf, &mut current_idx);
@@ -139,21 +149,23 @@ pub(crate) mod tests {
         let mut next_read_idx = 0;
         let packets = parse_buffer_for_cobs_encoded_packets(
             &mut encoded_buf[0..current_idx],
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_read_idx,
         )
         .unwrap();
         assert_eq!(packets, 2);
-        assert_eq!(test_sender.tc_queue.len(), 2);
-        let packet0 = &test_sender.tc_queue[0];
-        assert_eq!(packet0, &SIMPLE_PACKET);
-        let packet1 = &test_sender.tc_queue[1];
-        assert_eq!(packet1, &INVERTED_PACKET);
+        let queue = test_sender.tc_queue.borrow();
+        assert_eq!(queue.len(), 2);
+        let packet0 = &queue[0];
+        assert_eq!(packet0.packet, &SIMPLE_PACKET);
+        let packet1 = &queue[1];
+        assert_eq!(packet1.packet, &INVERTED_PACKET);
     }

     #[test]
     fn test_split_tail_packet_only() {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut encoded_buf: [u8; 16] = [0; 16];
         let mut current_idx = 0;
         encode_simple_packet(&mut encoded_buf, &mut current_idx);
@@ -161,17 +173,19 @@ pub(crate) mod tests {
         let packets = parse_buffer_for_cobs_encoded_packets(
             // Cut off the sentinel byte at the end.
             &mut encoded_buf[0..current_idx - 1],
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_read_idx,
         )
         .unwrap();
         assert_eq!(packets, 0);
-        assert_eq!(test_sender.tc_queue.len(), 0);
+        let queue = test_sender.tc_queue.borrow();
+        assert_eq!(queue.len(), 0);
         assert_eq!(next_read_idx, 0);
     }

     fn generic_test_split_packet(cut_off: usize) {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut encoded_buf: [u8; 16] = [0; 16];
         assert!(cut_off < INVERTED_PACKET.len() + 1);
         let mut current_idx = 0;
@@ -193,13 +207,15 @@ pub(crate) mod tests {
         let packets = parse_buffer_for_cobs_encoded_packets(
             // Cut off the sentinel byte at the end.
             &mut encoded_buf[0..current_idx - cut_off],
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_write_idx,
         )
         .unwrap();
         assert_eq!(packets, 1);
-        assert_eq!(test_sender.tc_queue.len(), 1);
-        assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
+        let queue = test_sender.tc_queue.borrow();
+        assert_eq!(queue.len(), 1);
+        assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
         assert_eq!(next_write_idx, next_expected_write_idx);
         assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start);
     }
@@ -221,7 +237,7 @@ pub(crate) mod tests {
     #[test]
     fn test_zero_at_end() {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut encoded_buf: [u8; 16] = [0; 16];
         let mut next_write_idx = 0;
         let mut current_idx = 0;
@@ -233,31 +249,35 @@ pub(crate) mod tests {
         let packets = parse_buffer_for_cobs_encoded_packets(
             // Cut off the sentinel byte at the end.
             &mut encoded_buf[0..current_idx],
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_write_idx,
         )
         .unwrap();
         assert_eq!(packets, 1);
-        assert_eq!(test_sender.tc_queue.len(), 1);
-        assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
+        let queue = test_sender.tc_queue.borrow_mut();
+        assert_eq!(queue.len(), 1);
+        assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
         assert_eq!(next_write_idx, 1);
         assert_eq!(encoded_buf[0], 0);
     }

     #[test]
     fn test_all_zeroes() {
-        let mut test_sender = TcCacher::default();
+        let test_sender = TcCacher::default();
         let mut all_zeroes: [u8; 5] = [0; 5];
         let mut next_write_idx = 0;
         let packets = parse_buffer_for_cobs_encoded_packets(
             // Cut off the sentinel byte at the end.
             &mut all_zeroes,
-            &mut test_sender,
+            PARSER_ID,
+            &test_sender,
             &mut next_write_idx,
         )
         .unwrap();
         assert_eq!(packets, 0);
-        assert!(test_sender.tc_queue.is_empty());
+        let queue = test_sender.tc_queue.borrow();
+        assert!(queue.is_empty());
         assert_eq!(next_write_idx, 0);
     }
 }
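A minimal usage sketch of the reworked COBS parser signature, assuming the satrs module paths; QueueSender is a hypothetical stand-in that mirrors the TcCacher test helper shown in the next file.

    use core::cell::RefCell;
    use std::collections::VecDeque;

    use satrs::encoding::cobs::parse_buffer_for_cobs_encoded_packets;
    use satrs::tmtc::{PacketAsVec, PacketSenderRaw};
    use satrs::ComponentId;

    const PARSER_ID: ComponentId = 0x05;

    #[derive(Default)]
    struct QueueSender {
        queue: RefCell<VecDeque<PacketAsVec>>,
    }

    impl PacketSenderRaw for QueueSender {
        type Error = ();

        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
            // RefCell keeps send_packet callable through the shared reference the parser holds.
            self.queue
                .borrow_mut()
                .push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
            Ok(())
        }
    }

    fn main() {
        let sender = QueueSender::default();
        // Zero-delimited COBS frame which decodes to the single byte 0x01.
        let mut buf = [0x00, 0x02, 0x01, 0x00];
        let mut next_write_idx = 0;
        let packets =
            parse_buffer_for_cobs_encoded_packets(&mut buf, PARSER_ID, &sender, &mut next_write_idx)
                .expect("parsing failed");
        assert_eq!(packets, 1);
        assert_eq!(sender.queue.borrow()[0].packet, [0x01]);
    }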

View File

@@ -6,9 +6,14 @@ pub use crate::encoding::cobs::{encode_packet_with_cobs, parse_buffer_for_cobs_e
 #[cfg(test)]
 pub(crate) mod tests {
-    use alloc::{collections::VecDeque, vec::Vec};
+    use core::cell::RefCell;

-    use crate::tmtc::ReceivesTcCore;
+    use alloc::collections::VecDeque;
+
+    use crate::{
+        tmtc::{PacketAsVec, PacketSenderRaw},
+        ComponentId,
+    };

     use super::cobs::encode_packet_with_cobs;
@@ -17,14 +22,15 @@ pub(crate) mod tests {
     #[derive(Default)]
     pub(crate) struct TcCacher {
-        pub(crate) tc_queue: VecDeque<Vec<u8>>,
+        pub(crate) tc_queue: RefCell<VecDeque<PacketAsVec>>,
     }

-    impl ReceivesTcCore for TcCacher {
+    impl PacketSenderRaw for TcCacher {
         type Error = ();

-        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
-            self.tc_queue.push_back(tc_raw.to_vec());
+        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
+            let mut mut_queue = self.tc_queue.borrow_mut();
+            mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
             Ok(())
         }
     }
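Because send_packet now takes &self, a plain channel handle can also satisfy the trait without RefCell or locking. A hypothetical sketch under that assumption:

    use std::sync::mpsc;

    use satrs::queue::GenericSendError;
    use satrs::tmtc::{PacketAsVec, PacketSenderRaw};
    use satrs::ComponentId;

    // Hypothetical channel-backed sender: mpsc senders also send through a
    // shared reference, so they fit the reworked trait directly.
    struct ChannelSender(mpsc::Sender<PacketAsVec>);

    impl PacketSenderRaw for ChannelSender {
        type Error = GenericSendError;

        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
            self.0
                .send(PacketAsVec::new(sender_id, tc_raw.to_vec()))
                .map_err(|_| GenericSendError::RxDisconnected)
        }
    }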

View File

@@ -1,17 +1,15 @@
 //! Event management and forwarding
 //!
-//! This module provides components to perform event routing. The most important component for this
-//! task is the [EventManager]. It receives all events and then routes them to event subscribers
-//! where appropriate. One common use case for satellite systems is to offer a light-weight
-//! publish-subscribe mechanism and IPC mechanism for software and hardware events which are also
-//! packaged as telemetry (TM) or can trigger a system response.
-//!
 //! It is recommended to read the
 //! [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/events.html)
-//! about events first:
+//! about events first.
+//!
+//! This module provides components to perform event routing. The most important component for this
+//! task is the [EventManager]. It receives all events and then routes them to event subscribers
+//! where appropriate.
 //!
 //! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps
-//! listener groups identified by [ListenerKey]s to a [sender ID][ComponentId].
+//! listener groups identified by [ListenerKey]s to a [listener ID][ComponentId].
 //! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender
 //! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider
 //! for each OBSW thread and then subscribe for all interesting events for a particular thread
@@ -21,15 +19,15 @@
 //!
 //! 1. Provide a concrete [EventReceiveProvider] implementation. This abstraction allow to use different
 //!    message queue backends. A straightforward implementation where dynamic memory allocation is
-//!    not a big concern could use [std::sync::mpsc::channel] to do this and is provided in
-//!    form of the [MpscEventReceiver].
+//!    not a big concern would be to use the [std::sync::mpsc::Receiver] handle. The trait is
+//!    already implemented for this type.
 //! 2. To set up event creators, create channel pairs using some message queue implementation.
 //!    Each event creator gets a (cloned) sender component which allows it to send events to the
 //!    manager.
 //! 3. The event manager receives the receiver component as part of a [EventReceiveProvider]
 //!    implementation so all events are routed to the manager.
-//! 4. Create the [send event providers][EventSendProvider]s which allow routing events to
-//!    subscribers. You can now use their [sender IDs][EventSendProvider::channel_id] to subscribe
+//! 4. Create the [event sender map][SenderMapProvider]s which allow routing events to
+//!    subscribers. You can now use the subscriber component IDs to subscribe
 //!    for event groups, for example by using the [EventManager::subscribe_single] method.
 //! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
 //!    manager can route listener groups to a the send provider.
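The five setup steps map to only a few lines of user code. A minimal wiring sketch under the new API from this changeset, assuming the satrs::event_man re-exports; the component IDs and the event value are arbitrary example choices:

    use std::sync::mpsc;

    use satrs::event_man::{
        EventManager, EventManagerWithMpsc, EventMessage, EventMessageU32, EventRoutingError,
        EventSenderMpsc,
    };
    use satrs::events::{EventU32, Severity};

    fn main() {
        // Steps 2 and 3: one channel feeding the manager; mpsc::Receiver now
        // implements EventReceiveProvider directly.
        let (event_tx, event_rx) = mpsc::channel::<EventMessageU32>();
        let mut event_man: EventManagerWithMpsc = EventManager::new(event_rx);
        // Steps 4 and 5: one sender per subscriber, registered via its component ID.
        let (sub_tx, sub_rx) = mpsc::channel();
        let subscriber = EventSenderMpsc::new(1, sub_tx);
        let event = EventU32::new(Severity::Info, 0, 0);
        event_man.subscribe_single(&event, subscriber.target_id());
        event_man.add_sender(subscriber);
        // An event creator (component ID 0) reports the event...
        event_tx.send(EventMessage::new(0, event)).unwrap();
        // ...and one routing call forwards it to the subscriber.
        event_man.try_event_handling(|msg: &EventMessageU32, e: EventRoutingError| {
            panic!("routing error for {:?}: {:?}", msg, e)
        });
        assert_eq!(sub_rx.try_recv().unwrap().event(), event);
    }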
@@ -44,9 +42,16 @@
 //! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/pus_events.rs)
 //! for a concrete example using multi-threading where events are routed to
 //! different threads.
+//!
+//! The [satrs-example](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
+//! also contains a full event manager instance and exposes a test event via the PUS test service.
+//! The [PUS event](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/pus/event.rs)
+//! module and the generic [events module](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/events.rs)
+//! show how the event management modules can be integrated into a more complex software.
 use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
-use crate::params::{Params, ParamsHeapless};
+use crate::params::Params;
 use crate::queue::GenericSendError;
+use core::fmt::Debug;
 use core::marker::PhantomData;
 use core::slice::Iter;
@@ -65,45 +70,81 @@ pub enum ListenerKey {
     All,
 }

-pub type EventWithHeaplessAuxData<Event> = (Event, Option<ParamsHeapless>);
-pub type EventU32WithHeaplessAuxData = EventWithHeaplessAuxData<EventU32>;
-pub type EventU16WithHeaplessAuxData = EventWithHeaplessAuxData<EventU16>;
-
-pub type EventWithAuxData<Event> = (Event, Option<Params>);
-pub type EventU32WithAuxData = EventWithAuxData<EventU32>;
-pub type EventU16WithAuxData = EventWithAuxData<EventU16>;
-
-pub trait EventSendProvider<EV: GenericEvent, AuxDataProvider = Params> {
-    fn target_id(&self) -> ComponentId;
-
-    fn send_no_data(&self, event: EV) -> Result<(), GenericSendError> {
-        self.send(event, None)
-    }
-
-    fn send(&self, event: EV, aux_data: Option<AuxDataProvider>) -> Result<(), GenericSendError>;
+#[derive(Debug)]
+pub struct EventMessage<Event: GenericEvent, ParamProvider: Debug = Params> {
+    sender_id: ComponentId,
+    event: Event,
+    params: Option<ParamProvider>,
+}
+
+impl<Event: GenericEvent, ParamProvider: Debug + Clone> EventMessage<Event, ParamProvider> {
+    pub fn new_generic(
+        sender_id: ComponentId,
+        event: Event,
+        params: Option<&ParamProvider>,
+    ) -> Self {
+        Self {
+            sender_id,
+            event,
+            params: params.cloned(),
+        }
+    }
+
+    pub fn sender_id(&self) -> ComponentId {
+        self.sender_id
+    }
+
+    pub fn event(&self) -> Event {
+        self.event
+    }
+
+    pub fn params(&self) -> Option<&ParamProvider> {
+        self.params.as_ref()
+    }
+
+    pub fn new(sender_id: ComponentId, event: Event) -> Self {
+        Self::new_generic(sender_id, event, None)
+    }
+
+    pub fn new_with_params(sender_id: ComponentId, event: Event, params: &ParamProvider) -> Self {
+        Self::new_generic(sender_id, event, Some(params))
+    }
+}
+
+pub type EventMessageU32 = EventMessage<EventU32, Params>;
+pub type EventMessageU16 = EventMessage<EventU16, Params>;
+
+/// Generic abstraction
+pub trait EventSendProvider<Event: GenericEvent, ParamProvider: Debug = Params> {
+    type Error;
+
+    fn target_id(&self) -> ComponentId;
+
+    fn send(&self, message: EventMessage<Event, ParamProvider>) -> Result<(), Self::Error>;
 }

 /// Generic abstraction for an event receiver.
-pub trait EventReceiveProvider<Event: GenericEvent, AuxDataProvider = Params> {
+pub trait EventReceiveProvider<Event: GenericEvent, ParamsProvider: Debug = Params> {
+    type Error;
+
     /// This function has to be provided by any event receiver. A call may or may not return
     /// an event and optional auxiliary data.
-    fn try_recv_event(&self) -> Option<(Event, Option<AuxDataProvider>)>;
+    fn try_recv_event(&self) -> Result<Option<EventMessage<Event, ParamsProvider>>, Self::Error>;
 }

 pub trait ListenerMapProvider {
     #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     fn get_listeners(&self) -> alloc::vec::Vec<ListenerKey>;
     fn contains_listener(&self, key: &ListenerKey) -> bool;
     fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ComponentId>>;
-    fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool;
+    fn add_listener(&mut self, key: ListenerKey, listener_id: ComponentId) -> bool;
     fn remove_duplicates(&mut self, key: &ListenerKey);
 }

 pub trait SenderMapProvider<
-    EventSender: EventSendProvider<Ev, Data>,
-    Ev: GenericEvent = EventU32,
-    Data = Params,
+    EventSender: EventSendProvider<Event, ParamProvider>,
+    Event: GenericEvent = EventU32,
+    ParamProvider: Debug = Params,
 >
 {
     fn contains_send_event_provider(&self, target_id: &ComponentId) -> bool;
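A short sketch of how the new EventMessage type replaces the removed tuple aliases; the sender component ID 42 is an arbitrary example value:

    use satrs::event_man::{EventMessage, EventMessageU32};
    use satrs::events::{EventU32, Severity};
    use satrs::params::Params;

    fn main() {
        let event = EventU32::new(Severity::Info, 0, 5);
        // Plain event message without parameters.
        let msg = EventMessageU32::new(42, event);
        assert!(msg.params().is_none());
        // Parameters are passed by reference and cloned into the message.
        let msg = EventMessage::new_with_params(42, event, &Params::Heapless((2_u32, 3_u32).into()));
        assert_eq!(msg.sender_id(), 42);
        assert_eq!(msg.event(), event);
        assert!(msg.params().is_some());
    }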
@@ -120,32 +161,32 @@ pub trait SenderMapProvider<
 /// * `SenderMap`: [SenderMapProvider] which maps channel IDs to send providers.
 /// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs.
 /// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events.
-/// * `Ev`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32]
+/// * `Event`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32]
 ///   and [EventU16] are supported.
-/// * `Data`: Auxiliary data which is sent with the event to provide optional context information
+/// * `ParamProvider`: Auxiliary data which is sent with the event to provide optional context
+///   information
 pub struct EventManager<
-    EventReceiver: EventReceiveProvider<Ev, Data>,
-    SenderMap: SenderMapProvider<EventSender, Ev, Data>,
+    EventReceiver: EventReceiveProvider<Event, ParamProvider>,
+    SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
     ListenerMap: ListenerMapProvider,
-    EventSender: EventSendProvider<Ev, Data>,
-    Ev: GenericEvent = EventU32,
-    Data = Params,
+    EventSender: EventSendProvider<Event, ParamProvider>,
+    Event: GenericEvent = EventU32,
+    ParamProvider: Debug = Params,
 > {
     event_receiver: EventReceiver,
     sender_map: SenderMap,
     listener_map: ListenerMap,
-    phantom: core::marker::PhantomData<(EventSender, Ev, Data)>,
+    phantom: core::marker::PhantomData<(EventSender, Event, ParamProvider)>,
 }

 #[derive(Debug)]
-pub enum EventRoutingResult<EV: GenericEvent, AUX> {
+pub enum EventRoutingResult<Event: GenericEvent, ParamProvider: Debug> {
     /// No event was received
     Empty,
     /// An event was received and routed to listeners.
     Handled {
         num_recipients: u32,
-        event: EV,
-        aux_data: Option<AUX>,
+        event_msg: EventMessage<Event, ParamProvider>,
     },
 }
@@ -156,27 +197,21 @@ pub enum EventRoutingError {
     NoSenderForId(ComponentId),
 }

-#[derive(Debug)]
-pub struct EventRoutingErrorsWithResult<Ev: GenericEvent, Data> {
-    pub result: EventRoutingResult<Ev, Data>,
-    pub errors: [Option<EventRoutingError>; 3],
-}
-
 impl<
-        EventReceiver: EventReceiveProvider<Ev, Data>,
-        SenderMap: SenderMapProvider<EventSender, Ev, Data>,
+        EventReceiver: EventReceiveProvider<Event, ParamProvider>,
+        SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
         ListenerMap: ListenerMapProvider,
-        EventSender: EventSendProvider<Ev, Data>,
-        Ev: GenericEvent + Copy,
-        Data: Clone,
-    > EventManager<EventReceiver, SenderMap, ListenerMap, EventSender, Ev, Data>
+        EventSender: EventSendProvider<Event, ParamProvider>,
+        Event: GenericEvent + Copy,
+        ParamProvider: Debug,
+    > EventManager<EventReceiver, SenderMap, ListenerMap, EventSender, Event, ParamProvider>
 {
     pub fn remove_duplicates(&mut self, key: &ListenerKey) {
         self.listener_map.remove_duplicates(key)
     }

     /// Subscribe for a unique event.
-    pub fn subscribe_single(&mut self, event: &Ev, sender_id: ComponentId) {
+    pub fn subscribe_single(&mut self, event: &Event, sender_id: ComponentId) {
         self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
     }
@@ -193,17 +228,20 @@ impl<
         self.update_listeners(ListenerKey::All, sender_id);
     }
 }
+
 impl<
-        ERP: EventReceiveProvider<EV, AUX>,
-        SMP: SenderMapProvider<SP, EV, AUX>,
-        LTR: ListenerMapProvider,
-        SP: EventSendProvider<EV, AUX>,
-        EV: GenericEvent + Copy,
-        AUX: Clone,
-    > EventManager<ERP, SMP, LTR, SP, EV, AUX>
+        EventReceiver: EventReceiveProvider<Event, ParamProvider>,
+        SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
+        ListenerMap: ListenerMapProvider,
+        EventSenderMap: EventSendProvider<Event, ParamProvider>,
+        Event: GenericEvent + Copy,
+        ParamProvider: Debug,
+    > EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
 {
-    pub fn new_with_custom_maps(event_receiver: ERP, sender_map: SMP, listener_map: LTR) -> Self {
+    pub fn new_with_custom_maps(
+        event_receiver: EventReceiver,
+        sender_map: SenderMap,
+        listener_map: ListenerMap,
+    ) -> Self {
         EventManager {
             listener_map,
             sender_map,
@@ -213,7 +251,7 @@ impl<
     }

     /// Add a new sender component which can be used to send events to subscribers.
-    pub fn add_sender(&mut self, send_provider: SP) {
+    pub fn add_sender(&mut self, send_provider: EventSenderMap) {
         if !self
             .sender_map
             .contains_send_event_provider(&send_provider.target_id())
@@ -226,68 +264,66 @@ impl<
     fn update_listeners(&mut self, key: ListenerKey, sender_id: ComponentId) {
         self.listener_map.add_listener(key, sender_id);
     }
+}
+
+impl<
+        EventReceiver: EventReceiveProvider<Event, ParamProvider>,
+        SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
+        ListenerMap: ListenerMapProvider,
+        EventSenderMap: EventSendProvider<Event, ParamProvider, Error = GenericSendError>,
+        Event: GenericEvent + Copy,
+        ParamProvider: Clone + Debug,
+    > EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
+{
     /// This function will use the cached event receiver and try to receive one event.
     /// If an event was received, it will try to route that event to all subscribed event listeners.
     /// If this works without any issues, the [EventRoutingResult] will contain context information
     /// about the routed event.
     ///
-    /// This function will track up to 3 errors returned as part of the
-    /// [EventRoutingErrorsWithResult] error struct.
-    pub fn try_event_handling(
+    /// If an error occurs during the routing, the error handler will be called. The error handler
+    /// should take a reference to the event message as the first argument, and the routing error
+    /// as the second argument.
+    pub fn try_event_handling<E: FnMut(&EventMessage<Event, ParamProvider>, EventRoutingError)>(
         &self,
-    ) -> Result<EventRoutingResult<EV, AUX>, EventRoutingErrorsWithResult<EV, AUX>> {
-        let mut err_idx = 0;
-        let mut err_slice = [None, None, None];
+        mut error_handler: E,
+    ) -> EventRoutingResult<Event, ParamProvider> {
         let mut num_recipients = 0;
-        let mut add_error = |error: EventRoutingError| {
-            if err_idx < 3 {
-                err_slice[err_idx] = Some(error);
-                err_idx += 1;
-            }
-        };
-        let mut send_handler = |key: &ListenerKey, event: EV, aux_data: &Option<AUX>| {
-            if self.listener_map.contains_listener(key) {
-                if let Some(ids) = self.listener_map.get_listener_ids(key) {
-                    for id in ids {
-                        if let Some(sender) = self.sender_map.get_send_event_provider(id) {
-                            if let Err(e) = sender.send(event, aux_data.clone()) {
-                                add_error(EventRoutingError::Send(e));
-                            } else {
-                                num_recipients += 1;
-                            }
-                        } else {
-                            add_error(EventRoutingError::NoSenderForId(*id));
-                        }
-                    }
-                } else {
-                    add_error(EventRoutingError::NoSendersForKey(*key));
-                }
-            }
-        };
-        if let Some((event, aux_data)) = self.event_receiver.try_recv_event() {
-            let single_key = ListenerKey::Single(event.raw_as_largest_type());
-            send_handler(&single_key, event, &aux_data);
-            let group_key = ListenerKey::Group(event.group_id_as_largest_type());
-            send_handler(&group_key, event, &aux_data);
-            send_handler(&ListenerKey::All, event, &aux_data);
-            if err_idx > 0 {
-                return Err(EventRoutingErrorsWithResult {
-                    result: EventRoutingResult::Handled {
-                        num_recipients,
-                        event,
-                        aux_data,
-                    },
-                    errors: err_slice,
-                });
-            }
-            return Ok(EventRoutingResult::Handled {
-                num_recipients,
-                event,
-                aux_data,
-            });
-        }
-        Ok(EventRoutingResult::Empty)
+        let mut send_handler =
+            |key: &ListenerKey, event_msg: &EventMessage<Event, ParamProvider>| {
+                if self.listener_map.contains_listener(key) {
+                    if let Some(ids) = self.listener_map.get_listener_ids(key) {
+                        for id in ids {
+                            if let Some(sender) = self.sender_map.get_send_event_provider(id) {
+                                if let Err(e) = sender.send(EventMessage::new_generic(
+                                    event_msg.sender_id,
+                                    event_msg.event,
+                                    event_msg.params.as_ref(),
+                                )) {
+                                    error_handler(event_msg, EventRoutingError::Send(e));
+                                } else {
+                                    num_recipients += 1;
+                                }
+                            } else {
+                                error_handler(event_msg, EventRoutingError::NoSenderForId(*id));
+                            }
+                        }
+                    } else {
+                        error_handler(event_msg, EventRoutingError::NoSendersForKey(*key));
+                    }
+                }
+            };
+        if let Ok(Some(event_msg)) = self.event_receiver.try_recv_event() {
+            let single_key = ListenerKey::Single(event_msg.event.raw_as_largest_type());
+            send_handler(&single_key, &event_msg);
+            let group_key = ListenerKey::Group(event_msg.event.group_id_as_largest_type());
+            send_handler(&group_key, &event_msg);
+            send_handler(&ListenerKey::All, &event_msg);
+            return EventRoutingResult::Handled {
+                num_recipients,
+                event_msg,
+            };
+        }
+        EventRoutingResult::Empty
     }
 }
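A sketch of the caller-side pattern for the reworked try_event_handling, mirroring the error handler closures used in the tests further down; a flight implementation would likely log routing errors or emit telemetry instead of panicking:

    use satrs::event_man::{
        EventManagerWithMpsc, EventMessageU32, EventRoutingError, EventRoutingResult,
    };

    // Poll the manager once and report what happened.
    fn handle_one_event(event_man: &EventManagerWithMpsc) {
        let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
            panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
        };
        match event_man.try_event_handling(error_handler) {
            EventRoutingResult::Empty => (), // no event was pending
            EventRoutingResult::Handled {
                num_recipients,
                event_msg,
            } => println!(
                "event {:?} routed to {} recipient(s)",
                event_msg.event(),
                num_recipients
            ),
        }
    }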
@@ -300,34 +336,42 @@ pub mod alloc_mod {
     /// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
     /// and the [DefaultListenerMap]. It uses regular mpsc channels as the message queue backend.
-    pub type EventManagerWithMpsc<EV = EventU32, AUX = Params> = EventManager<
-        MpscEventReceiver,
-        DefaultSenderMap<EventSenderMpsc<EV>, EV, AUX>,
+    pub type EventManagerWithMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
+        EventU32ReceiverMpsc<ParamProvider>,
+        DefaultSenderMap<EventSenderMpsc<Event>, Event, ParamProvider>,
         DefaultListenerMap,
-        EventSenderMpsc<EV>,
+        EventSenderMpsc<Event>,
     >;

     /// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
     /// and the [DefaultListenerMap]. It uses
     /// [bounded mpsc senders](https://doc.rust-lang.org/std/sync/mpsc/struct.SyncSender.html) as the
     /// message queue backend.
-    pub type EventManagerWithBoundedMpsc<EV = EventU32, AUX = Params> = EventManager<
-        MpscEventReceiver,
-        DefaultSenderMap<EventSenderMpscBounded<EV>, EV, AUX>,
+    pub type EventManagerWithBoundedMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
+        EventU32ReceiverMpsc<ParamProvider>,
+        DefaultSenderMap<EventSenderMpscBounded<Event>, Event, ParamProvider>,
         DefaultListenerMap,
-        EventSenderMpscBounded<EV>,
+        EventSenderMpscBounded<Event>,
     >;

     impl<
-            ER: EventReceiveProvider<EV, AUX>,
-            SP: EventSendProvider<EV, AUX>,
-            EV: GenericEvent + Copy,
-            AUX: 'static,
-        > EventManager<ER, DefaultSenderMap<SP, EV, AUX>, DefaultListenerMap, SP, EV, AUX>
+            EventReceiver: EventReceiveProvider<Event, ParamProvider>,
+            EventSender: EventSendProvider<Event, ParamProvider>,
+            Event: GenericEvent + Copy,
+            ParamProvider: 'static + Debug,
+        >
+        EventManager<
+            EventReceiver,
+            DefaultSenderMap<EventSender, Event, ParamProvider>,
+            DefaultListenerMap,
+            EventSender,
+            Event,
+            ParamProvider,
+        >
     {
         /// Create an event manager where the sender table will be the [DefaultSenderMap]
         /// and the listener table will be the [DefaultListenerMap].
-        pub fn new(event_receiver: ER) -> Self {
+        pub fn new(event_receiver: EventReceiver) -> Self {
             Self {
                 listener_map: DefaultListenerMap::default(),
                 sender_map: DefaultSenderMap::default(),
@@ -384,16 +428,19 @@ pub mod alloc_mod {
     ///
     /// Simple implementation which uses a [HashMap] internally.
     pub struct DefaultSenderMap<
-        SP: EventSendProvider<EV, AUX>,
-        EV: GenericEvent = EventU32,
-        AUX = Params,
+        EventSender: EventSendProvider<Event, ParamProvider>,
+        Event: GenericEvent = EventU32,
+        ParamProvider: Debug = Params,
     > {
-        senders: HashMap<ComponentId, SP>,
-        phantom: PhantomData<(EV, AUX)>,
+        senders: HashMap<ComponentId, EventSender>,
+        phantom: PhantomData<(Event, ParamProvider)>,
     }

-    impl<SP: EventSendProvider<EV, AUX>, EV: GenericEvent, AUX> Default
-        for DefaultSenderMap<SP, EV, AUX>
+    impl<
+            EventSender: EventSendProvider<Event, ParamProvider>,
+            Event: GenericEvent,
+            ParamProvider: Debug,
+        > Default for DefaultSenderMap<EventSender, Event, ParamProvider>
     {
         fn default() -> Self {
             Self {
@@ -403,20 +450,24 @@ pub mod alloc_mod {
             }
         }
     }

-    impl<SP: EventSendProvider<EV, AUX>, EV: GenericEvent, AUX> SenderMapProvider<SP, EV, AUX>
-        for DefaultSenderMap<SP, EV, AUX>
+    impl<
+            EventSender: EventSendProvider<Event, ParamProvider>,
+            Event: GenericEvent,
+            ParamProvider: Debug,
+        > SenderMapProvider<EventSender, Event, ParamProvider>
+        for DefaultSenderMap<EventSender, Event, ParamProvider>
     {
         fn contains_send_event_provider(&self, id: &ComponentId) -> bool {
             self.senders.contains_key(id)
         }

-        fn get_send_event_provider(&self, id: &ComponentId) -> Option<&SP> {
+        fn get_send_event_provider(&self, id: &ComponentId) -> Option<&EventSender> {
             self.senders
                 .get(id)
                 .filter(|sender| sender.target_id() == *id)
         }

-        fn add_send_event_provider(&mut self, send_provider: SP) -> bool {
+        fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool {
             let id = send_provider.target_id();
             if self.senders.contains_key(&id) {
                 return false;
@@ -428,53 +479,61 @@
 #[cfg(feature = "std")]
 pub mod std_mod {
+    use crate::queue::GenericReceiveError;
+
     use super::*;
     use std::sync::mpsc;

-    pub struct MpscEventReceiver<Event: GenericEvent + Send = EventU32> {
-        mpsc_receiver: mpsc::Receiver<(Event, Option<Params>)>,
-    }
-
-    impl<Event: GenericEvent + Send> MpscEventReceiver<Event> {
-        pub fn new(receiver: mpsc::Receiver<(Event, Option<Params>)>) -> Self {
-            Self {
-                mpsc_receiver: receiver,
-            }
-        }
-    }
-    impl<Event: GenericEvent + Send> EventReceiveProvider<Event> for MpscEventReceiver<Event> {
-        fn try_recv_event(&self) -> Option<EventWithAuxData<Event>> {
-            if let Ok(event_and_data) = self.mpsc_receiver.try_recv() {
-                return Some(event_and_data);
-            }
-            None
-        }
-    }
-
-    pub type MpscEventU32Receiver = MpscEventReceiver<EventU32>;
-    pub type MpscEventU16Receiver = MpscEventReceiver<EventU16>;
+    impl<Event: GenericEvent + Send, ParamProvider: Debug>
+        EventReceiveProvider<Event, ParamProvider>
+        for mpsc::Receiver<EventMessage<Event, ParamProvider>>
+    {
+        type Error = GenericReceiveError;
+
+        fn try_recv_event(
+            &self,
+        ) -> Result<Option<EventMessage<Event, ParamProvider>>, Self::Error> {
+            match self.try_recv() {
+                Ok(msg) => Ok(Some(msg)),
+                Err(e) => match e {
+                    mpsc::TryRecvError::Empty => Ok(None),
+                    mpsc::TryRecvError::Disconnected => {
+                        Err(GenericReceiveError::TxDisconnected(None))
+                    }
+                },
+            }
+        }
+    }
+
+    pub type EventU32ReceiverMpsc<ParamProvider = Params> =
+        mpsc::Receiver<EventMessage<EventU32, ParamProvider>>;
+    pub type EventU16ReceiverMpsc<ParamProvider = Params> =
+        mpsc::Receiver<EventMessage<EventU16, ParamProvider>>;

     /// Generic event sender which uses a regular [mpsc::Sender] as the messaging backend to
     /// send events.
     #[derive(Clone)]
     pub struct EventSenderMpsc<Event: GenericEvent + Send> {
         target_id: ComponentId,
-        sender: mpsc::Sender<(Event, Option<Params>)>,
+        sender: mpsc::Sender<EventMessage<Event>>,
     }

     impl<Event: GenericEvent + Send> EventSenderMpsc<Event> {
-        pub fn new(target_id: ComponentId, sender: mpsc::Sender<(Event, Option<Params>)>) -> Self {
+        pub fn new(target_id: ComponentId, sender: mpsc::Sender<EventMessage<Event>>) -> Self {
             Self { target_id, sender }
         }
     }

     impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpsc<Event> {
+        type Error = GenericSendError;
+
         fn target_id(&self) -> ComponentId {
             self.target_id
         }
-        fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), GenericSendError> {
+
+        fn send(&self, event_msg: EventMessage<Event>) -> Result<(), GenericSendError> {
             self.sender
-                .send((event, aux_data))
+                .send(event_msg)
                 .map_err(|_| GenericSendError::RxDisconnected)
         }
     }
@@ -484,14 +543,14 @@ pub mod std_mod {
     #[derive(Clone)]
     pub struct EventSenderMpscBounded<Event: GenericEvent + Send> {
         target_id: ComponentId,
-        sender: mpsc::SyncSender<(Event, Option<Params>)>,
+        sender: mpsc::SyncSender<EventMessage<Event>>,
         capacity: usize,
     }

     impl<Event: GenericEvent + Send> EventSenderMpscBounded<Event> {
         pub fn new(
             target_id: ComponentId,
-            sender: mpsc::SyncSender<(Event, Option<Params>)>,
+            sender: mpsc::SyncSender<EventMessage<Event>>,
             capacity: usize,
         ) -> Self {
             Self {
@@ -503,11 +562,14 @@ pub mod std_mod {
     }

     impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpscBounded<Event> {
+        type Error = GenericSendError;
+
         fn target_id(&self) -> ComponentId {
             self.target_id
         }
-        fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), GenericSendError> {
-            if let Err(e) = self.sender.try_send((event, aux_data)) {
+
+        fn send(&self, event_msg: EventMessage<Event>) -> Result<(), Self::Error> {
+            if let Err(e) = self.sender.try_send(event_msg) {
                 return match e {
                     mpsc::TrySendError::Full(_) => {
                         Err(GenericSendError::QueueFull(Some(self.capacity as u32)))
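With the trait implemented directly for mpsc::Receiver, the removed MpscEventReceiver wrapper has no replacement and none is needed. A minimal sketch, assuming the satrs::event_man exports:

    use std::sync::mpsc;

    use satrs::event_man::{EventMessageU32, EventReceiveProvider};
    use satrs::events::{EventU32, Severity};

    fn main() {
        let (tx, rx) = mpsc::channel::<EventMessageU32>();
        tx.send(EventMessageU32::new(0, EventU32::new(Severity::Info, 0, 1)))
            .unwrap();
        // try_recv_event is available directly on the receiver via the new impl.
        let msg = rx.try_recv_event().expect("receive failed").expect("no event pending");
        assert_eq!(msg.sender_id(), 0);
        // An empty queue yields Ok(None) instead of an error.
        assert!(rx.try_recv_event().expect("receive failed").is_none());
    }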
@@ -530,19 +592,20 @@ mod tests {
use super::*; use super::*;
use crate::event_man::EventManager; use crate::event_man::EventManager;
use crate::events::{EventU32, GenericEvent, Severity}; use crate::events::{EventU32, GenericEvent, Severity};
use crate::params::ParamsRaw; use crate::params::{ParamsHeapless, ParamsRaw};
use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1};
use std::format; use std::format;
use std::sync::mpsc::{self, channel, Receiver, Sender}; use std::sync::mpsc::{self};
const TEST_EVENT: EventU32 = EventU32::const_new(Severity::INFO, 0, 5); const TEST_EVENT: EventU32 = EventU32::new(Severity::Info, 0, 5);
fn check_next_event( fn check_next_event(
expected: EventU32, expected: EventU32,
receiver: &Receiver<EventU32WithAuxData>, receiver: &mpsc::Receiver<EventMessageU32>,
) -> Option<Params> { ) -> Option<Params> {
if let Ok(event) = receiver.try_recv() { if let Ok(event_msg) = receiver.try_recv() {
assert_eq!(event.0, expected); assert_eq!(event_msg.event, expected);
return event.1; return event_msg.params;
} }
None None
} }
@@ -551,72 +614,79 @@ mod tests {
res: EventRoutingResult<EventU32, Params>, res: EventRoutingResult<EventU32, Params>,
expected: EventU32, expected: EventU32,
expected_num_sent: u32, expected_num_sent: u32,
expected_sender_id: ComponentId,
) { ) {
assert!(matches!(res, EventRoutingResult::Handled { .. })); assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled { if let EventRoutingResult::Handled {
num_recipients, num_recipients,
event, event_msg,
..
} = res } = res
{ {
assert_eq!(event, expected); assert_eq!(event_msg.event, expected);
assert_eq!(event_msg.sender_id, expected_sender_id);
assert_eq!(num_recipients, expected_num_sent); assert_eq!(num_recipients, expected_num_sent);
} }
} }
fn generic_event_man() -> (Sender<EventU32WithAuxData>, EventManagerWithMpsc) { fn generic_event_man() -> (mpsc::Sender<EventMessageU32>, EventManagerWithMpsc) {
let (event_sender, manager_queue) = channel(); let (event_sender, event_receiver) = mpsc::channel();
let event_man_receiver = MpscEventReceiver::new(manager_queue); (event_sender, EventManager::new(event_receiver))
(event_sender, EventManager::new(event_man_receiver))
} }
#[test] #[test]
fn test_basic() { fn test_basic() {
let (event_sender, mut event_man) = generic_event_man(); let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (single_event_sender, single_event_receiver) = channel(); let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender); let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener); event_man.add_sender(single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = channel(); let (group_event_sender_0, group_event_receiver_0) = mpsc::channel();
let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0); let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0);
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id()); event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id());
event_man.add_sender(group_event_listener); event_man.add_sender(group_event_listener);
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
// Test event with one listener // Test event with one listener
event_sender event_sender
.send((event_grp_0, None)) .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending single error failed"); .expect("Sending single error failed");
let res = event_man.try_event_handling(); let res = event_man.try_event_handling(&error_handler);
assert!(res.is_ok()); check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_handled_event(res.unwrap(), event_grp_0, 1);
check_next_event(event_grp_0, &single_event_receiver); check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners // Test event which is sent to all group listeners
event_sender event_sender
.send((event_grp_1_0, None)) .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sending group error failed"); .expect("Sending group error failed");
let res = event_man.try_event_handling(); let res = event_man.try_event_handling(&error_handler);
assert!(res.is_ok()); check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_1_0, &group_event_receiver_0); check_next_event(event_grp_1_0, &group_event_receiver_0);
} }
#[test] #[test]
fn test_with_basic_aux_data() { fn test_with_basic_params() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man(); let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let (single_event_sender, single_event_receiver) = channel(); let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender); let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id()); event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener); event_man.add_sender(single_event_listener);
event_sender event_sender
.send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into())))) .send(EventMessage::new_with_params(
TEST_COMPONENT_ID_0.id(),
event_grp_0,
&Params::Heapless((2_u32, 3_u32).into()),
))
.expect("Sending group error failed"); .expect("Sending group error failed");
let res = event_man.try_event_handling(); let res = event_man.try_event_handling(&error_handler);
assert!(res.is_ok()); check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_handled_event(res.unwrap(), event_grp_0, 1);
let aux = check_next_event(event_grp_0, &single_event_receiver); let aux = check_next_event(event_grp_0, &single_event_receiver);
assert!(aux.is_some()); assert!(aux.is_some());
let aux = aux.unwrap(); let aux = aux.unwrap();
@@ -631,15 +701,16 @@ mod tests {
/// Test listening for multiple groups /// Test listening for multiple groups
#[test] #[test]
fn test_multi_group() { fn test_multi_group() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man(); let (event_sender, mut event_man) = generic_event_man();
let res = event_man.try_event_handling(); let res = event_man.try_event_handling(error_handler);
assert!(res.is_ok()); assert!(matches!(res, EventRoutingResult::Empty));
let hres = res.unwrap();
assert!(matches!(hres, EventRoutingResult::Empty));
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap(); let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap(); let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (event_grp_0_sender, event_grp_0_receiver) = channel(); let (event_grp_0_sender, event_grp_0_receiver) = mpsc::channel();
let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender); let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender);
event_man.subscribe_group( event_man.subscribe_group(
event_grp_0.group_id(), event_grp_0.group_id(),
@@ -652,17 +723,15 @@ mod tests {
event_man.add_sender(event_grp_0_and_1_listener); event_man.add_sender(event_grp_0_and_1_listener);
event_sender event_sender
.send((event_grp_0, None)) .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending Event Group 0 failed"); .expect("Sending Event Group 0 failed");
event_sender event_sender
.send((event_grp_1_0, None)) .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sendign Event Group 1 failed"); .expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling(); let res = event_man.try_event_handling(error_handler);
assert!(res.is_ok()); check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_handled_event(res.unwrap(), event_grp_0, 1); let res = event_man.try_event_handling(error_handler);
let res = event_man.try_event_handling(); check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
assert!(res.is_ok());
check_handled_event(res.unwrap(), event_grp_1_0, 1);
check_next_event(event_grp_0, &event_grp_0_receiver); check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver); check_next_event(event_grp_1_0, &event_grp_0_receiver);
@@ -672,11 +741,14 @@ mod tests {
     /// to both group and single events from one listener
     #[test]
     fn test_listening_to_same_event_and_multi_type() {
+        let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
+            panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
+        };
         let (event_sender, mut event_man) = generic_event_man();
-        let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
-        let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
-        let (event_0_tx_0, event_0_rx_0) = channel();
-        let (event_0_tx_1, event_0_rx_1) = channel();
+        let event_0 = EventU32::new(Severity::Info, 0, 5);
+        let event_1 = EventU32::new(Severity::High, 1, 0);
+        let (event_0_tx_0, event_0_rx_0) = mpsc::channel();
+        let (event_0_tx_1, event_0_rx_1) = mpsc::channel();
         let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0);
         let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1);
         let event_listener_0_sender_id = event_listener_0.target_id();
@@ -686,28 +758,25 @@ mod tests {
         event_man.subscribe_single(&event_0, event_listener_1_sender_id);
         event_man.add_sender(event_listener_1);
         event_sender
-            .send((event_0, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
             .expect("Triggering Event 0 failed");
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_0, 2);
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
         check_next_event(event_0, &event_0_rx_0);
         check_next_event(event_0, &event_0_rx_1);
         event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
         event_sender
-            .send((event_0, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
             .expect("Triggering Event 0 failed");
         event_sender
-            .send((event_1, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
             .expect("Triggering Event 1 failed");
         // 3 event messages will be sent now
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_0, 2);
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_1, 1);
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
         // Both the single event and the group event should arrive now
         check_next_event(event_0, &event_0_rx_0);
         check_next_event(event_1, &event_0_rx_0);
@@ -716,36 +785,35 @@ mod tests {
         event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
         event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id()));
         event_sender
-            .send((event_1, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_1))
             .expect("Triggering Event 1 failed");
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_1, 1);
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_0.id());
     }

     #[test]
     fn test_all_events_listener() {
-        let (event_sender, manager_queue) = channel();
-        let event_man_receiver = MpscEventReceiver::new(manager_queue);
-        let mut event_man = EventManagerWithMpsc::new(event_man_receiver);
-        let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
-        let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
-        let (event_0_tx_0, all_events_rx) = channel();
+        let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
+            panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
+        };
+        let (event_sender, event_receiver) = mpsc::channel();
+        let mut event_man = EventManagerWithMpsc::new(event_receiver);
+        let event_0 = EventU32::new(Severity::Info, 0, 5);
+        let event_1 = EventU32::new(Severity::High, 1, 0);
+        let (event_0_tx_0, all_events_rx) = mpsc::channel();
         let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0);
         event_man.subscribe_all(all_events_listener.target_id());
         event_man.add_sender(all_events_listener);
         event_sender
-            .send((event_0, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
             .expect("Triggering event 0 failed");
         event_sender
-            .send((event_1, None))
+            .send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
             .expect("Triggering event 1 failed");
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_0, 1);
-        let res = event_man.try_event_handling();
-        assert!(res.is_ok());
-        check_handled_event(res.unwrap(), event_1, 1);
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_0, 1, TEST_COMPONENT_ID_0.id());
+        let res = event_man.try_event_handling(error_handler);
+        check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
         check_next_event(event_0, &all_events_rx);
         check_next_event(event_1, &all_events_rx);
     }
@@ -755,15 +823,15 @@ mod tests {
         let (event_sender, _event_receiver) = mpsc::sync_channel(3);
         let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
         event_sender
-            .send_no_data(TEST_EVENT)
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
             .expect("sending test event failed");
         event_sender
-            .send_no_data(TEST_EVENT)
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
             .expect("sending test event failed");
         event_sender
-            .send_no_data(TEST_EVENT)
+            .send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
             .expect("sending test event failed");
-        let error = event_sender.send_no_data(TEST_EVENT);
+        let error = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT));
         if let Err(e) = error {
             assert!(matches!(e, GenericSendError::QueueFull(Some(3))));
         } else {
@@ -775,7 +843,7 @@ mod tests {
         let (event_sender, event_receiver) = mpsc::sync_channel(3);
         let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
         drop(event_receiver);
-        if let Err(e) = event_sender.send_no_data(TEST_EVENT) {
+        if let Err(e) = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) {
             assert!(matches!(e, GenericSendError::RxDisconnected));
         } else {
             panic!("Expected error");

--- next file ---

@@ -20,12 +20,12 @@
 //! ```
 //! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
 //!
-//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(1, 0);
-//! const MSG_FAILED: EventU32 = EventU32::const_new(Severity::LOW, 1, 1);
+//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
+//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
 //!
-//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::const_new(2, 0);
+//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
 //!
-//! let small_event = EventU16::new(Severity::INFO, 3, 0);
+//! let small_event = EventU16::new(Severity::Info, 3, 0);
 //! ```
 use core::fmt::Debug;
 use core::hash::Hash;
@@ -40,12 +40,17 @@ pub type LargestEventRaw = u32;
 /// Using a type definition allows to change this to u32 in the future more easily
 pub type LargestGroupIdRaw = u16;

+pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
+pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
+
 #[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub enum Severity {
-    INFO = 0,
-    LOW = 1,
-    MEDIUM = 2,
-    HIGH = 3,
+    Info = 0,
+    Low = 1,
+    Medium = 2,
+    High = 3,
 }

 pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
@@ -56,31 +61,31 @@ pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct SeverityInfo {}
 impl HasSeverity for SeverityInfo {
-    const SEVERITY: Severity = Severity::INFO;
+    const SEVERITY: Severity = Severity::Info;
 }

 /// Type level support struct
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct SeverityLow {}
 impl HasSeverity for SeverityLow {
-    const SEVERITY: Severity = Severity::LOW;
+    const SEVERITY: Severity = Severity::Low;
 }

 /// Type level support struct
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct SeverityMedium {}
 impl HasSeverity for SeverityMedium {
-    const SEVERITY: Severity = Severity::MEDIUM;
+    const SEVERITY: Severity = Severity::Medium;
 }

 /// Type level support struct
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub struct SeverityHigh {}
 impl HasSeverity for SeverityHigh {
-    const SEVERITY: Severity = Severity::HIGH;
+    const SEVERITY: Severity = Severity::High;
 }

-pub trait GenericEvent: EcssEnumeration {
+pub trait GenericEvent: EcssEnumeration + Copy + Clone {
     type Raw;
     type GroupId;
     type UniqueId;
@@ -99,27 +104,29 @@ impl TryFrom<u8> for Severity {
     fn try_from(value: u8) -> Result<Self, Self::Error> {
         match value {
-            x if x == Severity::INFO as u8 => Ok(Severity::INFO),
-            x if x == Severity::LOW as u8 => Ok(Severity::LOW),
-            x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
-            x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
+            x if x == Severity::Info as u8 => Ok(Severity::Info),
+            x if x == Severity::Low as u8 => Ok(Severity::Low),
+            x if x == Severity::Medium as u8 => Ok(Severity::Medium),
+            x if x == Severity::High as u8 => Ok(Severity::High),
             _ => Err(()),
         }
     }
 }

 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-struct EventBase<RAW, GID, UID> {
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
+struct EventBase<Raw, GroupId, UniqueId> {
     severity: Severity,
-    group_id: GID,
-    unique_id: UID,
-    phantom: PhantomData<RAW>,
+    group_id: GroupId,
+    unique_id: UniqueId,
+    phantom: PhantomData<Raw>,
 }

-impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
+impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
     fn write_to_bytes(
         &self,
-        raw: RAW,
+        raw: Raw,
         buf: &mut [u8],
         width: usize,
     ) -> Result<usize, ByteConversionError> {
@@ -267,6 +274,7 @@ macro_rules! const_from_fn {
 }

 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
 pub struct EventU32 {
     base: EventBase<u32, u16, u16>,
 }
@@ -309,12 +317,12 @@ impl EventU32 {
     /// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
     /// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
     ///   raw event ID
-    pub fn new(
+    pub fn new_checked(
         severity: Severity,
         group_id: <Self as GenericEvent>::GroupId,
         unique_id: <Self as GenericEvent>::UniqueId,
     ) -> Option<Self> {
-        if group_id > (2u16.pow(14) - 1) {
+        if group_id > MAX_GROUP_ID_U32_EVENT {
             return None;
         }
         Some(Self {
@@ -326,12 +334,14 @@ impl EventU32 {
             },
         })
     }
-    pub const fn const_new(
+
+    /// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
+    pub const fn new(
         severity: Severity,
         group_id: <Self as GenericEvent>::GroupId,
         unique_id: <Self as GenericEvent>::UniqueId,
     ) -> Self {
-        if group_id > (2u16.pow(14) - 1) {
+        if group_id > MAX_GROUP_ID_U32_EVENT {
             panic!("Group ID too large");
         }
         Self {
@@ -344,50 +354,16 @@ impl EventU32 {
         }
     }

+    pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
+        Self::from(u32::from_be_bytes(bytes))
+    }
+
     const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
     const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
     const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
     const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
 }

-impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
-    /// This is similar to [EventU32::new] but the severity is a type generic, which allows to
-    /// have distinct types for events with different severities
-    pub fn new(
-        group_id: <Self as GenericEvent>::GroupId,
-        unique_id: <Self as GenericEvent>::UniqueId,
-    ) -> Option<Self> {
-        let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id)?;
-        Some(Self {
-            event,
-            phantom: PhantomData,
-        })
-    }
-
-    /// Const version of [Self::new], but panics on invalid group ID input values.
-    pub const fn const_new(
-        group_id: <Self as GenericEvent>::GroupId,
-        unique_id: <Self as GenericEvent>::UniqueId,
-    ) -> Self {
-        let event = EventU32::const_new(SEVERITY::SEVERITY, group_id, unique_id);
-        Self {
-            event,
-            phantom: PhantomData,
-        }
-    }
-
-    fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
-        let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
-        if severity != expected {
-            return Err(severity);
-        }
-        Ok(Self::const_new(
-            ((raw >> 16) & 0x3FFF) as u16,
-            (raw & 0xFFFF) as u16,
-        ))
-    }
-}
-
 impl From<u32> for EventU32 {
     fn from(raw: u32) -> Self {
         // Severity conversion from u8 should never fail
@@ -395,15 +371,10 @@ impl From<u32> for EventU32 {
         let group_id = ((raw >> 16) & 0x3FFF) as u16;
         let unique_id = (raw & 0xFFFF) as u16;
         // Sanitized input, should never fail
-        Self::const_new(severity, group_id, unique_id)
+        Self::new(severity, group_id, unique_id)
     }
 }

-try_from_impls!(SeverityInfo, Severity::INFO, u32, EventU32TypedSev);
-try_from_impls!(SeverityLow, Severity::LOW, u32, EventU32TypedSev);
-try_from_impls!(SeverityMedium, Severity::MEDIUM, u32, EventU32TypedSev);
-try_from_impls!(SeverityHigh, Severity::HIGH, u32, EventU32TypedSev);
-
 impl UnsignedEnum for EventU32 {
     fn size(&self) -> usize {
         core::mem::size_of::<u32>()
@@ -424,6 +395,49 @@ impl EcssEnumeration for EventU32 {
     }
 }

+impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
+    /// This is similar to [EventU32::new] but the severity is a type generic, which allows to
+    /// have distinct types for events with different severities
+    pub fn new_checked(
+        group_id: <Self as GenericEvent>::GroupId,
+        unique_id: <Self as GenericEvent>::UniqueId,
+    ) -> Option<Self> {
+        let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
+        Some(Self {
+            event,
+            phantom: PhantomData,
+        })
+    }
+
+    /// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
+    pub const fn new(
+        group_id: <Self as GenericEvent>::GroupId,
+        unique_id: <Self as GenericEvent>::UniqueId,
+    ) -> Self {
+        let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
+        Self {
+            event,
+            phantom: PhantomData,
+        }
+    }
+
+    fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
+        let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
+        if severity != expected {
+            return Err(severity);
+        }
+        Ok(Self::new(
+            ((raw >> 16) & 0x3FFF) as u16,
+            (raw & 0xFFFF) as u16,
+        ))
+    }
+}
+
+try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
+try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
+try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
+try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
+
 //noinspection RsTraitImplementation
 impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
     delegate!(to self.event {
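In short, the checked constructor keeps the Option-returning contract under the new name new_checked, while new is now the const variant that panics on an oversized group ID. A hedged sketch of how the two are meant to be used; the constant name is illustrative:

    // Const construction, suitable for compile-time event definitions; panics
    // if group_id > MAX_GROUP_ID_U32_EVENT.
    const TEMPERATURE_ALARM: EventU32 = EventU32::new(Severity::High, 2, 0);

    // Runtime-checked construction returns Option instead of panicking.
    fn checked_construction() {
        assert!(EventU32::new_checked(Severity::Low, 2_u16.pow(14), 0).is_none());
        assert!(EventU32::new_checked(Severity::Low, MAX_GROUP_ID_U32_EVENT, 0).is_some());
    }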
@@ -441,6 +455,8 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
     }
 }

 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
+#[cfg_attr(feature = "defmt", derive(defmt::Format))]
 pub struct EventU16 {
     base: EventBase<u16, u8, u8>,
 }
@@ -475,7 +491,7 @@ impl EventU16 {
     /// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
     /// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
     ///   raw event ID
-    pub fn new(
+    pub fn new_checked(
         severity: Severity,
         group_id: <Self as GenericEvent>::GroupId,
         unique_id: <Self as GenericEvent>::UniqueId,
@@ -493,8 +509,8 @@ impl EventU16 {
         })
     }

-    /// Const version of [Self::new], but panics on invalid group ID input values.
-    pub const fn const_new(
+    /// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
+    pub const fn new(
         severity: Severity,
         group_id: <Self as GenericEvent>::GroupId,
         unique_id: <Self as GenericEvent>::UniqueId,
@@ -511,52 +527,26 @@ impl EventU16 {
             },
         }
     }

+    pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
+        Self::from(u16::from_be_bytes(bytes))
+    }
+
     const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
     const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
     const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
     const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
 }

-impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
-    /// This is similar to [EventU16::new] but the severity is a type generic, which allows to
-    /// have distinct types for events with different severities
-    pub fn new(
-        group_id: <Self as GenericEvent>::GroupId,
-        unique_id: <Self as GenericEvent>::UniqueId,
-    ) -> Option<Self> {
-        let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id)?;
-        Some(Self {
-            event,
-            phantom: PhantomData,
-        })
-    }
-
-    /// Const version of [Self::new], but panics on invalid group ID input values.
-    pub const fn const_new(
-        group_id: <Self as GenericEvent>::GroupId,
-        unique_id: <Self as GenericEvent>::UniqueId,
-    ) -> Self {
-        let event = EventU16::const_new(SEVERITY::SEVERITY, group_id, unique_id);
-        Self {
-            event,
-            phantom: PhantomData,
-        }
-    }
-
-    fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
-        let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
-        if severity != expected {
-            return Err(severity);
-        }
-        Ok(Self::const_new(
-            ((raw >> 8) & 0x3F) as u8,
-            (raw & 0xFF) as u8,
-        ))
-    }
-}
-
-impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
+impl From<u16> for EventU16 {
+    fn from(raw: <Self as GenericEvent>::Raw) -> Self {
+        let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
+        let group_id = ((raw >> 8) & 0x3F) as u8;
+        let unique_id = (raw & 0xFF) as u8;
+        // Sanitized input, new call should never fail
+        Self::new(severity, group_id, unique_id)
+    }
+}

 impl UnsignedEnum for EventU16 {
     fn size(&self) -> usize {
         core::mem::size_of::<u16>()
@@ -577,6 +567,43 @@ impl EcssEnumeration for EventU16 {
     }
 }

+impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
+    /// This is similar to [EventU16::new] but the severity is a type generic, which allows to
+    /// have distinct types for events with different severities
+    pub fn new_checked(
+        group_id: <Self as GenericEvent>::GroupId,
+        unique_id: <Self as GenericEvent>::UniqueId,
+    ) -> Option<Self> {
+        let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
+        Some(Self {
+            event,
+            phantom: PhantomData,
+        })
+    }
+
+    /// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
+    pub const fn new(
+        group_id: <Self as GenericEvent>::GroupId,
+        unique_id: <Self as GenericEvent>::UniqueId,
+    ) -> Self {
+        let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
+        Self {
+            event,
+            phantom: PhantomData,
+        }
+    }
+
+    fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
+        let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
+        if severity != expected {
+            return Err(severity);
+        }
+        Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
+    }
+}
+
+impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
+
 //noinspection RsTraitImplementation
 impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
     delegate!(to self.event {
@@ -593,20 +620,10 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
     });
 }

-impl From<u16> for EventU16 {
-    fn from(raw: <Self as GenericEvent>::Raw) -> Self {
-        let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
-        let group_id = ((raw >> 8) & 0x3F) as u8;
-        let unique_id = (raw & 0xFF) as u8;
-        // Sanitized input, new call should never fail
-        Self::const_new(severity, group_id, unique_id)
-    }
-}
-
-try_from_impls!(SeverityInfo, Severity::INFO, u16, EventU16TypedSev);
-try_from_impls!(SeverityLow, Severity::LOW, u16, EventU16TypedSev);
-try_from_impls!(SeverityMedium, Severity::MEDIUM, u16, EventU16TypedSev);
-try_from_impls!(SeverityHigh, Severity::HIGH, u16, EventU16TypedSev);
+try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
+try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
+try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
+try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);

 impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
     #[inline]
@@ -647,12 +664,10 @@ mod tests {
         assert_eq!(size_of::<T>(), val);
     }

-    const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
-    const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::const_new(0, 0);
-    const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> =
-        EventU32TypedSev::const_new(0x3FFF, 0xFFFF);
-    const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> =
-        EventU16TypedSev::const_new(0x3F, 0xff);
+    const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
+    const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
+    const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
+    const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);

     /// This working is a test in itself.
     const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
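The INFO_REDUCED constant above doubles as a compile-time check that a typed event still reduces to the untyped form. A small hedged sketch of the same reduction at runtime; the constant name below is illustrative:

    const MODE_CHANGED: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 2);

    fn reduce_typed_event() {
        // Into<EventU32> drops the type-level severity but keeps its value.
        let untyped: EventU32 = MODE_CHANGED.into();
        assert_eq!(untyped.severity(), Severity::Info);
        assert_eq!(untyped.group_id(), 1);
        assert_eq!(untyped.unique_id(), 2);
    }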
@@ -683,7 +698,7 @@ mod tests {
     #[test]
     fn test_normal_event_getters() {
-        assert_eq!(INFO_EVENT.severity(), Severity::INFO);
+        assert_eq!(INFO_EVENT.severity(), Severity::Info);
         assert_eq!(INFO_EVENT.unique_id(), 0);
         assert_eq!(INFO_EVENT.group_id(), 0);
         let raw_event = INFO_EVENT.raw();
@@ -692,7 +707,7 @@ mod tests {
     #[test]
     fn test_small_event_getters() {
-        assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
+        assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
         assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
         assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
         let raw_event = INFO_EVENT_SMALL.raw();
@@ -701,7 +716,7 @@ mod tests {
     #[test]
     fn all_ones_event_regular() {
-        assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
+        assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
         assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
         assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
         let raw_event = HIGH_SEV_EVENT.raw();
@@ -710,7 +725,7 @@ mod tests {
     #[test]
     fn all_ones_event_small() {
-        assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
+        assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
         assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
         assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
         let raw_event = HIGH_SEV_EVENT_SMALL.raw();
@@ -719,18 +734,19 @@ mod tests {
     #[test]
     fn invalid_group_id_normal() {
-        assert!(EventU32TypedSev::<SeverityMedium>::new(2_u16.pow(14), 0).is_none());
+        assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
     }

     #[test]
     fn invalid_group_id_small() {
-        assert!(EventU16TypedSev::<SeverityMedium>::new(2_u8.pow(6), 0).is_none());
+        assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
     }

     #[test]
     fn regular_new() {
         assert_eq!(
-            EventU32TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
+            EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
+                .expect("Creating regular event failed"),
             INFO_EVENT
         );
     }
@@ -738,7 +754,8 @@ mod tests {
     #[test]
     fn small_new() {
         assert_eq!(
-            EventU16TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
+            EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
+                .expect("Creating regular event failed"),
             INFO_EVENT_SMALL
         );
     }
@@ -777,6 +794,8 @@ mod tests {
         assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
         let val_from_raw = u32::from_be_bytes(buf);
         assert_eq!(val_from_raw, 0xFFFFFFFF);
+        let event_read_back = EventU32::from_be_bytes(buf);
+        assert_eq!(event_read_back, HIGH_SEV_EVENT);
     }
@@ -785,6 +804,8 @@ mod tests {
         assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
         let val_from_raw = u16::from_be_bytes(buf);
         assert_eq!(val_from_raw, 0xFFFF);
+        let event_read_back = EventU16::from_be_bytes(buf);
+        assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
     }
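These additions give both event types a symmetric byte-level round trip. A hedged sketch of the pattern the tests exercise:

    fn event_byte_round_trip() {
        let event = EventU32::new(Severity::Medium, 1, 42);
        let mut buf = [0u8; 4];
        // write_to_be_bytes serializes the raw event ID in big-endian order...
        event.write_to_be_bytes(&mut buf).expect("serialization failed");
        // ...and the new from_be_bytes constructor reverses it exactly.
        assert_eq!(EventU32::from_be_bytes(buf), event);
    }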
@@ -815,13 +836,13 @@ mod tests {
     #[test]
     fn severity_from_invalid_raw_val() {
         let invalid = 0xFF;
         assert!(Severity::try_from(invalid).is_err());
-        let invalid = Severity::HIGH as u8 + 1;
+        let invalid = Severity::High as u8 + 1;
         assert!(Severity::try_from(invalid).is_err());
     }

     #[test]
     fn reduction() {
-        let event = EventU32TypedSev::<SeverityInfo>::const_new(1, 1);
+        let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
         let raw = event.raw();
         let reduced: EventU32 = event.into();
         assert_eq!(reduced.group_id(), 1);

--- next file ---

@@ -1,4 +1,3 @@
 //! # Hardware Abstraction Layer module
 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod std;

--- next file ---

@@ -1,19 +1,25 @@
+use alloc::sync::Arc;
 use alloc::vec;
 use cobs::encode;
+use core::sync::atomic::AtomicBool;
+use core::time::Duration;
 use delegate::delegate;
+use mio::net::{TcpListener, TcpStream};
 use std::io::Write;
 use std::net::SocketAddr;
-use std::net::TcpListener;
-use std::net::TcpStream;
 use std::vec::Vec;

 use crate::encoding::parse_buffer_for_cobs_encoded_packets;
-use crate::tmtc::ReceivesTc;
-use crate::tmtc::TmPacketSource;
+use crate::tmtc::PacketSenderRaw;
+use crate::tmtc::PacketSource;

 use crate::hal::std::tcp_server::{
     ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
 };
+use crate::ComponentId;
+
+use super::tcp_server::HandledConnectionHandler;
+use super::tcp_server::HandledConnectionInfo;

 /// Concrete [TcpTcParser] implementation for the [TcpTmtcInCobsServer].
 #[derive(Default)]
@@ -23,14 +29,16 @@ impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
     fn handle_tc_parsing(
         &mut self,
         tc_buffer: &mut [u8],
-        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        sender_id: ComponentId,
+        tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         current_write_idx: usize,
         next_write_idx: &mut usize,
     ) -> Result<(), TcpTmtcError<TmError, TcError>> {
         conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
             &mut tc_buffer[..current_write_idx],
-            tc_receiver.upcast_mut(),
+            sender_id,
+            tc_sender,
             next_write_idx,
         )
         .map_err(|e| TcpTmtcError::TcError(e))?;
@@ -57,8 +65,8 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
     fn handle_tm_sending(
         &mut self,
         tm_buffer: &mut [u8],
-        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         stream: &mut TcpStream,
     ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
         let mut tm_was_sent = false;
@@ -96,7 +104,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
 /// Telemetry will be encoded with the COBS protocol using [cobs::encode] in addition to being
 /// wrapped with the sentinel value 0 as the packet delimiter as well before being sent back to
 /// the client. Please note that the server will send as much data as it can retrieve from the
-/// [TmPacketSource] in its current implementation.
+/// [PacketSource] in its current implementation.
 ///
 /// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
 /// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
@@ -110,21 +118,30 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
 /// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
 /// test also serves as the example application for this module.
 pub struct TcpTmtcInCobsServer<
-    TmError,
-    TcError: 'static,
-    TmSource: TmPacketSource<Error = TmError>,
-    TcReceiver: ReceivesTc<Error = TcError>,
+    TmSource: PacketSource<Error = TmError>,
+    TcSender: PacketSenderRaw<Error = SendError>,
+    HandledConnection: HandledConnectionHandler,
+    TmError,
+    SendError: 'static,
 > {
-    generic_server:
-        TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, CobsTmSender, CobsTcParser>,
+    pub generic_server: TcpTmtcGenericServer<
+        TmSource,
+        TcSender,
+        CobsTmSender,
+        CobsTcParser,
+        HandledConnection,
+        TmError,
+        SendError,
+    >,
 }

 impl<
-        TmError: 'static,
-        TcError: 'static,
-        TmSource: TmPacketSource<Error = TmError>,
-        TcReceiver: ReceivesTc<Error = TcError>,
-    > TcpTmtcInCobsServer<TmError, TcError, TmSource, TcReceiver>
+        TmSource: PacketSource<Error = TmError>,
+        TcReceiver: PacketSenderRaw<Error = TcError>,
+        HandledConnection: HandledConnectionHandler,
+        TmError: 'static,
+        TcError: 'static,
+    > TcpTmtcInCobsServer<TmSource, TcReceiver, HandledConnection, TmError, TcError>
 {
     /// Create a new TCP TMTC server which exchanges TMTC packets encoded with
     /// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
@@ -140,6 +157,8 @@ impl<
         cfg: ServerConfig,
         tm_source: TmSource,
         tc_receiver: TcReceiver,
+        handled_connection: HandledConnection,
+        stop_signal: Option<Arc<AtomicBool>>,
     ) -> Result<Self, std::io::Error> {
         Ok(Self {
             generic_server: TcpTmtcGenericServer::new(
@@ -148,6 +167,8 @@ impl<
                 CobsTmSender::new(cfg.tm_buffer_size),
                 tm_source,
                 tc_receiver,
+                handled_connection,
+                stop_signal,
             )?,
         })
     }
@@ -160,9 +181,10 @@ impl<
         /// useful if using the port number 0 for OS auto-assignment.
         pub fn local_addr(&self) -> std::io::Result<SocketAddr>;

-        /// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
-        pub fn handle_next_connection(
+        /// Delegation to the [TcpTmtcGenericServer::handle_all_connections] call.
+        pub fn handle_all_connections(
             &mut self,
+            poll_duration: Option<Duration>,
         ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
     }
 }
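Put together, constructing the reworked COBS server now requires a connection-finished handler and an optional stop signal, and the renamed handle_all_connections call takes an optional poll timeout. A hedged construction sketch, reusing helper names that appear in the tests below (TCP_SERVER_ID, addr, tm_source and ConnectionFinishedHandler stand in for application-side values):

    let (tc_sender, _tc_receiver) = std::sync::mpsc::channel();
    let mut server = TcpTmtcInCobsServer::new(
        ServerConfig::new(TCP_SERVER_ID, addr, Duration::from_millis(2), 1024, 1024),
        tm_source,
        tc_sender,                            // any PacketSenderRaw impl works here
        ConnectionFinishedHandler::default(), // invoked once per finished connection
        None,                                 // optional Arc<AtomicBool> stop signal
    )
    .expect("server creation failed");
    // Returns ConnectionResult::AcceptTimeout if nothing connects within the poll window.
    let result = server
        .handle_all_connections(Some(Duration::from_millis(100)))
        .expect("connection handling failed");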
@@ -177,21 +199,29 @@ mod tests {
     use std::{
         io::{Read, Write},
         net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
+        panic,
+        sync::mpsc,
         thread,
+        time::Instant,
     };

     use crate::{
         encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
         hal::std::tcp_server::{
-            tests::{SyncTcCacher, SyncTmSource},
-            ServerConfig,
+            tests::{ConnectionFinishedHandler, SyncTmSource},
+            ConnectionResult, ServerConfig,
         },
+        queue::GenericSendError,
+        tmtc::PacketAsVec,
+        ComponentId,
     };
     use alloc::sync::Arc;
     use cobs::encode;

     use super::TcpTmtcInCobsServer;

+    const TCP_SERVER_ID: ComponentId = 0x05;
+
     fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
         encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx)
     }
@@ -210,13 +240,22 @@ mod tests {
     fn generic_tmtc_server(
         addr: &SocketAddr,
-        tc_receiver: SyncTcCacher,
+        tc_sender: mpsc::Sender<PacketAsVec>,
         tm_source: SyncTmSource,
-    ) -> TcpTmtcInCobsServer<(), (), SyncTmSource, SyncTcCacher> {
+        stop_signal: Option<Arc<AtomicBool>>,
+    ) -> TcpTmtcInCobsServer<
+        SyncTmSource,
+        mpsc::Sender<PacketAsVec>,
+        ConnectionFinishedHandler,
+        (),
+        GenericSendError,
+    > {
         TcpTmtcInCobsServer::new(
-            ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
+            ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
             tm_source,
-            tc_receiver,
+            tc_sender,
+            ConnectionFinishedHandler::default(),
+            stop_signal,
         )
         .expect("TCP server generation failed")
     }
@@ -224,9 +263,10 @@ mod tests {
     #[test]
     fn test_server_basic_no_tm() {
         let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
-        let tc_receiver = SyncTcCacher::default();
+        let (tc_sender, tc_receiver) = mpsc::channel();
         let tm_source = SyncTmSource::default();
-        let mut tcp_server = generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source);
+        let mut tcp_server =
+            generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
         let dest_addr = tcp_server
             .local_addr()
             .expect("retrieving dest addr failed");
@@ -234,13 +274,20 @@ mod tests {
         let set_if_done = conn_handled.clone();
         // Call the connection handler in separate thread, does block.
         thread::spawn(move || {
-            let result = tcp_server.handle_next_connection();
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
             if result.is_err() {
                 panic!("handling connection failed: {:?}", result.unwrap_err());
             }
-            let conn_result = result.unwrap();
-            assert_eq!(conn_result.num_received_tcs, 1);
-            assert_eq!(conn_result.num_sent_tms, 0);
+            let result = result.unwrap();
+            assert_eq!(result, ConnectionResult::HandledConnections(1));
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_last_connection(0, 1);
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_no_connections_left();
             set_if_done.store(true, Ordering::Relaxed);
         });
         // Send TC to server now.
@@ -262,24 +309,20 @@ mod tests {
             panic!("connection was not handled properly");
         }
         // Check that the packet was received and decoded successfully.
-        let mut tc_queue = tc_receiver
-            .tc_queue
-            .lock()
-            .expect("locking tc queue failed");
-        assert_eq!(tc_queue.len(), 1);
-        assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
-        drop(tc_queue);
+        let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
+        assert_eq!(packet_with_sender.packet, &SIMPLE_PACKET);
+        matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
     }

     #[test]
     fn test_server_basic_multi_tm_multi_tc() {
         let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
-        let tc_receiver = SyncTcCacher::default();
+        let (tc_sender, tc_receiver) = mpsc::channel();
         let mut tm_source = SyncTmSource::default();
         tm_source.add_tm(&INVERTED_PACKET);
         tm_source.add_tm(&SIMPLE_PACKET);
         let mut tcp_server =
-            generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source.clone());
+            generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source.clone(), None);
         let dest_addr = tcp_server
             .local_addr()
             .expect("retrieving dest addr failed");
@@ -287,13 +330,20 @@ mod tests {
         let set_if_done = conn_handled.clone();
         // Call the connection handler in separate thread, does block.
         thread::spawn(move || {
-            let result = tcp_server.handle_next_connection();
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
             if result.is_err() {
                 panic!("handling connection failed: {:?}", result.unwrap_err());
             }
-            let conn_result = result.unwrap();
-            assert_eq!(conn_result.num_received_tcs, 2, "Not enough TCs received");
-            assert_eq!(conn_result.num_sent_tms, 2, "Not enough TMs received");
+            let result = result.unwrap();
+            assert_eq!(result, ConnectionResult::HandledConnections(1));
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_last_connection(2, 2);
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_no_connections_left();
             set_if_done.store(true, Ordering::Relaxed);
         });
         // Send TC to server now.
@@ -367,13 +417,78 @@ mod tests {
             panic!("connection was not handled properly");
         }
         // Check that the packet was received and decoded successfully.
-        let mut tc_queue = tc_receiver
-            .tc_queue
-            .lock()
-            .expect("locking tc queue failed");
-        assert_eq!(tc_queue.len(), 2);
-        assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
-        assert_eq!(tc_queue.pop_front().unwrap(), &INVERTED_PACKET);
-        drop(tc_queue);
+        let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
+        let packet = &packet_with_sender.packet;
+        assert_eq!(packet, &SIMPLE_PACKET);
+        let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
+        let packet = &packet_with_sender.packet;
+        assert_eq!(packet, &INVERTED_PACKET);
+        matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
+    }
+
+    #[test]
+    fn test_server_accept_timeout() {
+        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
+        let (tc_sender, _tc_receiver) = mpsc::channel();
+        let tm_source = SyncTmSource::default();
+        let mut tcp_server =
+            generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
+        let start = Instant::now();
+        // Call the connection handler in separate thread, does block.
+        let thread_jh = thread::spawn(move || loop {
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
+            if result.is_err() {
+                panic!("handling connection failed: {:?}", result.unwrap_err());
+            }
+            let result = result.unwrap();
+            if result == ConnectionResult::AcceptTimeout {
+                break;
+            }
+            if Instant::now() - start > Duration::from_millis(100) {
+                panic!("regular stop signal handling failed");
+            }
+        });
+        thread_jh.join().expect("thread join failed");
+    }
+
+    #[test]
+    fn test_server_stop_signal() {
+        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
+        let (tc_sender, _tc_receiver) = mpsc::channel();
+        let tm_source = SyncTmSource::default();
+        let stop_signal = Arc::new(AtomicBool::new(false));
+        let mut tcp_server = generic_tmtc_server(
+            &auto_port_addr,
+            tc_sender.clone(),
+            tm_source,
+            Some(stop_signal.clone()),
+        );
+        let dest_addr = tcp_server
+            .local_addr()
+            .expect("retrieving dest addr failed");
+        let stop_signal_copy = stop_signal.clone();
+        let start = Instant::now();
+        // Call the connection handler in separate thread, does block.
+        let thread_jh = thread::spawn(move || loop {
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
+            if result.is_err() {
+                panic!("handling connection failed: {:?}", result.unwrap_err());
+            }
+            let result = result.unwrap();
+            if result == ConnectionResult::AcceptTimeout {
+                panic!("unexpected accept timeout");
+            }
+            if stop_signal_copy.load(Ordering::Relaxed) {
+                break;
+            }
+            if Instant::now() - start > Duration::from_millis(100) {
+                panic!("regular stop signal handling failed");
+            }
+        });
+        // We connect but do not do anything.
+        let _stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
+        stop_signal.store(true, Ordering::Relaxed);
+        // No need to drop the connection, the stop signal should take care of everything.
+        thread_jh.join().expect("thread join failed");
     }
 }

--- next file ---

@@ -1,21 +1,23 @@
 //! Generic TCP TMTC servers with different TMTC format flavours.
+use alloc::sync::Arc;
 use alloc::vec;
 use alloc::vec::Vec;
+use core::sync::atomic::AtomicBool;
 use core::time::Duration;
+use mio::net::{TcpListener, TcpStream};
+use mio::{Events, Interest, Poll, Token};
 use socket2::{Domain, Socket, Type};
-use std::io::Read;
-use std::net::TcpListener;
-use std::net::{SocketAddr, TcpStream};
+use std::io::{self, Read};
+use std::net::SocketAddr;
 use std::thread;

-use crate::tmtc::{ReceivesTc, TmPacketSource};
+use crate::tmtc::{PacketSenderRaw, PacketSource};
+use crate::ComponentId;
 use thiserror::Error;

 // Re-export the TMTC in COBS server.
 pub use crate::hal::std::tcp_cobs_server::{CobsTcParser, CobsTmSender, TcpTmtcInCobsServer};
-pub use crate::hal::std::tcp_spacepackets_server::{
-    SpacepacketsTcParser, SpacepacketsTmSender, TcpSpacepacketsServer,
-};
+pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpacepacketsServer};

 /// Configuration struct for the generic TCP TMTC server
 ///
@@ -25,7 +27,7 @@ pub use crate::hal::std::tcp_spacepackets_server::{
 /// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
 ///   no TM needs to be sent, the TCP server will delay for the specified amount of time
 ///   to reduce CPU load.
-/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [TmPacketSource] and
+/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
 ///   encoding of that data. This buffer should be large enough to hold the maximum expected
 ///   TM size read from the packet source.
 /// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
@@ -41,6 +43,7 @@ pub use crate::hal::std::tcp_spacepackets_server::{
 ///   default.
 #[derive(Debug, Copy, Clone)]
 pub struct ServerConfig {
+    pub id: ComponentId,
     pub addr: SocketAddr,
     pub inner_loop_delay: Duration,
     pub tm_buffer_size: usize,
@@ -51,18 +54,20 @@ pub struct ServerConfig {
 impl ServerConfig {
     pub fn new(
+        id: ComponentId,
         addr: SocketAddr,
         inner_loop_delay: Duration,
         tm_buffer_size: usize,
         tc_buffer_size: usize,
     ) -> Self {
         Self {
+            id,
             addr,
             inner_loop_delay,
             tm_buffer_size,
             tc_buffer_size,
-            reuse_addr: false,
-            reuse_port: false,
+            reuse_addr: true,
+            reuse_port: true,
         }
     }
 }
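This is the config-default improvement referenced in the merge history: the constructor now takes the server's ComponentId first, and address/port reuse defaults flip from false to true so a restarted server can rebind immediately. A hedged sketch; the ID and port values are illustrative:

    let cfg = ServerConfig::new(
        0x05,                              // server component ID
        "127.0.0.1:7301".parse().unwrap(), // bind address
        Duration::from_millis(2),          // inner loop delay
        1024,                              // TM buffer size
        1024,                              // TC buffer size
    );
    // Both flags were previously false by default.
    assert!(cfg.reuse_addr && cfg.reuse_port);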
@@ -79,37 +84,62 @@ pub enum TcpTmtcError<TmError, TcError> {
 /// Result of one connection attempt. Contains the client address if a connection was established,
 /// in addition to the number of telecommands and telemetry packets exchanged.
-#[derive(Debug, Default)]
-pub struct ConnectionResult {
-    pub addr: Option<SocketAddr>,
+#[derive(Debug, PartialEq, Eq)]
+pub enum ConnectionResult {
+    AcceptTimeout,
+    HandledConnections(u32),
+}
+
+#[derive(Debug)]
+pub struct HandledConnectionInfo {
+    pub addr: SocketAddr,
     pub num_received_tcs: u32,
     pub num_sent_tms: u32,
+    /// The generic TCP server can be stopped using an external signal. If this happened, this
+    /// boolean will be set to true.
+    pub stopped_by_signal: bool,
+}
+
+impl HandledConnectionInfo {
+    pub fn new(addr: SocketAddr) -> Self {
+        Self {
+            addr,
+            num_received_tcs: 0,
+            num_sent_tms: 0,
+            stopped_by_signal: false,
+        }
+    }
+}
+
+pub trait HandledConnectionHandler {
+    fn handled_connection(&mut self, info: HandledConnectionInfo);
 }

 /// Generic parser abstraction for an object which can parse for telecommands given a raw
-/// bytestream received from a TCP socket and send them to a generic [ReceivesTc] telecommand
-/// receiver. This allows different encoding schemes for telecommands.
-pub trait TcpTcParser<TmError, TcError> {
+/// bytestream received from a TCP socket and send them using a generic [PacketSenderRaw]
+/// implementation. This allows different encoding schemes for telecommands.
+pub trait TcpTcParser<TmError, SendError> {
     fn handle_tc_parsing(
         &mut self,
         tc_buffer: &mut [u8],
-        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        sender_id: ComponentId,
+        tc_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         current_write_idx: usize,
         next_write_idx: &mut usize,
-    ) -> Result<(), TcpTmtcError<TmError, TcError>>;
+    ) -> Result<(), TcpTmtcError<TmError, SendError>>;
 }

 /// Generic sender abstraction for an object which can pull telemetry from a given TM source
-/// using a [TmPacketSource] and then send them back to a client using a given [TcpStream].
+/// using a [PacketSource] and then send them back to a client using a given [TcpStream].
 /// The concrete implementation can also perform any encoding steps which are necessary before
 /// sending back the data to a client.
 pub trait TcpTmSender<TmError, TcError> {
     fn handle_tm_sending(
         &mut self,
         tm_buffer: &mut [u8],
-        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         stream: &mut TcpStream,
     ) -> Result<bool, TcpTmtcError<TmError, TcError>>;
 }
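The new HandledConnectionHandler trait is the hook through which the server reports per-connection statistics once a client disconnects. A minimal hedged implementation sketch; a real handler, like the ConnectionFinishedHandler used in the tests, might record the info for later assertions instead of printing:

    struct LoggingConnectionHandler;

    impl HandledConnectionHandler for LoggingConnectionHandler {
        fn handled_connection(&mut self, info: HandledConnectionInfo) {
            println!(
                "connection from {} finished: {} TCs received, {} TMs sent",
                info.addr, info.num_received_tcs, info.num_sent_tms
            );
        }
    }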
@@ -121,9 +151,9 @@ pub trait TcpTmSender<TmError, TcError> {
 /// through the following 4 core abstractions:
 ///
 /// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
-/// 2. Parsed telecommands will be sent to the [ReceivesTc] telecommand receiver.
+/// 2. Parsed telecommands will be sent using the [PacketSenderRaw] object.
 /// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
-/// 4. [TmPacketSource] as a generic TM source used by the [TcpTmSender].
+/// 4. [PacketSource] as a generic TM source used by the [TcpTmSender].
 ///
 /// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without
 /// having to re-implement common logic.
@@ -131,32 +161,49 @@ pub trait TcpTmSender<TmError, TcError> {
 /// Currently, this framework offers the following concrete implementations:
 ///
 /// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
+/// 2. [TcpSpacepacketsServer] to exchange space packets via TCP.
 pub struct TcpTmtcGenericServer<
-    TmError,
-    TcError,
-    TmSource: TmPacketSource<Error = TmError>,
-    TcReceiver: ReceivesTc<Error = TcError>,
-    TmSender: TcpTmSender<TmError, TcError>,
-    TcParser: TcpTcParser<TmError, TcError>,
+    TmSource: PacketSource<Error = TmError>,
+    TcSender: PacketSenderRaw<Error = TcSendError>,
+    TmSender: TcpTmSender<TmError, TcSendError>,
+    TcParser: TcpTcParser<TmError, TcSendError>,
+    HandledConnection: HandledConnectionHandler,
+    TmError,
+    TcSendError,
 > {
+    pub id: ComponentId,
+    pub finished_handler: HandledConnection,
     pub(crate) listener: TcpListener,
     pub(crate) inner_loop_delay: Duration,
     pub(crate) tm_source: TmSource,
     pub(crate) tm_buffer: Vec<u8>,
-    pub(crate) tc_receiver: TcReceiver,
+    pub(crate) tc_sender: TcSender,
     pub(crate) tc_buffer: Vec<u8>,
-    tc_handler: TcParser,
-    tm_handler: TmSender,
+    poll: Poll,
+    events: Events,
+    pub tc_handler: TcParser,
+    pub tm_handler: TmSender,
+    stop_signal: Option<Arc<AtomicBool>>,
 }

 impl<
-        TmError: 'static,
-        TcError: 'static,
-        TmSource: TmPacketSource<Error = TmError>,
-        TcReceiver: ReceivesTc<Error = TcError>,
-        TmSender: TcpTmSender<TmError, TcError>,
-        TcParser: TcpTcParser<TmError, TcError>,
-    > TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, TmSender, TcParser>
+        TmSource: PacketSource<Error = TmError>,
+        TcSender: PacketSenderRaw<Error = TcSendError>,
+        TmSender: TcpTmSender<TmError, TcSendError>,
+        TcParser: TcpTcParser<TmError, TcSendError>,
+        HandledConnection: HandledConnectionHandler,
+        TmError: 'static,
+        TcSendError: 'static,
+    >
+    TcpTmtcGenericServer<
+        TmSource,
+        TcSender,
+        TmSender,
+        TcParser,
+        HandledConnection,
+        TmError,
+        TcSendError,
+    >
 {
     /// Create a new generic TMTC server instance.
     ///
@@ -168,32 +215,58 @@ impl<
     /// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
     /// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
     /// then sent back to the client.
-    /// * `tc_receiver` - Any received telecommand which was decoded successfully will be forwarded
-    /// to this TC receiver.
+    /// * `tc_sender` - Any received telecommand which was decoded successfully will be forwarded
+    /// using this TC sender.
+    /// * `stop_signal` - Can be used to stop the server even if a connection is ongoing.
     pub fn new(
         cfg: ServerConfig,
         tc_parser: TcParser,
         tm_sender: TmSender,
         tm_source: TmSource,
-        tc_receiver: TcReceiver,
+        tc_receiver: TcSender,
+        finished_handler: HandledConnection,
+        stop_signal: Option<Arc<AtomicBool>>,
     ) -> Result<Self, std::io::Error> {
         // Create a TCP listener bound to two addresses.
         let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
         socket.set_reuse_address(cfg.reuse_addr)?;
         #[cfg(unix)]
         socket.set_reuse_port(cfg.reuse_port)?;
+        // MIO does not do this for us. We want the accept calls to be non-blocking.
+        socket.set_nonblocking(true)?;
         let addr = (cfg.addr).into();
         socket.bind(&addr)?;
         socket.listen(128)?;
+        // Create a poll instance.
+        let poll = Poll::new()?;
+        // Create storage for events.
+        let events = Events::with_capacity(32);
+        let listener: std::net::TcpListener = socket.into();
+        let mut mio_listener = TcpListener::from_std(listener);
+        // Start listening for incoming connections.
+        poll.registry().register(
+            &mut mio_listener,
+            Token(0),
+            Interest::READABLE | Interest::WRITABLE,
+        )?;
         Ok(Self {
+            id: cfg.id,
             tc_handler: tc_parser,
             tm_handler: tm_sender,
-            listener: socket.into(),
+            poll,
+            events,
+            listener: mio_listener,
             inner_loop_delay: cfg.inner_loop_delay,
             tm_source,
             tm_buffer: vec![0; cfg.tm_buffer_size],
-            tc_receiver,
+            tc_sender: tc_receiver,
             tc_buffer: vec![0; cfg.tc_buffer_size],
+            stop_signal,
+            finished_handler,
         })
     }
@@ -208,11 +281,11 @@ impl<
         self.listener.local_addr()
     }

-    /// This call is used to handle the next connection to a client. Right now, it performs
+    /// This call is used to handle all connections from clients. Right now, it performs
     /// the following steps:
     ///
-    /// 1. It calls the [std::net::TcpListener::accept] method internally using the blocking API
-    /// until a client connects.
+    /// 1. It calls the [std::net::TcpListener::accept] method until a client connects. An optional
+    /// timeout can be specified for non-blocking acceptance.
     /// 2. It reads all the telecommands from the client and parses all received data using the
     /// user specified [TcpTcParser].
     /// 3. After reading and parsing all telecommands, it sends back all telemetry using the
@@ -221,15 +294,66 @@ impl<
     /// The server will delay for a user-specified period if the client connects to the server
     /// for prolonged periods and there is no traffic for the server. This is the case if the
     /// client does not send any telecommands and no telemetry needs to be sent back to the client.
-    pub fn handle_next_connection(
+    pub fn handle_all_connections(
         &mut self,
-    ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>> {
-        let mut connection_result = ConnectionResult::default();
+        poll_timeout: Option<Duration>,
+    ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcSendError>> {
+        let mut handled_connections = 0;
+        // Poll Mio for events.
+        self.poll.poll(&mut self.events, poll_timeout)?;
+        let mut acceptable_connection = false;
+        // Process each event.
+        for event in self.events.iter() {
+            if event.token() == Token(0) {
+                acceptable_connection = true;
+            } else {
+                // Should never happen.
+                panic!("unexpected TCP event token");
+            }
+        }
+        // I'd love to do this in the loop above, but there are issues with multiple borrows.
+        if acceptable_connection {
+            // There might be multiple connections available. Accept until all of them have
+            // been handled.
+            loop {
+                match self.listener.accept() {
+                    Ok((stream, addr)) => {
+                        if let Err(e) = self.handle_accepted_connection(stream, addr) {
+                            self.reregister_poll_interest()?;
+                            return Err(e);
+                        }
+                        handled_connections += 1;
+                    }
+                    Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break,
+                    Err(err) => {
+                        self.reregister_poll_interest()?;
+                        return Err(TcpTmtcError::Io(err));
+                    }
+                }
+            }
+        }
+        if handled_connections > 0 {
+            return Ok(ConnectionResult::HandledConnections(handled_connections));
+        }
+        Ok(ConnectionResult::AcceptTimeout)
+    }
+
+    fn reregister_poll_interest(&mut self) -> io::Result<()> {
+        self.poll.registry().reregister(
+            &mut self.listener,
+            Token(0),
+            Interest::READABLE | Interest::WRITABLE,
+        )
+    }
+
+    fn handle_accepted_connection(
+        &mut self,
+        mut stream: TcpStream,
+        addr: SocketAddr,
+    ) -> Result<(), TcpTmtcError<TmError, TcSendError>> {
         let mut current_write_idx;
         let mut next_write_idx = 0;
-        let (mut stream, addr) = self.listener.accept()?;
-        stream.set_nonblocking(true)?;
-        connection_result.addr = Some(addr);
+        let mut connection_result = HandledConnectionInfo::new(addr);
         current_write_idx = next_write_idx;
         loop {
             let read_result = stream.read(&mut self.tc_buffer[current_write_idx..]);
@@ -240,7 +364,8 @@ impl<
                 if current_write_idx > 0 {
                     self.tc_handler.handle_tc_parsing(
                         &mut self.tc_buffer,
-                        &mut self.tc_receiver,
+                        self.id,
+                        &self.tc_sender,
                         &mut connection_result,
                         current_write_idx,
                         &mut next_write_idx,
@@ -254,7 +379,8 @@ impl<
                 if current_write_idx == self.tc_buffer.capacity() {
                     self.tc_handler.handle_tc_parsing(
                         &mut self.tc_buffer,
-                        &mut self.tc_receiver,
+                        self.id,
+                        &self.tc_sender,
                         &mut connection_result,
                         current_write_idx,
                         &mut next_write_idx,
@@ -268,7 +394,8 @@ impl<
                 std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
                     self.tc_handler.handle_tc_parsing(
                         &mut self.tc_buffer,
-                        &mut self.tc_receiver,
+                        self.id,
+                        &self.tc_sender,
                         &mut connection_result,
                         current_write_idx,
                         &mut next_write_idx,
@@ -284,6 +411,18 @@ impl<
                         // No TC read, no TM was sent, but the client has not disconnected.
                         // Perform an inner delay to avoid burning CPU time.
                         thread::sleep(self.inner_loop_delay);
+                        // Optional stop signal handling.
+                        if self.stop_signal.is_some()
+                            && self
+                                .stop_signal
+                                .as_ref()
+                                .unwrap()
+                                .load(std::sync::atomic::Ordering::Relaxed)
+                        {
+                            connection_result.stopped_by_signal = true;
+                            self.finished_handler.handled_connection(connection_result);
+                            return Ok(());
+                        }
                     }
                 }
                 _ => {
@@ -298,7 +437,8 @@ impl<
             &mut connection_result,
             &mut stream,
         )?;
-        Ok(connection_result)
+        self.finished_handler.handled_connection(connection_result);
+        Ok(())
     }
 }
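Since `handle_all_connections` now returns after the poll timeout instead of blocking until a client connects, a typical driver thread calls it in a loop and checks a shared stop flag between iterations. A hypothetical driver sketch; the `TmtcServer` trait and `ConnectionResult` enum are local stand-ins for the generic server API shown above:

    use std::io;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::time::Duration;

    // Stand-ins for the server API sketched in this diff.
    pub enum ConnectionResult {
        AcceptTimeout,
        HandledConnections(u32),
    }

    pub trait TmtcServer {
        fn handle_all_connections(
            &mut self,
            poll_timeout: Option<Duration>,
        ) -> io::Result<ConnectionResult>;
    }

    // Drive the server until the stop flag is raised. The poll timeout bounds how
    // long a single iteration may block when no client connects.
    pub fn run_until_stopped(
        server: &mut impl TmtcServer,
        stop_signal: Arc<AtomicBool>,
    ) -> io::Result<()> {
        while !stop_signal.load(Ordering::Relaxed) {
            match server.handle_all_connections(Some(Duration::from_millis(100)))? {
                ConnectionResult::AcceptTimeout => {} // no client connected this round
                ConnectionResult::HandledConnections(num) => {
                    println!("handled {num} client connection(s)");
                }
            }
        }
        Ok(())
    }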
@@ -308,21 +448,9 @@ pub(crate) mod tests {
     use alloc::{collections::VecDeque, sync::Arc, vec::Vec};

-    use crate::tmtc::{ReceivesTcCore, TmPacketSourceCore};
+    use crate::tmtc::PacketSource;
+
+    use super::*;

-    #[derive(Default, Clone)]
-    pub(crate) struct SyncTcCacher {
-        pub(crate) tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
-    }
-
-    impl ReceivesTcCore for SyncTcCacher {
-        type Error = ();
-
-        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
-            let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
-            tc_queue.push_back(tc_raw.to_vec());
-            Ok(())
-        }
-    }

     #[derive(Default, Clone)]
     pub(crate) struct SyncTmSource {
@@ -336,7 +464,7 @@ pub(crate) mod tests {
         }
     }

-    impl TmPacketSourceCore for SyncTmSource {
+    impl PacketSource for SyncTmSource {
         type Error = ();

         fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
@@ -356,4 +484,30 @@ pub(crate) mod tests {
             Ok(0)
         }
     }
+
+    #[derive(Default)]
+    pub struct ConnectionFinishedHandler {
+        connection_info: VecDeque<HandledConnectionInfo>,
+    }
+
+    impl HandledConnectionHandler for ConnectionFinishedHandler {
+        fn handled_connection(&mut self, info: HandledConnectionInfo) {
+            self.connection_info.push_back(info);
+        }
+    }
+
+    impl ConnectionFinishedHandler {
+        pub fn check_last_connection(&mut self, num_tms: u32, num_tcs: u32) {
+            let last_conn_result = self
+                .connection_info
+                .pop_back()
+                .expect("no connection info available");
+            assert_eq!(last_conn_result.num_received_tcs, num_tcs);
+            assert_eq!(last_conn_result.num_sent_tms, num_tms);
+        }
+
+        pub fn check_no_connections_left(&self) {
+            assert!(self.connection_info.is_empty());
+        }
+    }
 }


@@ -1,48 +1,44 @@
+use alloc::sync::Arc;
+use core::{sync::atomic::AtomicBool, time::Duration};
 use delegate::delegate;
-use std::{
-    io::Write,
-    net::{SocketAddr, TcpListener, TcpStream},
-};
-
-use alloc::boxed::Box;
+use mio::net::{TcpListener, TcpStream};
+use std::{io::Write, net::SocketAddr};

 use crate::{
-    encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets},
-    tmtc::{ReceivesTc, TmPacketSource},
+    encoding::{ccsds::SpacePacketValidator, parse_buffer_for_ccsds_space_packets},
+    tmtc::{PacketSenderRaw, PacketSource},
+    ComponentId,
 };

 use super::tcp_server::{
-    ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
+    ConnectionResult, HandledConnectionHandler, HandledConnectionInfo, ServerConfig, TcpTcParser,
+    TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
 };

-/// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer].
-pub struct SpacepacketsTcParser {
-    packet_id_lookup: Box<dyn PacketIdLookup + Send>,
-}
-
-impl SpacepacketsTcParser {
-    pub fn new(packet_id_lookup: Box<dyn PacketIdLookup + Send>) -> Self {
-        Self { packet_id_lookup }
-    }
-}
-
-impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTcParser {
+impl<T: SpacePacketValidator, TmError, TcError: 'static> TcpTcParser<TmError, TcError> for T {
     fn handle_tc_parsing(
         &mut self,
         tc_buffer: &mut [u8],
-        tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        sender_id: ComponentId,
+        tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         current_write_idx: usize,
         next_write_idx: &mut usize,
     ) -> Result<(), TcpTmtcError<TmError, TcError>> {
         // Reader vec full, need to parse for packets.
-        conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
-            &mut tc_buffer[..current_write_idx],
-            self.packet_id_lookup.as_ref(),
-            tc_receiver.upcast_mut(),
-            next_write_idx,
+        let parse_result = parse_buffer_for_ccsds_space_packets(
+            &tc_buffer[..current_write_idx],
+            self,
+            sender_id,
+            tc_sender,
         )
         .map_err(|e| TcpTmtcError::TcError(e))?;
+        if let Some(broken_tail_start) = parse_result.incomplete_tail_start {
+            // Copy broken tail to front of buffer.
+            tc_buffer.copy_within(broken_tail_start..current_write_idx, 0);
+            *next_write_idx = current_write_idx - broken_tail_start;
+        }
+        conn_result.num_received_tcs += parse_result.packets_found;
         Ok(())
     }
 }
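The tail handling added above is plain index arithmetic: the bytes of a partially received packet left at the end of the parsed region are moved to the front with `copy_within`, and the next stream read appends behind them. A standalone sketch of that arithmetic:

    fn main() {
        // Buffer holding one complete 4-byte packet plus 2 bytes of an
        // incomplete second packet.
        let mut tc_buffer = [1u8, 2, 3, 4, 0xAA, 0xBB, 0, 0];
        let current_write_idx = 6; // bytes received so far
        let broken_tail_start = 4; // the parser only consumed the first packet

        // Copy broken tail to front of buffer, exactly as in the parser above.
        tc_buffer.copy_within(broken_tail_start..current_write_idx, 0);
        let next_write_idx = current_write_idx - broken_tail_start;

        assert_eq!(&tc_buffer[..next_write_idx], [0xAA, 0xBB]);
        // The next read from the TCP stream starts at `next_write_idx` and can
        // complete the partial packet.
    }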
@@ -55,8 +51,8 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
     fn handle_tm_sending(
         &mut self,
         tm_buffer: &mut [u8],
-        tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
-        conn_result: &mut ConnectionResult,
+        tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
+        conn_result: &mut HandledConnectionInfo,
         stream: &mut TcpStream,
     ) -> Result<bool, TcpTmtcError<TmError, TcError>> {
         let mut tm_was_sent = false;
@@ -83,35 +79,41 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
 ///
 /// This server only works if
 /// [CCSDS 133.0-B-2 space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf) are the only
-/// packet type being exchanged. It uses the CCSDS [spacepackets::PacketId] as the packet delimiter
-/// and start marker when parsing for packets. The user specifies a set of expected
-/// [spacepackets::PacketId]s as part of the server configuration for that purpose.
+/// packet type being exchanged. It uses the CCSDS space packet header [spacepackets::SpHeader] and
+/// a user specified [SpacePacketValidator] to determine the space packets relevant for further
+/// processing.
 ///
 /// ## Example
+///
 /// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
 /// also serve as the example application for this module.
 pub struct TcpSpacepacketsServer<
+    TmSource: PacketSource<Error = TmError>,
+    TcSender: PacketSenderRaw<Error = SendError>,
+    Validator: SpacePacketValidator,
+    HandledConnection: HandledConnectionHandler,
     TmError,
-    TcError: 'static,
-    TmSource: TmPacketSource<Error = TmError>,
-    TcReceiver: ReceivesTc<Error = TcError>,
+    SendError: 'static,
 > {
-    generic_server: TcpTmtcGenericServer<
-        TmError,
-        TcError,
+    pub generic_server: TcpTmtcGenericServer<
         TmSource,
-        TcReceiver,
+        TcSender,
         SpacepacketsTmSender,
-        SpacepacketsTcParser,
+        Validator,
+        HandledConnection,
+        TmError,
+        SendError,
     >,
 }

 impl<
+        TmSource: PacketSource<Error = TmError>,
+        TcSender: PacketSenderRaw<Error = TcError>,
+        Validator: SpacePacketValidator,
+        HandledConnection: HandledConnectionHandler,
         TmError: 'static,
         TcError: 'static,
-        TmSource: TmPacketSource<Error = TmError>,
-        TcReceiver: ReceivesTc<Error = TcError>,
-    > TcpSpacepacketsServer<TmError, TcError, TmSource, TcReceiver>
+    > TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection, TmError, TcError>
 {
     ///
     /// ## Parameter
@@ -119,23 +121,31 @@ impl<
     /// * `cfg` - Configuration of the server.
     /// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
     /// then sent back to the client.
-    /// * `tc_receiver` - Any received telecommands which were decoded successfully will be
-    /// forwarded to this TC receiver.
-    /// * `packet_id_lookup` - This lookup table contains the relevant packets IDs for packet
-    /// parsing. This mechanism is used to have a start marker for finding CCSDS packets.
+    /// * `tc_sender` - Any received telecommands which were decoded successfully will be
+    /// forwarded using this [PacketSenderRaw].
+    /// * `validator` - Used to determine the space packets relevant for further processing and
+    /// to detect broken space packets.
+    /// * `handled_connection_hook` - Called to notify the user about a successfully handled
+    /// connection.
+    /// * `stop_signal` - Can be used to shut down the TCP server even for longer running
+    /// connections.
     pub fn new(
         cfg: ServerConfig,
         tm_source: TmSource,
-        tc_receiver: TcReceiver,
-        packet_id_lookup: Box<dyn PacketIdLookup + Send>,
+        tc_sender: TcSender,
+        validator: Validator,
+        handled_connection_hook: HandledConnection,
+        stop_signal: Option<Arc<AtomicBool>>,
     ) -> Result<Self, std::io::Error> {
         Ok(Self {
             generic_server: TcpTmtcGenericServer::new(
                 cfg,
-                SpacepacketsTcParser::new(packet_id_lookup),
+                validator,
                 SpacepacketsTmSender::default(),
                 tm_source,
-                tc_receiver,
+                tc_sender,
+                handled_connection_hook,
+                stop_signal,
             )?,
         })
     }
@@ -148,9 +158,10 @@ impl<
         /// useful if using the port number 0 for OS auto-assignment.
         pub fn local_addr(&self) -> std::io::Result<SocketAddr>;

-        /// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
-        pub fn handle_next_connection(
+        /// Delegation to the [TcpTmtcGenericServer::handle_all_connections] call.
+        pub fn handle_all_connections(
             &mut self,
+            poll_timeout: Option<Duration>
         ) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
     }
 }
@@ -167,39 +178,70 @@ mod tests {
     use std::{
         io::{Read, Write},
         net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
+        sync::mpsc,
         thread,
     };

-    use alloc::{boxed::Box, sync::Arc};
+    use alloc::sync::Arc;
     use hashbrown::HashSet;
     use spacepackets::{
         ecss::{tc::PusTcCreator, WritablePusPacket},
-        PacketId, SpHeader,
+        CcsdsPacket, PacketId, SpHeader,
     };

-    use crate::hal::std::tcp_server::{
-        tests::{SyncTcCacher, SyncTmSource},
-        ServerConfig,
+    use crate::{
+        encoding::ccsds::{SpValidity, SpacePacketValidator},
+        hal::std::tcp_server::{
+            tests::{ConnectionFinishedHandler, SyncTmSource},
+            ConnectionResult, ServerConfig,
+        },
+        queue::GenericSendError,
+        tmtc::PacketAsVec,
+        ComponentId,
     };

     use super::TcpSpacepacketsServer;

+    const TCP_SERVER_ID: ComponentId = 0x05;
     const TEST_APID_0: u16 = 0x02;
-    const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
+    const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
     const TEST_APID_1: u16 = 0x10;
-    const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
+    const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);

+    #[derive(Default)]
+    pub struct SimpleValidator(pub HashSet<PacketId>);
+
+    impl SpacePacketValidator for SimpleValidator {
+        fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> SpValidity {
+            if self.0.contains(&sp_header.packet_id()) {
+                return SpValidity::Valid;
+            }
+            // Simple case: Assume that the interface always contains valid space packets.
+            SpValidity::Skip
+        }
+    }
     fn generic_tmtc_server(
         addr: &SocketAddr,
-        tc_receiver: SyncTcCacher,
+        tc_sender: mpsc::Sender<PacketAsVec>,
         tm_source: SyncTmSource,
-        packet_id_lookup: HashSet<PacketId>,
-    ) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher> {
+        validator: SimpleValidator,
+        stop_signal: Option<Arc<AtomicBool>>,
+    ) -> TcpSpacepacketsServer<
+        SyncTmSource,
+        mpsc::Sender<PacketAsVec>,
+        SimpleValidator,
+        ConnectionFinishedHandler,
+        (),
+        GenericSendError,
+    > {
         TcpSpacepacketsServer::new(
-            ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
+            ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
             tm_source,
-            tc_receiver,
-            Box::new(packet_id_lookup),
+            tc_sender,
+            validator,
+            ConnectionFinishedHandler::default(),
+            stop_signal,
         )
         .expect("TCP server generation failed")
     }
@@ -207,15 +249,16 @@ mod tests {
     #[test]
     fn test_basic_tc_only() {
         let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
-        let tc_receiver = SyncTcCacher::default();
+        let (tc_sender, tc_receiver) = mpsc::channel();
         let tm_source = SyncTmSource::default();
-        let mut packet_id_lookup = HashSet::new();
-        packet_id_lookup.insert(TEST_PACKET_ID_0);
+        let mut validator = SimpleValidator::default();
+        validator.0.insert(TEST_PACKET_ID_0);
         let mut tcp_server = generic_tmtc_server(
             &auto_port_addr,
-            tc_receiver.clone(),
+            tc_sender.clone(),
             tm_source,
-            packet_id_lookup,
+            validator,
+            None,
         );
         let dest_addr = tcp_server
             .local_addr()
@@ -224,17 +267,24 @@ mod tests {
         let set_if_done = conn_handled.clone();
         // Call the connection handler in separate thread, does block.
         thread::spawn(move || {
-            let result = tcp_server.handle_next_connection();
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
             if result.is_err() {
                 panic!("handling connection failed: {:?}", result.unwrap_err());
             }
             let conn_result = result.unwrap();
-            assert_eq!(conn_result.num_received_tcs, 1);
-            assert_eq!(conn_result.num_sent_tms, 0);
+            matches!(conn_result, ConnectionResult::HandledConnections(1));
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_last_connection(0, 1);
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_no_connections_left();
             set_if_done.store(true, Ordering::Relaxed);
         });
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
+        let ping_tc =
+            PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
         let tc_0 = ping_tc.to_vec().expect("packet generation failed");
         let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
         stream
@@ -251,40 +301,40 @@ mod tests {
         if !conn_handled.load(Ordering::Relaxed) {
             panic!("connection was not handled properly");
         }
-        // Check that TC has arrived.
-        let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
-        assert_eq!(tc_queue.len(), 1);
-        assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
+        let packet = tc_receiver.try_recv().expect("receiving TC failed");
+        assert_eq!(packet.packet, tc_0);
+        matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
     }

     #[test]
     fn test_multi_tc_multi_tm() {
         let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
-        let tc_receiver = SyncTcCacher::default();
+        let (tc_sender, tc_receiver) = mpsc::channel();
         let mut tm_source = SyncTmSource::default();
         // Add telemetry
         let mut total_tm_len = 0;
-        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
-        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
+        let verif_tm =
+            PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true);
         let tm_0 = verif_tm.to_vec().expect("writing packet failed");
         total_tm_len += tm_0.len();
         tm_source.add_tm(&tm_0);
-        let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
-        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true);
+        let verif_tm =
+            PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true);
         let tm_1 = verif_tm.to_vec().expect("writing packet failed");
         total_tm_len += tm_1.len();
         tm_source.add_tm(&tm_1);
         // Set up server
-        let mut packet_id_lookup = HashSet::new();
-        packet_id_lookup.insert(TEST_PACKET_ID_0);
-        packet_id_lookup.insert(TEST_PACKET_ID_1);
+        let mut validator = SimpleValidator::default();
+        validator.0.insert(TEST_PACKET_ID_0);
+        validator.0.insert(TEST_PACKET_ID_1);
         let mut tcp_server = generic_tmtc_server(
             &auto_port_addr,
-            tc_receiver.clone(),
+            tc_sender.clone(),
             tm_source,
-            packet_id_lookup,
+            validator,
+            None,
         );
         let dest_addr = tcp_server
             .local_addr()
@@ -294,16 +344,20 @@ mod tests {
         // Call the connection handler in separate thread, does block.
         thread::spawn(move || {
-            let result = tcp_server.handle_next_connection();
+            let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
             if result.is_err() {
                 panic!("handling connection failed: {:?}", result.unwrap_err());
             }
             let conn_result = result.unwrap();
-            assert_eq!(
-                conn_result.num_received_tcs, 2,
-                "wrong number of received TCs"
-            );
-            assert_eq!(conn_result.num_sent_tms, 2, "wrong number of sent TMs");
+            matches!(conn_result, ConnectionResult::HandledConnections(1));
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_last_connection(2, 2);
+            tcp_server
+                .generic_server
+                .finished_handler
+                .check_no_connections_left();
             set_if_done.store(true, Ordering::Relaxed);
         });
         let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
@@ -312,14 +366,14 @@ mod tests {
.expect("setting reas timeout failed"); .expect("setting reas timeout failed");
// Send telecommands // Send telecommands
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let tc_0 = ping_tc.to_vec().expect("ping tc creation failed"); let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
stream stream
.write_all(&tc_0) .write_all(&tc_0)
.expect("writing to TCP server failed"); .expect("writing to TCP server failed");
let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); let action_tc =
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let tc_1 = action_tc.to_vec().expect("action tc creation failed"); let tc_1 = action_tc.to_vec().expect("action tc creation failed");
stream stream
.write_all(&tc_1) .write_all(&tc_1)
@@ -354,9 +408,10 @@ mod tests {
panic!("connection was not handled properly"); panic!("connection was not handled properly");
} }
// Check that TC has arrived. // Check that TC has arrived.
let mut tc_queue = tc_receiver.tc_queue.lock().unwrap(); let packet_0 = tc_receiver.try_recv().expect("receiving TC failed");
assert_eq!(tc_queue.len(), 2); assert_eq!(packet_0.packet, tc_0);
assert_eq!(tc_queue.pop_front().unwrap(), tc_0); let packet_1 = tc_receiver.try_recv().expect("receiving TC failed");
assert_eq!(tc_queue.pop_front().unwrap(), tc_1); assert_eq!(packet_1.packet, tc_1);
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
} }
} }


@@ -1,7 +1,8 @@
 //! Generic UDP TC server.
-use crate::tmtc::{ReceivesTc, ReceivesTcCore};
-use std::boxed::Box;
-use std::io::{Error, ErrorKind};
+use crate::tmtc::PacketSenderRaw;
+use crate::ComponentId;
+use core::fmt::Debug;
+use std::io::{self, ErrorKind};
 use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
 use std::vec;
 use std::vec::Vec;
@@ -11,45 +12,46 @@ use std::vec::Vec;
 ///
 /// It caches all received telecommands into a vector. The maximum expected telecommand size should
 /// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
-/// receiver in form of a special trait object which implements [ReceivesTc]. Please note that the
-/// receiver should copy out the received data if the data is required past the
-/// [ReceivesTcCore::pass_tc] call.
+/// sender in form of a special trait object which implements [PacketSenderRaw]. For example, this
+/// can be used to send the telecommands to a centralized TC source component for further
+/// processing and routing.
 ///
 /// # Examples
 ///
 /// ```
 /// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
+/// use std::sync::mpsc;
 /// use spacepackets::ecss::WritablePusPacket;
 /// use satrs::hal::std::udp_server::UdpTcServer;
-/// use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
+/// use satrs::ComponentId;
+/// use satrs::tmtc::PacketSenderRaw;
 /// use spacepackets::SpHeader;
 /// use spacepackets::ecss::tc::PusTcCreator;
 ///
-/// #[derive (Default)]
-/// struct PingReceiver {}
-/// impl ReceivesTcCore for PingReceiver {
-///     type Error = ();
-///     fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
-///         assert_eq!(tc_raw.len(), 13);
-///         Ok(())
-///     }
-/// }
+/// const UDP_SERVER_ID: ComponentId = 0x05;
 ///
-/// let mut buf = [0; 32];
+/// let (packet_sender, packet_receiver) = mpsc::channel();
 /// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
-/// let ping_receiver = PingReceiver::default();
-/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
+/// let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, packet_sender)
 ///     .expect("Creating UDP TMTC server failed");
-/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
-/// let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
-/// let len = pus_tc
-///     .write_to_bytes(&mut buf)
-///     .expect("Error writing PUS TC packet");
-/// assert_eq!(len, 13);
-/// let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
+/// let sph = SpHeader::new_from_apid(0x02);
+/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
+/// // Can not fail.
+/// let ping_tc_raw = pus_tc.to_vec().unwrap();
+///
+/// // Now create a UDP client and send the ping telecommand to the server.
+/// let client = UdpSocket::bind("127.0.0.1:0").expect("creating UDP client failed");
 /// client
-///     .send_to(&buf[0..len], dest_addr)
+///     .send_to(&ping_tc_raw, dest_addr)
 ///     .expect("Error sending PUS TC via UDP");
+/// let recv_result = udp_tc_server.try_recv_tc();
+/// assert!(recv_result.is_ok());
+/// // The packet is received by the UDP TC server and sent via the mpsc channel.
+/// let sent_packet_with_sender = packet_receiver.try_recv().expect("expected telecommand");
+/// assert_eq!(sent_packet_with_sender.packet, ping_tc_raw);
+/// assert_eq!(sent_packet_with_sender.sender_id, UDP_SERVER_ID);
+/// // No more packets received.
+/// matches!(packet_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
 /// ```
 ///
 /// The [satrs-example crate](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/satrs-example)
@@ -57,65 +59,45 @@ use std::vec::Vec;
 /// [example code](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/tmtc.rs#L67)
 /// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
 /// and then forwards them to a generic CCSDS packet receiver.
-pub struct UdpTcServer<E> {
+pub struct UdpTcServer<TcSender: PacketSenderRaw<Error = SendError>, SendError> {
+    pub id: ComponentId,
     pub socket: UdpSocket,
     recv_buf: Vec<u8>,
     sender_addr: Option<SocketAddr>,
-    tc_receiver: Box<dyn ReceivesTc<Error = E>>,
+    pub tc_sender: TcSender,
 }

-#[derive(Debug)]
-pub enum ReceiveResult<E> {
+#[derive(Debug, thiserror::Error)]
+pub enum ReceiveResult<SendError: Debug + 'static> {
+    #[error("nothing was received")]
     NothingReceived,
-    IoError(Error),
-    ReceiverError(E),
+    #[error(transparent)]
+    Io(#[from] io::Error),
+    #[error(transparent)]
+    Send(SendError),
 }
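The `#[from]` attribute on the new `Io` variant is what lets the implementation use `?` directly on `io::Result` values, while `Send` has no `From` impl and is mapped explicitly with `map_err(ReceiveResult::Send)` further down. A condensed sketch of the same thiserror pattern, with a hypothetical `ChannelClosed` error standing in for the generic send error:

    use std::io;

    #[derive(Debug, thiserror::Error)]
    #[error("channel closed")]
    pub struct ChannelClosed;

    #[derive(Debug, thiserror::Error)]
    pub enum RecvError<SendError: std::error::Error + 'static> {
        #[error("nothing was received")]
        NothingReceived,
        #[error(transparent)]
        Io(#[from] io::Error), // `?` converts io::Error automatically
        #[error(transparent)]
        Send(SendError), // no From impl, converted manually
    }

    fn receive(send_result: Result<(), ChannelClosed>) -> Result<(), RecvError<ChannelClosed>> {
        let _meta = std::fs::metadata(".")?; // Io variant via From
        send_result.map_err(RecvError::Send) // Send variant via map_err
    }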
-impl<E> From<Error> for ReceiveResult<E> {
-    fn from(e: Error) -> Self {
-        ReceiveResult::IoError(e)
-    }
-}
-
-impl<E: PartialEq> PartialEq for ReceiveResult<E> {
-    fn eq(&self, other: &Self) -> bool {
-        use ReceiveResult::*;
-        match (self, other) {
-            (IoError(ref e), IoError(ref other_e)) => e.kind() == other_e.kind(),
-            (NothingReceived, NothingReceived) => true,
-            (ReceiverError(e), ReceiverError(other_e)) => e == other_e,
-            _ => false,
-        }
-    }
-}
-
-impl<E: Eq + PartialEq> Eq for ReceiveResult<E> {}
-
-impl<E: 'static> ReceivesTcCore for UdpTcServer<E> {
-    type Error = E;
-
-    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
-        self.tc_receiver.pass_tc(tc_raw)
-    }
-}
-
-impl<E: 'static> UdpTcServer<E> {
+impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
+    UdpTcServer<TcSender, SendError>
+{
     pub fn new<A: ToSocketAddrs>(
+        id: ComponentId,
         addr: A,
         max_recv_size: usize,
-        tc_receiver: Box<dyn ReceivesTc<Error = E>>,
-    ) -> Result<Self, Error> {
+        tc_sender: TcSender,
+    ) -> Result<Self, io::Error> {
         let server = Self {
+            id,
             socket: UdpSocket::bind(addr)?,
             recv_buf: vec![0; max_recv_size],
             sender_addr: None,
-            tc_receiver,
+            tc_sender,
         };
         server.socket.set_nonblocking(true)?;
         Ok(server)
     }

-    pub fn try_recv_tc(&mut self) -> Result<(usize, SocketAddr), ReceiveResult<E>> {
+    pub fn try_recv_tc(&mut self) -> Result<(usize, SocketAddr), ReceiveResult<SendError>> {
         let res = match self.socket.recv_from(&mut self.recv_buf) {
             Ok(res) => res,
             Err(e) => {
@@ -128,9 +110,9 @@ impl<E: 'static> UdpTcServer<E> {
         };
         let (num_bytes, from) = res;
         self.sender_addr = Some(from);
-        self.tc_receiver
-            .pass_tc(&self.recv_buf[0..num_bytes])
-            .map_err(|e| ReceiveResult::ReceiverError(e))?;
+        self.tc_sender
+            .send_packet(self.id, &self.recv_buf[0..num_bytes])
+            .map_err(ReceiveResult::Send)?;
         Ok(res)
     }
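Because the underlying socket is non-blocking, callers typically drain it in a loop until the `NothingReceived` marker comes back. A hypothetical polling helper written against the signatures shown above:

    use core::fmt::Debug;
    use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
    use satrs::tmtc::PacketSenderRaw;

    // Drain all datagrams currently queued on the socket. `NothingReceived` is the
    // normal end-of-poll marker; everything else is a real error.
    fn drain_udp_server<Sender, E>(
        server: &mut UdpTcServer<Sender, E>,
    ) -> Result<u32, ReceiveResult<E>>
    where
        Sender: PacketSenderRaw<Error = E>,
        E: Debug + 'static,
    {
        let mut num_packets = 0;
        loop {
            match server.try_recv_tc() {
                Ok((_num_bytes, _sender_addr)) => num_packets += 1,
                Err(ReceiveResult::NothingReceived) => return Ok(num_packets),
                Err(e) => return Err(e),
            }
        }
    }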
@@ -142,29 +124,35 @@ impl<E: 'static> UdpTcServer<E> {
 #[cfg(test)]
 mod tests {
     use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
-    use crate::tmtc::ReceivesTcCore;
+    use crate::queue::GenericSendError;
+    use crate::tmtc::PacketSenderRaw;
+    use crate::ComponentId;
+    use core::cell::RefCell;
     use spacepackets::ecss::tc::PusTcCreator;
     use spacepackets::ecss::WritablePusPacket;
     use spacepackets::SpHeader;
-    use std::boxed::Box;
     use std::collections::VecDeque;
     use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
     use std::vec::Vec;

     fn is_send<T: Send>(_: &T) {}

+    const UDP_SERVER_ID: ComponentId = 0x05;
+
     #[derive(Default)]
     struct PingReceiver {
-        pub sent_cmds: VecDeque<Vec<u8>>,
+        pub sent_cmds: RefCell<VecDeque<Vec<u8>>>,
     }

-    impl ReceivesTcCore for PingReceiver {
-        type Error = ();
-        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
+    impl PacketSenderRaw for PingReceiver {
+        type Error = GenericSendError;
+
+        fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
+            assert_eq!(sender_id, UDP_SERVER_ID);
             let mut sent_data = Vec::new();
             sent_data.extend_from_slice(tc_raw);
-            self.sent_cmds.push_back(sent_data);
+            let mut queue = self.sent_cmds.borrow_mut();
+            queue.push_back(sent_data);
             Ok(())
         }
     }
@@ -175,11 +163,11 @@ mod tests {
         let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
         let ping_receiver = PingReceiver::default();
         is_send(&ping_receiver);
-        let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
+        let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
             .expect("Creating UDP TMTC server failed");
         is_send(&udp_tc_server);
-        let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
-        let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
+        let sph = SpHeader::new_from_apid(0x02);
+        let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
         let len = pus_tc
             .write_to_bytes(&mut buf)
             .expect("Error writing PUS TC packet");
@@ -195,9 +183,10 @@ mod tests {
             udp_tc_server.last_sender().expect("No sender set"),
             local_addr
         );
-        let ping_receiver: &mut PingReceiver = udp_tc_server.tc_receiver.downcast_mut().unwrap();
-        assert_eq!(ping_receiver.sent_cmds.len(), 1);
-        let sent_cmd = ping_receiver.sent_cmds.pop_front().unwrap();
+        let ping_receiver = &mut udp_tc_server.tc_sender;
+        let mut queue = ping_receiver.sent_cmds.borrow_mut();
+        assert_eq!(queue.len(), 1);
+        let sent_cmd = queue.pop_front().unwrap();
         assert_eq!(sent_cmd, buf[0..len]);
     }
@@ -205,11 +194,11 @@ mod tests {
     fn test_nothing_received() {
         let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7779);
         let ping_receiver = PingReceiver::default();
-        let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
+        let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
             .expect("Creating UDP TMTC server failed");
         let res = udp_tc_server.try_recv_tc();
         assert!(res.is_err());
         let err = res.unwrap_err();
-        assert_eq!(err, ReceiveResult::NothingReceived);
+        matches!(err, ReceiveResult::NothingReceived);
     }
 }


@@ -14,7 +14,7 @@
 //! - The [pus] module which provides special support for projects using
 //! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
 #![no_std]
-#![cfg_attr(doc_cfg, feature(doc_cfg))]
+#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
 #[cfg(feature = "alloc")]
 extern crate alloc;
 #[cfg(feature = "alloc")]
@@ -23,17 +23,14 @@ extern crate downcast_rs;
 extern crate std;

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod cfdp;
 pub mod encoding;
 pub mod event_man;
 pub mod events;
 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod executable;
 pub mod hal;
 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod mode_tree;
 pub mod pool;
 pub mod power;
@@ -52,7 +49,82 @@ pub mod params;
 pub use spacepackets;

-pub use queue::ChannelId;
+use spacepackets::PacketId;

 /// Generic component ID type.
 pub type ComponentId = u64;

+pub trait ValidatorU16Id {
+    fn validate(&self, id: u16) -> bool;
+}
+
+#[cfg(feature = "alloc")]
+impl ValidatorU16Id for alloc::vec::Vec<u16> {
+    fn validate(&self, id: u16) -> bool {
+        self.contains(&id)
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl ValidatorU16Id for hashbrown::HashSet<u16> {
+    fn validate(&self, id: u16) -> bool {
+        self.contains(&id)
+    }
+}
+
+impl ValidatorU16Id for u16 {
+    fn validate(&self, id: u16) -> bool {
+        id == *self
+    }
+}
+
+impl ValidatorU16Id for &u16 {
+    fn validate(&self, id: u16) -> bool {
+        id == **self
+    }
+}
+
+impl ValidatorU16Id for [u16] {
+    fn validate(&self, id: u16) -> bool {
+        self.binary_search(&id).is_ok()
+    }
+}
+
+impl ValidatorU16Id for &[u16] {
+    fn validate(&self, id: u16) -> bool {
+        self.binary_search(&id).is_ok()
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl ValidatorU16Id for alloc::vec::Vec<spacepackets::PacketId> {
+    fn validate(&self, packet_id: u16) -> bool {
+        self.contains(&PacketId::from(packet_id))
+    }
+}
+
+#[cfg(feature = "alloc")]
+impl ValidatorU16Id for hashbrown::HashSet<spacepackets::PacketId> {
+    fn validate(&self, packet_id: u16) -> bool {
+        self.contains(&PacketId::from(packet_id))
+    }
+}
+
+#[cfg(feature = "std")]
+impl ValidatorU16Id for std::collections::HashSet<PacketId> {
+    fn validate(&self, packet_id: u16) -> bool {
+        self.contains(&PacketId::from(packet_id))
+    }
+}
+
+impl ValidatorU16Id for [PacketId] {
+    fn validate(&self, packet_id: u16) -> bool {
+        self.binary_search(&PacketId::from(packet_id)).is_ok()
+    }
+}
+
+impl ValidatorU16Id for &[PacketId] {
+    fn validate(&self, packet_id: u16) -> bool {
+        self.binary_search(&PacketId::from(packet_id)).is_ok()
+    }
+}
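With these blanket impls, plain collections double as packet-ID filters. A short usage sketch, assuming `ValidatorU16Id` is imported from the crate root; note that the slice impls use `binary_search`, so slices must be sorted:

    use satrs::ValidatorU16Id;

    fn main() {
        // A sorted slice of raw u16 packet IDs acts as a validator directly.
        let ids: &[u16] = &[0x1800, 0x1801, 0x1842];
        assert!(ids.validate(0x1842));
        assert!(!ids.validate(0x1843));

        // A single ID validates only itself.
        assert!(0x1800u16.validate(0x1800));
    }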

View File

@@ -179,6 +179,14 @@ impl From<GenericTargetedMessagingError> for ModeError {
 pub trait ModeProvider {
     fn mode_and_submode(&self) -> ModeAndSubmode;

+    fn mode(&self) -> Mode {
+        self.mode_and_submode().mode()
+    }
+
+    fn submode(&self) -> Submode {
+        self.mode_and_submode().submode()
+    }
 }
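Implementors now only have to supply `mode_and_submode`; the `mode` and `submode` accessors are provided. A sketch with hypothetical stand-in types (the real `Mode`, `Submode` and `ModeAndSubmode` definitions live elsewhere in the crate and are not part of this diff):

    // Hypothetical stand-ins for the satrs mode types.
    type Mode = u32;
    type Submode = u16;

    #[derive(Debug, Copy, Clone)]
    struct ModeAndSubmode {
        mode: Mode,
        submode: Submode,
    }

    impl ModeAndSubmode {
        fn mode(&self) -> Mode {
            self.mode
        }
        fn submode(&self) -> Submode {
            self.submode
        }
    }

    trait ModeProvider {
        fn mode_and_submode(&self) -> ModeAndSubmode;
        // Provided methods, as introduced by this diff.
        fn mode(&self) -> Mode {
            self.mode_and_submode().mode()
        }
        fn submode(&self) -> Submode {
            self.mode_and_submode().submode()
        }
    }

    struct Device {
        current: ModeAndSubmode,
    }

    impl ModeProvider for Device {
        // Only the accessor needs to be written; mode() and submode() come for free.
        fn mode_and_submode(&self) -> ModeAndSubmode {
            self.current
        }
    }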
 pub trait ModeRequestHandler: ModeProvider {
@@ -190,7 +198,7 @@ pub trait ModeRequestHandler: ModeProvider {
         mode_and_submode: ModeAndSubmode,
     ) -> Result<(), Self::Error>;

-    fn announce_mode(&self, requestor_info: MessageMetadata, recursive: bool);
+    fn announce_mode(&self, requestor_info: Option<MessageMetadata>, recursive: bool);

     fn handle_mode_reached(
         &mut self,
@@ -222,11 +230,11 @@ pub trait ModeRequestHandler: ModeProvider {
                 ModeReply::ModeReply(self.mode_and_submode()),
             ),
             ModeRequest::AnnounceMode => {
-                self.announce_mode(request.requestor_info, false);
+                self.announce_mode(Some(request.requestor_info), false);
                 Ok(())
             }
             ModeRequest::AnnounceModeRecursive => {
-                self.announce_mode(request.requestor_info, true);
+                self.announce_mode(Some(request.requestor_info), true);
                 Ok(())
             }
             ModeRequest::ModeInfo(info) => self.handle_mode_info(request.requestor_info, info),
@@ -260,16 +268,9 @@ pub trait ModeReplySender {
 }

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod alloc_mod {
-    use crate::{
-        mode::ModeRequest,
-        queue::GenericTargetedMessagingError,
-        request::{
-            MessageMetadata, MessageSender, MessageSenderAndReceiver, MessageSenderMap,
-            RequestAndReplySenderAndReceiver, RequestId,
-        },
-        ComponentId,
+    use crate::request::{
+        MessageSender, MessageSenderAndReceiver, MessageSenderMap, RequestAndReplySenderAndReceiver,
     };

     use super::*;
@@ -548,12 +549,9 @@ pub mod alloc_mod {
 }

 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod std_mod {
     use std::sync::mpsc;

-    use crate::request::GenericMessage;
-
     use super::*;

     pub type ModeRequestHandlerMpsc = ModeRequestHandlerInterface<


@@ -43,7 +43,7 @@
 //! This includes the [ParamsHeapless] enumeration for contained values which do not require heap
 //! allocation, and the [Params] which enumerates [ParamsHeapless] and some additional types which
 //! require [alloc] support but allow for more flexibility.
-use crate::pool::StoreAddr;
+use crate::pool::PoolAddr;
 use core::fmt::Debug;
 use core::mem::size_of;
 use paste::paste;
@@ -588,17 +588,15 @@ from_conversions_for_raw!(
 #[non_exhaustive]
 pub enum Params {
     Heapless(ParamsHeapless),
-    Store(StoreAddr),
+    Store(PoolAddr),
     #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     Vec(Vec<u8>),
     #[cfg(feature = "alloc")]
-    #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
     String(String),
 }

-impl From<StoreAddr> for Params {
-    fn from(x: StoreAddr) -> Self {
+impl From<PoolAddr> for Params {
+    fn from(x: PoolAddr) -> Self {
         Self::Store(x)
     }
 }
@@ -616,7 +614,6 @@ impl From<ParamsRaw> for Params {
 }

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 impl From<Vec<u8>> for Params {
     fn from(val: Vec<u8>) -> Self {
         Self::Vec(val)
@@ -625,7 +622,6 @@ impl From<Vec<u8>> for Params {
 /// Converts a byte slice into the [Params::Vec] variant
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 impl From<&[u8]> for Params {
     fn from(val: &[u8]) -> Self {
         Self::Vec(val.to_vec())
@@ -633,7 +629,6 @@ impl From<&[u8]> for Params {
 }

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 impl From<String> for Params {
     fn from(val: String) -> Self {
         Self::String(val)
@@ -641,7 +636,6 @@ impl From<String> for Params {
 }

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 /// Converts a string slice into the [Params::String] variant
 impl From<&str> for Params {
     fn from(val: &str) -> Self {
@@ -649,52 +643,18 @@ impl From<&str> for Params {
     }
 }

-/// Please note while [WritableToBeBytes] is implemented for [Params], the default implementation
-/// will not be able to process the [Params::Store] parameter variant.
-impl WritableToBeBytes for Params {
+impl WritableToBeBytes for ParamsHeapless {
     fn written_len(&self) -> usize {
         match self {
-            Params::Heapless(p) => match p {
-                ParamsHeapless::Raw(raw) => raw.written_len(),
-                ParamsHeapless::EcssEnum(enumeration) => enumeration.written_len(),
-            },
-            Params::Store(_) => 0,
-            #[cfg(feature = "alloc")]
-            Params::Vec(vec) => vec.len(),
-            #[cfg(feature = "alloc")]
-            Params::String(string) => string.len(),
+            ParamsHeapless::Raw(raw) => raw.written_len(),
+            ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.written_len(),
         }
     }

     fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
         match self {
-            Params::Heapless(p) => match p {
-                ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
-                ParamsHeapless::EcssEnum(enumeration) => enumeration.write_to_be_bytes(buf),
-            },
-            Params::Store(_) => Ok(0),
-            #[cfg(feature = "alloc")]
-            Params::Vec(vec) => {
-                if buf.len() < vec.len() {
-                    return Err(ByteConversionError::ToSliceTooSmall {
-                        found: buf.len(),
-                        expected: vec.len(),
-                    });
-                }
-                buf[0..vec.len()].copy_from_slice(vec);
-                Ok(vec.len())
-            }
-            #[cfg(feature = "alloc")]
-            Params::String(string) => {
-                if buf.len() < string.len() {
-                    return Err(ByteConversionError::ToSliceTooSmall {
-                        found: buf.len(),
-                        expected: string.len(),
-                    });
-                }
-                buf[0..string.len()].copy_from_slice(string.as_bytes());
-                Ok(string.len())
-            }
+            ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
+            ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.write_to_be_bytes(buf),
         }
     }
 }
@@ -843,10 +803,9 @@ mod tests {
     #[test]
     fn test_params_written_len_raw() {
         let param_raw = ParamsRaw::from((500_u32, 1000_u32));
-        let param: Params = Params::Heapless(param_raw.into());
-        assert_eq!(param.written_len(), 8);
+        assert_eq!(param_raw.written_len(), 8);
         let mut buf: [u8; 8] = [0; 8];
-        param
+        param_raw
             .write_to_be_bytes(&mut buf)
             .expect("writing to buffer failed");
         assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
@@ -854,21 +813,28 @@ mod tests {
     }

     #[test]
-    fn test_params_written_string() {
-        let string = "Test String".to_string();
-        let param = Params::String(string.clone());
-        assert_eq!(param.written_len(), string.len());
-        let vec = param.to_vec().unwrap();
-        let string_conv_back = String::from_utf8(vec).expect("conversion to string failed");
-        assert_eq!(string_conv_back, string);
+    fn test_heapless_param_writable_trait_raw() {
+        let param_heapless = ParamsHeapless::Raw(ParamsRaw::from((500_u32, 1000_u32)));
+        assert_eq!(param_heapless.written_len(), 8);
+        let mut buf: [u8; 8] = [0; 8];
+        let size = param_heapless
+            .write_to_be_bytes(&mut buf)
+            .expect("writing failed");
+        assert_eq!(size, 8);
+        assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
+        assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);
     }

     #[test]
-    fn test_params_written_vec() {
-        let vec: Vec<u8> = alloc::vec![1, 2, 3, 4, 5];
-        let param = Params::Vec(vec.clone());
-        assert_eq!(param.written_len(), vec.len());
-        assert_eq!(param.to_vec().expect("writing vec params failed"), vec);
+    fn test_heapless_param_writable_trait_ecss_enum() {
+        let param_heapless = ParamsHeapless::EcssEnum(ParamsEcssEnum::U16(5.into()));
+        assert_eq!(param_heapless.written_len(), 2);
+        let mut buf: [u8; 2] = [0; 2];
+        let size = param_heapless
+            .write_to_be_bytes(&mut buf)
+            .expect("writing failed");
+        assert_eq!(size, 2);
+        assert_eq!(u16::from_be_bytes(buf[0..2].try_into().unwrap()), 5);
     }

     #[test]


@@ -72,7 +72,6 @@
 //! }
 //! ```
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub use alloc_mod::*;
 use core::fmt::{Display, Formatter};
 use delegate::delegate;
@@ -83,7 +82,7 @@ use spacepackets::ByteConversionError;
 use std::error::Error;

 type NumBlocks = u16;
-pub type StoreAddr = u64;
+pub type PoolAddr = u64;

 /// Simple address type used for transactions with the local pool.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -101,14 +100,14 @@ impl StaticPoolAddr {
} }
} }
impl From<StaticPoolAddr> for StoreAddr { impl From<StaticPoolAddr> for PoolAddr {
fn from(value: StaticPoolAddr) -> Self { fn from(value: StaticPoolAddr) -> Self {
((value.pool_idx as u64) << 16) | value.packet_idx as u64 ((value.pool_idx as u64) << 16) | value.packet_idx as u64
} }
} }
impl From<StoreAddr> for StaticPoolAddr { impl From<PoolAddr> for StaticPoolAddr {
fn from(value: StoreAddr) -> Self { fn from(value: PoolAddr) -> Self {
Self { Self {
pool_idx: ((value >> 16) & 0xff) as u16, pool_idx: ((value >> 16) & 0xff) as u16,
packet_idx: (value & 0xff) as u16, packet_idx: (value & 0xff) as u16,
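The two `From` conversions above define the `PoolAddr` packing: subpool index in the upper 16 bits, packet index in the lower 16. A standalone sketch of that scheme, with the masks mirroring the impls exactly as shown:

```rust
// Mirrors the From<StaticPoolAddr> / From<PoolAddr> conversions above.
type PoolAddr = u64;

fn pack(pool_idx: u16, packet_idx: u16) -> PoolAddr {
    ((pool_idx as u64) << 16) | packet_idx as u64
}

fn unpack(addr: PoolAddr) -> (u16, u16) {
    // Masks copied verbatim from the impl shown in the diff.
    (((addr >> 16) & 0xff) as u16, (addr & 0xff) as u16)
}

fn main() {
    let addr = pack(2, 7);
    assert_eq!(addr, (2u64 << 16) | 7);
    assert_eq!(unpack(addr), (2, 7));
}
```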
@@ -151,59 +150,59 @@ impl Error for StoreIdError {}
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum StoreError { pub enum PoolError {
/// Requested data block is too large /// Requested data block is too large
DataTooLarge(usize), DataTooLarge(usize),
/// The store is full. Contains the index of the full subpool /// The store is full. Contains the index of the full subpool
StoreFull(u16), StoreFull(u16),
/// Store ID is invalid. This also includes partial errors where only the subpool is invalid /// Store ID is invalid. This also includes partial errors where only the subpool is invalid
InvalidStoreId(StoreIdError, Option<StoreAddr>), InvalidStoreId(StoreIdError, Option<PoolAddr>),
/// Valid subpool and packet index, but no data is stored at the given address /// Valid subpool and packet index, but no data is stored at the given address
DataDoesNotExist(StoreAddr), DataDoesNotExist(PoolAddr),
ByteConversionError(spacepackets::ByteConversionError), ByteConversionError(spacepackets::ByteConversionError),
LockError, LockError,
/// Internal or configuration errors /// Internal or configuration errors
InternalError(u32), InternalError(u32),
} }
impl Display for StoreError { impl Display for PoolError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self { match self {
StoreError::DataTooLarge(size) => { PoolError::DataTooLarge(size) => {
write!(f, "data to store with size {size} is too large") write!(f, "data to store with size {size} is too large")
} }
StoreError::StoreFull(u16) => { PoolError::StoreFull(u16) => {
write!(f, "store is too full. index for full subpool: {u16}") write!(f, "store is too full. index for full subpool: {u16}")
} }
StoreError::InvalidStoreId(id_e, addr) => { PoolError::InvalidStoreId(id_e, addr) => {
write!(f, "invalid store ID: {id_e}, address: {addr:?}") write!(f, "invalid store ID: {id_e}, address: {addr:?}")
} }
StoreError::DataDoesNotExist(addr) => { PoolError::DataDoesNotExist(addr) => {
write!(f, "no data exists at address {addr:?}") write!(f, "no data exists at address {addr:?}")
} }
StoreError::InternalError(e) => { PoolError::InternalError(e) => {
write!(f, "internal error: {e}") write!(f, "internal error: {e}")
} }
StoreError::ByteConversionError(e) => { PoolError::ByteConversionError(e) => {
write!(f, "store error: {e}") write!(f, "store error: {e}")
} }
StoreError::LockError => { PoolError::LockError => {
write!(f, "lock error") write!(f, "lock error")
} }
} }
} }
} }
impl From<ByteConversionError> for StoreError { impl From<ByteConversionError> for PoolError {
fn from(value: ByteConversionError) -> Self { fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value) Self::ByteConversionError(value)
} }
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl Error for StoreError { impl Error for PoolError {
fn source(&self) -> Option<&(dyn Error + 'static)> { fn source(&self) -> Option<&(dyn Error + 'static)> {
if let StoreError::InvalidStoreId(e, _) = self { if let PoolError::InvalidStoreId(e, _) = self {
return Some(e); return Some(e);
} }
None None
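With the `StoreError` to `PoolError` rename complete, downstream match arms change mechanically. A hedged sketch, assuming the `satrs::pool` path; the variants and the `Display` impl are taken from the hunk above:

```rust
use satrs::pool::PoolError; // path assumed from the module shown above

fn describe(err: &PoolError) -> String {
    match err {
        PoolError::StoreFull(subpool) => format!("subpool {subpool} is full"),
        PoolError::DataTooLarge(size) => format!("{size} bytes exceed the largest block"),
        // The remaining variants already have Display output (see above).
        other => format!("{other}"),
    }
}

fn main() {
    assert_eq!(describe(&PoolError::StoreFull(1)), "subpool 1 is full");
}
```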
@@ -218,44 +217,41 @@ impl Error for StoreError {
/// pool structure being wrapped inside a lock. /// pool structure being wrapped inside a lock.
pub trait PoolProvider { pub trait PoolProvider {
/// Add new data to the pool. The provider should attempt to reserve a memory block with the /// Add new data to the pool. The provider should attempt to reserve a memory block with the
/// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can /// appropriate size and then copy the given data to the block. Yields a [PoolAddr] which can
/// be used to access the data stored in the pool /// be used to access the data stored in the pool
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>; fn add(&mut self, data: &[u8]) -> Result<PoolAddr, PoolError>;
/// The provider should attempt to reserve a free memory block with the appropriate size first. /// The provider should attempt to reserve a free memory block with the appropriate size first.
/// It then executes a user-provided closure and passes a mutable reference to that memory /// It then executes a user-provided closure and passes a mutable reference to that memory
/// block to the closure. This allows the user to write data to the memory block. /// block to the closure. This allows the user to write data to the memory block.
/// The function should yield a [StoreAddr] which can be used to access the data stored in the /// The function should yield a [PoolAddr] which can be used to access the data stored in the
/// pool. /// pool.
fn free_element<W: FnMut(&mut [u8])>( fn free_element<W: FnMut(&mut [u8])>(
&mut self, &mut self,
len: usize, len: usize,
writer: W, writer: W,
) -> Result<StoreAddr, StoreError>; ) -> Result<PoolAddr, PoolError>;
/// Modify data added previously using a given [StoreAddr]. The provider should use the store /// Modify data added previously using a given [PoolAddr]. The provider should use the store
/// address to determine if a memory block exists for that address. If it does, it should /// address to determine if a memory block exists for that address. If it does, it should
/// call the user-provided closure and pass a mutable reference to the memory block /// call the user-provided closure and pass a mutable reference to the memory block
/// to the closure. This allows the user to modify the memory block. /// to the closure. This allows the user to modify the memory block.
fn modify<U: FnMut(&mut [u8])>( fn modify<U: FnMut(&mut [u8])>(&mut self, addr: &PoolAddr, updater: U)
&mut self, -> Result<(), PoolError>;
addr: &StoreAddr,
updater: U,
) -> Result<(), StoreError>;
/// The provider should copy the data from the memory block to the user-provided buffer if /// The provider should copy the data from the memory block to the user-provided buffer if
/// it exists. /// it exists.
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>; fn read(&self, addr: &PoolAddr, buf: &mut [u8]) -> Result<usize, PoolError>;
/// Delete data inside the pool given a [StoreAddr]. /// Delete data inside the pool given a [PoolAddr].
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>; fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError>;
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>; fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError>;
/// Retrieve the length of the data at the given store address. /// Retrieve the length of the data at the given store address.
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>; fn len_of_data(&self, addr: &PoolAddr) -> Result<usize, PoolError>;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> { fn read_as_vec(&self, addr: &PoolAddr) -> Result<alloc::vec::Vec<u8>, PoolError> {
let mut vec = alloc::vec![0; self.len_of_data(addr)?]; let mut vec = alloc::vec![0; self.len_of_data(addr)?];
self.read(addr, &mut vec)?; self.read(addr, &mut vec)?;
Ok(vec) Ok(vec)
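A minimal sketch of the renamed `PoolProvider` round trip: add data, query its length, read it back, delete it. The `StaticPoolConfig::new(subpools, spill)` constructor shape is an assumption inferred from the `pool_cfg` fields used later in this diff.

```rust
use satrs::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    // One subpool holding four 32-byte blocks, no spilling to larger
    // subpools; constructor shape assumed, not confirmed by this diff.
    let cfg = StaticPoolConfig::new(vec![(4, 32)], false);
    let mut pool = StaticMemoryPool::new(cfg);

    let addr = pool.add(&[1, 2, 3, 4]).expect("adding data failed");
    assert_eq!(pool.len_of_data(&addr).unwrap(), 4);
    assert_eq!(pool.read_as_vec(&addr).unwrap(), vec![1, 2, 3, 4]);
    pool.delete(addr).expect("deleting data failed");
}
```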
@@ -272,7 +268,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read the data and release the guard /// This can prevent memory leaks. Users can read the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no /// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely. /// manual deletion is necessary when returning from a processing function prematurely.
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>; fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self>;
/// This function behaves like [PoolProvider::modify], but consumes the provided /// This function behaves like [PoolProvider::modify], but consumes the provided
/// address and returns a RAII conformant guard object. /// address and returns a RAII conformant guard object.
@@ -282,20 +278,20 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read (and modify) the data and release the guard /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no /// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely. /// manual deletion is necessary when returning from a processing function prematurely.
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>; fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self>;
} }
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> { pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
pool: &'a mut MemProvider, pool: &'a mut MemProvider,
pub addr: StoreAddr, pub addr: PoolAddr,
no_deletion: bool, no_deletion: bool,
deletion_failed_error: Option<StoreError>, deletion_failed_error: Option<PoolError>,
} }
/// This helper object can be used to safely access pool data without worrying about memory /// This helper object can be used to safely access pool data without worrying about memory
/// leaks. /// leaks.
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> { impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self { pub fn new(pool: &'a mut MemProvider, addr: PoolAddr) -> Self {
Self { Self {
pool, pool,
addr, addr,
@@ -304,12 +300,12 @@ impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
} }
} }
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> { pub fn read(&self, buf: &mut [u8]) -> Result<usize, PoolError> {
self.pool.read(&self.addr, buf) self.pool.read(&self.addr, buf)
} }
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> { pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, PoolError> {
self.pool.read_as_vec(&self.addr) self.pool.read_as_vec(&self.addr)
} }
@@ -335,19 +331,19 @@ pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
} }
impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> { impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self { pub fn new(pool: &'a mut MemProvider, addr: PoolAddr) -> Self {
Self { Self {
guard: PoolGuard::new(pool, addr), guard: PoolGuard::new(pool, addr),
} }
} }
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> { pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), PoolError> {
self.guard.pool.modify(&self.guard.addr, updater) self.guard.pool.modify(&self.guard.addr, updater)
} }
delegate!( delegate!(
to self.guard { to self.guard {
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>; pub fn read(&self, buf: &mut [u8]) -> Result<usize, PoolError>;
/// Releasing the pool guard will disable the automatic deletion of the data when the guard /// Releasing the pool guard will disable the automatic deletion of the data when the guard
/// is dropped. /// is dropped.
pub fn release(&mut self); pub fn release(&mut self);
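The guard types above implement the RAII pattern described in the doc comments: the stored data is deleted when the guard drops unless `release()` is called first. A sketch, again assuming the constructor shapes from the previous example:

```rust
use satrs::pool::{
    PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig,
};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 32)], false));
    let addr = pool.add(&[0xAA; 8]).expect("adding data failed");

    {
        // Mutate in place through the RW guard; without release(), the data
        // would be deleted as soon as the guard drops.
        let mut rw_guard = pool.modify_with_guard(addr);
        rw_guard
            .update(&mut |block: &mut [u8]| block[0] = 0xBB)
            .expect("update failed");
        rw_guard.release();
    }

    // Still present, because the guard was released before dropping.
    assert!(pool.has_element_at(&addr).unwrap());
}
```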
@@ -358,7 +354,7 @@ impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
mod alloc_mod { mod alloc_mod {
use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr}; use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError}; use crate::pool::{NumBlocks, PoolAddr, PoolError, StoreIdError};
use alloc::vec; use alloc::vec;
use alloc::vec::Vec; use alloc::vec::Vec;
use spacepackets::ByteConversionError; use spacepackets::ByteConversionError;
@@ -423,7 +419,7 @@ mod alloc_mod {
/// fitting subpool is full. This might be added in the future. /// fitting subpool is full. This might be added in the future.
/// ///
/// Transactions with the [pool][StaticMemoryPool] are done using a generic /// Transactions with the [pool][StaticMemoryPool] are done using a generic
/// [address][StoreAddr] type. Adding any data to the pool will yield a store address. /// [address][PoolAddr] type. Adding any data to the pool will yield a store address.
/// Modification and read operations are done using a reference to a store address. Deletion /// Modification and read operations are done using a reference to a store address. Deletion
/// will consume the store address. /// will consume the store address.
pub struct StaticMemoryPool { pub struct StaticMemoryPool {
@@ -453,41 +449,41 @@ mod alloc_mod {
local_pool local_pool
} }
fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, StoreError> { fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, PoolError> {
self.validate_addr(addr)?; self.validate_addr(addr)?;
let pool_idx = addr.pool_idx as usize; let pool_idx = addr.pool_idx as usize;
let size_list = self.sizes_lists.get(pool_idx).unwrap(); let size_list = self.sizes_lists.get(pool_idx).unwrap();
let curr_size = size_list[addr.packet_idx as usize]; let curr_size = size_list[addr.packet_idx as usize];
if curr_size == STORE_FREE { if curr_size == STORE_FREE {
return Err(StoreError::DataDoesNotExist(StoreAddr::from(*addr))); return Err(PoolError::DataDoesNotExist(PoolAddr::from(*addr)));
} }
Ok(curr_size) Ok(curr_size)
} }
fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), StoreError> { fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), PoolError> {
let pool_idx = addr.pool_idx as usize; let pool_idx = addr.pool_idx as usize;
if pool_idx >= self.pool_cfg.cfg.len() { if pool_idx >= self.pool_cfg.cfg.len() {
return Err(StoreError::InvalidStoreId( return Err(PoolError::InvalidStoreId(
StoreIdError::InvalidSubpool(addr.pool_idx), StoreIdError::InvalidSubpool(addr.pool_idx),
Some(StoreAddr::from(*addr)), Some(PoolAddr::from(*addr)),
)); ));
} }
if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 { if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
return Err(StoreError::InvalidStoreId( return Err(PoolError::InvalidStoreId(
StoreIdError::InvalidPacketIdx(addr.packet_idx), StoreIdError::InvalidPacketIdx(addr.packet_idx),
Some(StoreAddr::from(*addr)), Some(PoolAddr::from(*addr)),
)); ));
} }
Ok(()) Ok(())
} }
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> { fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, PoolError> {
let mut subpool_idx = self.find_subpool(data_len, 0)?; let mut subpool_idx = self.find_subpool(data_len, 0)?;
if self.pool_cfg.spill_to_higher_subpools { if self.pool_cfg.spill_to_higher_subpools {
while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) { while let Err(PoolError::StoreFull(_)) = self.find_empty(subpool_idx) {
if (subpool_idx + 1) as usize == self.sizes_lists.len() { if (subpool_idx + 1) as usize == self.sizes_lists.len() {
return Err(StoreError::StoreFull(subpool_idx)); return Err(PoolError::StoreFull(subpool_idx));
} }
subpool_idx += 1; subpool_idx += 1;
} }
@@ -501,7 +497,7 @@ mod alloc_mod {
}) })
} }
fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, StoreError> { fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, PoolError> {
for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() { for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() {
if i < start_at_subpool as usize { if i < start_at_subpool as usize {
continue; continue;
@@ -510,21 +506,21 @@ mod alloc_mod {
return Ok(i as u16); return Ok(i as u16);
} }
} }
Err(StoreError::DataTooLarge(req_size)) Err(PoolError::DataTooLarge(req_size))
} }
fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), StoreError> { fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), PoolError> {
let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?; let packet_pos = self.raw_pos(addr).ok_or(PoolError::InternalError(0))?;
let subpool = self let subpool = self
.pool .pool
.get_mut(addr.pool_idx as usize) .get_mut(addr.pool_idx as usize)
.ok_or(StoreError::InternalError(1))?; .ok_or(PoolError::InternalError(1))?;
let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()]; let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
pool_slice.copy_from_slice(data); pool_slice.copy_from_slice(data);
Ok(()) Ok(())
} }
fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> { fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), PoolError> {
if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) { if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
for (i, elem_size) in size_list.iter_mut().enumerate() { for (i, elem_size) in size_list.iter_mut().enumerate() {
if *elem_size == STORE_FREE { if *elem_size == STORE_FREE {
@@ -532,12 +528,12 @@ mod alloc_mod {
} }
} }
} else { } else {
return Err(StoreError::InvalidStoreId( return Err(PoolError::InvalidStoreId(
StoreIdError::InvalidSubpool(subpool), StoreIdError::InvalidSubpool(subpool),
None, None,
)); ));
} }
Err(StoreError::StoreFull(subpool)) Err(PoolError::StoreFull(subpool))
} }
fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> { fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
@@ -547,10 +543,10 @@ mod alloc_mod {
} }
impl PoolProvider for StaticMemoryPool { impl PoolProvider for StaticMemoryPool {
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> { fn add(&mut self, data: &[u8]) -> Result<PoolAddr, PoolError> {
let data_len = data.len(); let data_len = data.len();
if data_len > POOL_MAX_SIZE { if data_len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(data_len)); return Err(PoolError::DataTooLarge(data_len));
} }
let addr = self.reserve(data_len)?; let addr = self.reserve(data_len)?;
self.write(&addr, data)?; self.write(&addr, data)?;
@@ -561,9 +557,9 @@ mod alloc_mod {
&mut self, &mut self,
len: usize, len: usize,
mut writer: W, mut writer: W,
) -> Result<StoreAddr, StoreError> { ) -> Result<PoolAddr, PoolError> {
if len > POOL_MAX_SIZE { if len > POOL_MAX_SIZE {
return Err(StoreError::DataTooLarge(len)); return Err(PoolError::DataTooLarge(len));
} }
let addr = self.reserve(len)?; let addr = self.reserve(len)?;
let raw_pos = self.raw_pos(&addr).unwrap(); let raw_pos = self.raw_pos(&addr).unwrap();
@@ -575,9 +571,9 @@ mod alloc_mod {
fn modify<U: FnMut(&mut [u8])>( fn modify<U: FnMut(&mut [u8])>(
&mut self, &mut self,
addr: &StoreAddr, addr: &PoolAddr,
mut updater: U, mut updater: U,
) -> Result<(), StoreError> { ) -> Result<(), PoolError> {
let addr = StaticPoolAddr::from(*addr); let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?; let curr_size = self.addr_check(&addr)?;
let raw_pos = self.raw_pos(&addr).unwrap(); let raw_pos = self.raw_pos(&addr).unwrap();
@@ -587,7 +583,7 @@ mod alloc_mod {
Ok(()) Ok(())
} }
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> { fn read(&self, addr: &PoolAddr, buf: &mut [u8]) -> Result<usize, PoolError> {
let addr = StaticPoolAddr::from(*addr); let addr = StaticPoolAddr::from(*addr);
let curr_size = self.addr_check(&addr)?; let curr_size = self.addr_check(&addr)?;
if buf.len() < curr_size { if buf.len() < curr_size {
@@ -605,7 +601,7 @@ mod alloc_mod {
Ok(curr_size) Ok(curr_size)
} }
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> { fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError> {
let addr = StaticPoolAddr::from(addr); let addr = StaticPoolAddr::from(addr);
self.addr_check(&addr)?; self.addr_check(&addr)?;
let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1; let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
@@ -618,7 +614,7 @@ mod alloc_mod {
Ok(()) Ok(())
} }
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> { fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError> {
let addr = StaticPoolAddr::from(*addr); let addr = StaticPoolAddr::from(*addr);
self.validate_addr(&addr)?; self.validate_addr(&addr)?;
let pool_idx = addr.pool_idx as usize; let pool_idx = addr.pool_idx as usize;
@@ -630,7 +626,7 @@ mod alloc_mod {
Ok(true) Ok(true)
} }
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> { fn len_of_data(&self, addr: &PoolAddr) -> Result<usize, PoolError> {
let addr = StaticPoolAddr::from(*addr); let addr = StaticPoolAddr::from(*addr);
self.validate_addr(&addr)?; self.validate_addr(&addr)?;
let pool_idx = addr.pool_idx as usize; let pool_idx = addr.pool_idx as usize;
@@ -644,11 +640,11 @@ mod alloc_mod {
} }
impl PoolProviderWithGuards for StaticMemoryPool { impl PoolProviderWithGuards for StaticMemoryPool {
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> { fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
PoolRwGuard::new(self, addr) PoolRwGuard::new(self, addr)
} }
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self> { fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
PoolGuard::new(self, addr) PoolGuard::new(self, addr)
} }
} }
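Per the trait docs above, `free_element` reserves a block first and then hands the closure a mutable slice of the requested length, avoiding an intermediate copy. A short sketch under the same constructor assumptions:

```rust
use satrs::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn main() {
    let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(4, 16)], false));

    // Reserve an 8-byte block and fill it through the writer closure.
    let addr = pool
        .free_element(8, |block| block.copy_from_slice(&[0xCA; 8]))
        .expect("reservation failed");
    assert_eq!(pool.read_as_vec(&addr).unwrap(), vec![0xCA; 8]);
}
```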
@@ -657,8 +653,8 @@ mod alloc_mod {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::pool::{ use crate::pool::{
PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool, PoolError, PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE, StaticPoolAddr, StaticPoolConfig, StoreIdError, POOL_MAX_SIZE,
}; };
use std::vec; use std::vec;
@@ -782,7 +778,7 @@ mod tests {
let res = local_pool.free_element(8, |_| {}); let res = local_pool.free_element(8, |_| {});
assert!(res.is_err()); assert!(res.is_err());
let err = res.unwrap_err(); let err = res.unwrap_err();
assert_eq!(err, StoreError::StoreFull(1)); assert_eq!(err, PoolError::StoreFull(1));
// Verify that the two deletions are successful // Verify that the two deletions are successful
assert!(local_pool.delete(addr0).is_ok()); assert!(local_pool.delete(addr0).is_ok());
@@ -804,7 +800,7 @@ mod tests {
assert!(res.is_err()); assert!(res.is_err());
assert!(matches!( assert!(matches!(
res.unwrap_err(), res.unwrap_err(),
StoreError::DataDoesNotExist { .. } PoolError::DataDoesNotExist { .. }
)); ));
} }
@@ -817,8 +813,8 @@ mod tests {
let res = local_pool.add(&test_buf); let res = local_pool.add(&test_buf);
assert!(res.is_err()); assert!(res.is_err());
let err = res.unwrap_err(); let err = res.unwrap_err();
assert!(matches!(err, StoreError::StoreFull { .. })); assert!(matches!(err, PoolError::StoreFull { .. }));
if let StoreError::StoreFull(subpool) = err { if let PoolError::StoreFull(subpool) = err {
assert_eq!(subpool, 2); assert_eq!(subpool, 2);
} }
} }
@@ -836,7 +832,7 @@ mod tests {
let err = res.unwrap_err(); let err = res.unwrap_err();
assert!(matches!( assert!(matches!(
err, err,
StoreError::InvalidStoreId(StoreIdError::InvalidSubpool(3), Some(_)) PoolError::InvalidStoreId(StoreIdError::InvalidSubpool(3), Some(_))
)); ));
} }
@@ -853,7 +849,7 @@ mod tests {
let err = res.unwrap_err(); let err = res.unwrap_err();
assert!(matches!( assert!(matches!(
err, err,
StoreError::InvalidStoreId(StoreIdError::InvalidPacketIdx(1), Some(_)) PoolError::InvalidStoreId(StoreIdError::InvalidPacketIdx(1), Some(_))
)); ));
} }
@@ -864,7 +860,7 @@ mod tests {
let res = local_pool.add(&data_too_large); let res = local_pool.add(&data_too_large);
assert!(res.is_err()); assert!(res.is_err());
let err = res.unwrap_err(); let err = res.unwrap_err();
assert_eq!(err, StoreError::DataTooLarge(20)); assert_eq!(err, PoolError::DataTooLarge(20));
} }
#[test] #[test]
@@ -872,10 +868,7 @@ mod tests {
let mut local_pool = basic_small_pool(); let mut local_pool = basic_small_pool();
let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {}); let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
assert!(res.is_err()); assert!(res.is_err());
assert_eq!( assert_eq!(res.unwrap_err(), PoolError::DataTooLarge(POOL_MAX_SIZE + 1));
res.unwrap_err(),
StoreError::DataTooLarge(POOL_MAX_SIZE + 1)
);
} }
#[test] #[test]
@@ -884,7 +877,7 @@ mod tests {
// Try to request a slot which is too large // Try to request a slot which is too large
let res = local_pool.free_element(20, |_| {}); let res = local_pool.free_element(20, |_| {});
assert!(res.is_err()); assert!(res.is_err());
assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20)); assert_eq!(res.unwrap_err(), PoolError::DataTooLarge(20));
} }
#[test] #[test]
@@ -1004,7 +997,7 @@ mod tests {
let should_fail = local_pool.free_element(8, |_| {}); let should_fail = local_pool.free_element(8, |_| {});
assert!(should_fail.is_err()); assert!(should_fail.is_err());
if let Err(err) = should_fail { if let Err(err) = should_fail {
assert_eq!(err, StoreError::StoreFull(1)); assert_eq!(err, PoolError::StoreFull(1));
} else { } else {
panic!("unexpected store address"); panic!("unexpected store address");
} }
@@ -1035,7 +1028,7 @@ mod tests {
let should_fail = local_pool.free_element(8, |_| {}); let should_fail = local_pool.free_element(8, |_| {});
assert!(should_fail.is_err()); assert!(should_fail.is_err());
if let Err(err) = should_fail { if let Err(err) = should_fail {
assert_eq!(err, StoreError::StoreFull(2)); assert_eq!(err, PoolError::StoreFull(2));
} else { } else {
panic!("unexpected store address"); panic!("unexpected store address");
} }

View File

@@ -7,11 +7,9 @@ use crate::{
use satrs_shared::res_code::ResultU16; use satrs_shared::res_code::ResultU16;
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub use std_mod::*; pub use std_mod::*;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[allow(unused_imports)] #[allow(unused_imports)]
pub use alloc_mod::*; pub use alloc_mod::*;
@@ -41,31 +39,30 @@ pub enum ActionReplyVariant {
} }
#[derive(Debug, PartialEq, Clone)] #[derive(Debug, PartialEq, Clone)]
pub struct PusActionReply { pub struct ActionReplyPus {
pub action_id: ActionId, pub action_id: ActionId,
pub variant: ActionReplyVariant, pub variant: ActionReplyVariant,
} }
impl PusActionReply { impl ActionReplyPus {
pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self { pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self {
Self { action_id, variant } Self { action_id, variant }
} }
} }
pub type GenericActionReplyPus = GenericMessage<PusActionReply>; pub type GenericActionReplyPus = GenericMessage<ActionReplyPus>;
impl GenericActionReplyPus { impl GenericActionReplyPus {
pub fn new_action_reply( pub fn new_action_reply(
requestor_info: MessageMetadata, replier_info: MessageMetadata,
action_id: ActionId, action_id: ActionId,
reply: ActionReplyVariant, reply: ActionReplyVariant,
) -> Self { ) -> Self {
Self::new(requestor_info, PusActionReply::new(action_id, reply)) Self::new(replier_info, ActionReplyPus::new(action_id, reply))
} }
} }
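Constructing the renamed reply type is unchanged apart from the name. A hedged sketch; `ActionReplyVariant::Completed` and the `satrs` import paths are assumptions for illustration, since the variant list is elided in this hunk:

```rust
use satrs::action::ActionId; // path assumed
use satrs::pus::action::{ActionReplyPus, ActionReplyVariant}; // paths assumed

fn completion_reply(action_id: ActionId) -> ActionReplyPus {
    // `Completed` stands in for whichever variant applies; the full variant
    // list is not shown in the hunk above.
    ActionReplyPus::new(action_id, ActionReplyVariant::Completed)
}
```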
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod alloc_mod { pub mod alloc_mod {
use crate::{ use crate::{
action::ActionRequest, action::ActionRequest,
@@ -76,13 +73,13 @@ pub mod alloc_mod {
ComponentId, ComponentId,
}; };
use super::PusActionReply; use super::ActionReplyPus;
/// Helper type definition for an action request handler which can handle action requests. /// Helper type definition for an action request handler which can handle action requests.
pub type ActionRequestHandlerInterface<S, R> = pub type ActionRequestHandlerInterface<S, R> =
MessageSenderAndReceiver<PusActionReply, ActionRequest, S, R>; MessageSenderAndReceiver<ActionReplyPus, ActionRequest, S, R>;
impl<S: MessageSender<PusActionReply>, R: MessageReceiver<ActionRequest>> impl<S: MessageSender<ActionReplyPus>, R: MessageReceiver<ActionRequest>>
ActionRequestHandlerInterface<S, R> ActionRequestHandlerInterface<S, R>
{ {
pub fn try_recv_action_request( pub fn try_recv_action_request(
@@ -95,7 +92,7 @@ pub mod alloc_mod {
&self, &self,
request_id: RequestId, request_id: RequestId,
target_id: ComponentId, target_id: ComponentId,
reply: PusActionReply, reply: ActionReplyPus,
) -> Result<(), GenericTargetedMessagingError> { ) -> Result<(), GenericTargetedMessagingError> {
self.send_message(request_id, target_id, reply) self.send_message(request_id, target_id, reply)
} }
@@ -104,14 +101,14 @@ pub mod alloc_mod {
/// Helper type definition for an action requestor object which can send action requests and /// Helper type definition for an action requestor object which can send action requests and
/// receive action replies. /// receive action replies.
pub type ActionRequestorInterface<S, R> = pub type ActionRequestorInterface<S, R> =
MessageSenderAndReceiver<ActionRequest, PusActionReply, S, R>; MessageSenderAndReceiver<ActionRequest, ActionReplyPus, S, R>;
impl<S: MessageSender<ActionRequest>, R: MessageReceiver<PusActionReply>> impl<S: MessageSender<ActionRequest>, R: MessageReceiver<ActionReplyPus>>
ActionRequestorInterface<S, R> ActionRequestorInterface<S, R>
{ {
pub fn try_recv_action_reply( pub fn try_recv_action_reply(
&self, &self,
) -> Result<Option<GenericMessage<PusActionReply>>, GenericTargetedMessagingError> { ) -> Result<Option<GenericMessage<ActionReplyPus>>, GenericTargetedMessagingError> {
self.try_recv_message() self.try_recv_message()
} }
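A sketch of polling the requestor side with `try_recv_action_reply` as declared above, draining until the queue reports empty. Only the trait bounds and the method signature are taken from the diff; the import paths and the location of `GenericMessage` are assumptions:

```rust
use satrs::action::ActionRequest; // path assumed
use satrs::pus::action::{ActionReplyPus, ActionRequestorInterface}; // paths assumed
use satrs::queue::{GenericTargetedMessagingError, MessageReceiver, MessageSender}; // paths assumed
use satrs::request::GenericMessage; // path assumed

/// Drain all pending action replies from the requestor side.
fn drain_replies<S, R>(
    requestor: &ActionRequestorInterface<S, R>,
) -> Result<Vec<GenericMessage<ActionReplyPus>>, GenericTargetedMessagingError>
where
    S: MessageSender<ActionRequest>,
    R: MessageReceiver<ActionReplyPus>,
{
    let mut replies = Vec::new();
    // try_recv_action_reply yields None once the underlying queue is empty.
    while let Some(reply) = requestor.try_recv_action_reply()? {
        replies.push(reply);
    }
    Ok(replies)
}
```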
@@ -127,7 +124,6 @@ pub mod alloc_mod {
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod { pub mod std_mod {
use std::sync::mpsc; use std::sync::mpsc;
@@ -179,626 +175,23 @@ pub mod std_mod {
pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap<ActivePusActionRequestStd>; pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap<ActivePusActionRequestStd>;
pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface< pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface<
mpsc::Sender<GenericMessage<PusActionReply>>, mpsc::Sender<GenericMessage<ActionReplyPus>>,
mpsc::Receiver<GenericMessage<ActionRequest>>, mpsc::Receiver<GenericMessage<ActionRequest>>,
>; >;
pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface< pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface<
mpsc::SyncSender<GenericMessage<PusActionReply>>, mpsc::SyncSender<GenericMessage<ActionReplyPus>>,
mpsc::Receiver<GenericMessage<ActionRequest>>, mpsc::Receiver<GenericMessage<ActionRequest>>,
>; >;
pub type ActionRequestorMpsc = ActionRequestorInterface< pub type ActionRequestorMpsc = ActionRequestorInterface<
mpsc::Sender<GenericMessage<ActionRequest>>, mpsc::Sender<GenericMessage<ActionRequest>>,
mpsc::Receiver<GenericMessage<PusActionReply>>, mpsc::Receiver<GenericMessage<ActionReplyPus>>,
>; >;
pub type ActionRequestorBoundedMpsc = ActionRequestorInterface< pub type ActionRequestorBoundedMpsc = ActionRequestorInterface<
mpsc::SyncSender<GenericMessage<ActionRequest>>, mpsc::SyncSender<GenericMessage<ActionRequest>>,
mpsc::Receiver<GenericMessage<PusActionReply>>, mpsc::Receiver<GenericMessage<ActionReplyPus>>,
>; >;
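These aliases pair standard-library mpsc endpoints with the renamed message types. A sketch of the channel halves an `ActionRequestorMpsc` would be built from; the crate paths are assumed, the element types mirror the aliases above:

```rust
use std::sync::mpsc;

use satrs::action::ActionRequest; // path assumed
use satrs::pus::action::ActionReplyPus; // path assumed
use satrs::request::GenericMessage; // path assumed

fn main() {
    // ActionRequestorMpsc side: requests flow out, replies flow back in.
    let (request_tx, _request_rx) = mpsc::channel::<GenericMessage<ActionRequest>>();
    let (_reply_tx, reply_rx) = mpsc::channel::<GenericMessage<ActionReplyPus>>();
    let _ = (request_tx, reply_rx);
}
```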
/*
pub type ModeRequestorAndHandlerMpsc = ModeInterface<
mpsc::Sender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::Sender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestorAndHandlerMpscBounded = ModeInterface<
mpsc::SyncSender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::SyncSender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
*/
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {}
/*
use core::{cell::RefCell, time::Duration};
use std::{sync::mpsc, time::SystemTimeError};
use alloc::{collections::VecDeque, vec::Vec};
use delegate::delegate;
use spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcReader},
tm::PusTmReader,
PusPacket,
},
time::{cds, TimeWriter},
CcsdsPacket,
};
use crate::{
action::ActionRequestVariant,
params::{self, ParamsRaw, WritableToBeBytes},
pus::{
tests::{
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler,
TestConverter, TestRouter, APP_DATA_TOO_SHORT,
},
verification::{
self,
tests::{SharedVerificationMap, TestVerificationReporter, VerificationStatus},
FailParams, TcStateAccepted, TcStateNone, TcStateStarted,
VerificationReportingProvider,
},
EcssTcInMemConverter, EcssTcInVecConverter, EcssTmtcError, GenericRoutingError,
MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TmAsVecSenderWithMpsc,
},
};
use super::*;
impl<Request> PusRequestRouter<Request> for TestRouter<Request> {
type Error = GenericRoutingError;
fn route(
&self,
target_id: TargetId,
request: Request,
_token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> {
self.routing_requests
.borrow_mut()
.push_back((target_id, request));
self.check_for_injected_error()
}
fn handle_error(
&self,
target_id: TargetId,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
error: Self::Error,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) {
self.routing_errors
.borrow_mut()
.push_back((target_id, error));
}
}
impl PusTcToRequestConverter<ActionRequest> for TestConverter<8> {
type Error = PusPacketHandlingError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, ActionRequest), Self::Error> {
self.conversion_request.push_back(tc.raw_data().to_vec());
self.check_service(tc)?;
let target_id = tc.apid();
if tc.user_data().len() < 4 {
verif_reporter
.start_failure(
token,
FailParams::new(
time_stamp,
&APP_DATA_TOO_SHORT,
(tc.user_data().len() as u32).to_be_bytes().as_ref(),
),
)
.expect("start success failure");
return Err(PusPacketHandlingError::NotEnoughAppData {
expected: 4,
found: tc.user_data().len(),
});
}
if tc.subservice() == 1 {
verif_reporter
.start_success(token, time_stamp)
.expect("start success failure");
return Ok((
target_id.into(),
ActionRequest {
action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()),
variant: ActionRequestVariant::VecData(tc.user_data()[4..].to_vec()),
},
));
}
Err(PusPacketHandlingError::InvalidAppData(
"unexpected app data".into(),
))
}
}
pub struct PusDynRequestHandler<const SERVICE: u8, Request> {
srv_helper: PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
TestVerificationReporter,
>,
request_converter: TestConverter<SERVICE>,
request_router: TestRouter<Request>,
}
struct Pus8RequestTestbenchWithVec {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusDynRequestHandler<8, ActionRequest>,
}
impl Pus8RequestTestbenchWithVec {
pub fn new() -> Self {
let (common, srv_helper) = PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
Self {
common,
handler: PusDynRequestHandler {
srv_helper,
request_converter: TestConverter::default(),
request_router: TestRouter::default(),
},
}
}
delegate! {
to self.handler.request_converter {
pub fn check_next_conversion(&mut self, tc: &PusTcCreator);
}
}
delegate! {
to self.handler.request_router {
pub fn retrieve_next_request(&mut self) -> (TargetId, ActionRequest);
}
}
delegate! {
to self.handler.request_router {
pub fn retrieve_next_routing_error(&mut self) -> (TargetId, GenericRoutingError);
}
}
}
impl PusTestHarness for Pus8RequestTestbenchWithVec {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(
&self,
subservice: u8,
expected_request_id: verification::RequestId,
);
}
}
}
impl SimplePusPacketHandler for Pus8RequestTestbenchWithVec {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.handler.srv_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.handler
.srv_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let time_stamp = cds::TimeProvider::from_now_with_u16_days()
.expect("timestamp generation failed")
.to_vec()
.unwrap();
let (target_id, action_request) = self.handler.request_converter.convert(
ecss_tc_and_token.token,
&tc,
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
)?;
if let Err(e) = self.handler.request_router.route(
target_id,
action_request,
ecss_tc_and_token.token,
) {
self.handler.request_router.handle_error(
target_id,
ecss_tc_and_token.token,
&tc,
e.clone(),
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
);
return Err(e.into());
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
const TIMEOUT_ERROR_CODE: ResultU16 = ResultU16::new(1, 2);
const COMPLETION_ERROR_CODE: ResultU16 = ResultU16::new(2, 0);
const COMPLETION_ERROR_CODE_STEP: ResultU16 = ResultU16::new(2, 1);
#[derive(Default)]
pub struct TestReplyHandlerHook {
pub unexpected_replies: VecDeque<GenericActionReplyPus>,
pub timeouts: RefCell<VecDeque<ActivePusActionRequest>>,
}
impl ReplyHandlerHook<ActivePusActionRequest, ActionReplyPusWithActionId> for TestReplyHandlerHook {
fn handle_unexpected_reply(&mut self, reply: &GenericActionReplyPus) {
self.unexpected_replies.push_back(reply.clone());
}
fn timeout_callback(&self, active_request: &ActivePusActionRequest) {
self.timeouts.borrow_mut().push_back(active_request.clone());
}
fn timeout_error_code(&self) -> ResultU16 {
TIMEOUT_ERROR_CODE
}
}
pub struct Pus8ReplyTestbench {
verif_reporter: TestVerificationReporter,
#[allow(dead_code)]
ecss_tm_receiver: mpsc::Receiver<Vec<u8>>,
handler: PusService8ReplyHandler<
TestVerificationReporter,
DefaultActiveActionRequestMap,
TestReplyHandlerHook,
mpsc::Sender<Vec<u8>>,
>,
}
impl Pus8ReplyTestbench {
pub fn new(normal_ctor: bool) -> Self {
let reply_handler_hook = TestReplyHandlerHook::default();
let shared_verif_map = SharedVerificationMap::default();
let test_verif_reporter = TestVerificationReporter::new(shared_verif_map.clone());
let (ecss_tm_sender, ecss_tm_receiver) = mpsc::channel();
let reply_handler = if normal_ctor {
PusService8ReplyHandler::new_from_now_with_default_map(
test_verif_reporter.clone(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
} else {
PusService8ReplyHandler::new_from_now(
test_verif_reporter.clone(),
DefaultActiveActionRequestMap::default(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
};
Self {
verif_reporter: test_verif_reporter,
ecss_tm_receiver,
handler: reply_handler,
}
}
pub fn init_handling_for_request(
&mut self,
request_id: RequestId,
_action_id: ActionId,
) -> VerificationToken<TcStateStarted> {
assert!(!self.handler.request_active(request_id));
// let action_req = ActionRequest::new(action_id, ActionRequestVariant::NoData);
let token = self.add_tc_with_req_id(request_id.into());
let token = self
.verif_reporter
.acceptance_success(token, &[])
.expect("acceptance success failure");
let token = self
.verif_reporter
.start_success(token, &[])
.expect("start success failure");
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.started.expect("request was not started"));
assert!(verif_info.accepted.expect("request was not accepted"));
token
}
pub fn next_unrequested_reply(&self) -> Option<GenericActionReplyPus> {
self.handler.user_hook.unexpected_replies.front().cloned()
}
pub fn assert_request_completion_success(&self, step: Option<u16>, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, true);
}
pub fn assert_request_completion_failure(
&self,
step: Option<u16>,
request_id: RequestId,
fail_enum: ResultU16,
fail_data: &[u8],
) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, false);
assert_eq!(verif_info.fail_enum.unwrap(), fail_enum.raw() as u64);
assert_eq!(verif_info.failure_data.unwrap(), fail_data);
}
pub fn assert_request_completion_common(
&self,
request_id: RequestId,
verif_info: &VerificationStatus,
step: Option<u16>,
completion_success: bool,
) {
if let Some(step) = step {
assert!(verif_info.step_status.is_some());
assert!(verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
assert_eq!(
verif_info.completed.expect("request is not completed"),
completion_success
);
assert!(!self.handler.request_active(request_id));
}
pub fn assert_request_step_failure(&self, step: u16, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.step_status.is_some());
assert!(!verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
pub fn add_routed_request(
&mut self,
request_id: verification::RequestId,
target_id: TargetId,
action_id: ActionId,
token: VerificationToken<TcStateStarted>,
timeout: Duration,
) {
if self.handler.request_active(request_id.into()) {
panic!("request already present");
}
self.handler
.add_routed_action_request(request_id, target_id, action_id, token, timeout);
if !self.handler.request_active(request_id.into()) {
panic!("request should be active now");
}
}
delegate! {
to self.handler {
pub fn request_active(&self, request_id: RequestId) -> bool;
pub fn handle_action_reply(
&mut self,
action_reply_with_ids: GenericMessage<ActionReplyPusWithActionId>,
time_stamp: &[u8]
) -> Result<(), EcssTmtcError>;
pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError>;
pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError>;
}
to self.verif_reporter {
fn add_tc_with_req_id(&mut self, req_id: verification::RequestId) -> VerificationToken<TcStateNone>;
}
}
}
#[test]
fn test_reply_handler_completion_success() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x06;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
assert!(reply_testbench.request_active(request_id));
let action_reply = GenericMessage::new(
request_id,
sender_id,
ActionReplyPusWithActionId {
action_id,
variant: ActionReplyPus::Completed,
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_success(None, request_id);
}
#[test]
fn test_reply_handler_step_success() {
let mut reply_testbench = Pus8ReplyTestbench::new(false);
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
action_id,
action_id,
ActionReplyPus::StepSuccess { step: 1 },
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
action_id,
action_id,
ActionReplyPus::Completed,
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_success(Some(1), request_id);
}
#[test]
fn test_reply_handler_completion_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let params_raw = ParamsRaw::U32(params::U32(5));
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::CompletionFailed {
error_code: COMPLETION_ERROR_CODE,
params: params_raw.into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_failure(
None,
request_id,
COMPLETION_ERROR_CODE,
&params_raw.to_vec().unwrap(),
);
}
#[test]
fn test_reply_handler_step_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(false);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::StepFailed {
error_code: COMPLETION_ERROR_CODE_STEP,
step: 2,
params: ParamsRaw::U32(crate::params::U32(5)).into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_step_failure(2, request_id);
}
#[test]
fn test_reply_handler_timeout_handling() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let request_id = 0x02;
let target_id = 0x06;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let timeout_param = Duration::from_millis(1).as_millis() as u64;
let timeout_param_raw = timeout_param.to_be_bytes();
std::thread::sleep(Duration::from_millis(2));
reply_testbench
.update_time_from_now()
.expect("time update failure");
reply_testbench.check_for_timeouts(&[]).unwrap();
reply_testbench.assert_request_completion_failure(
None,
request_id,
TIMEOUT_ERROR_CODE,
&timeout_param_raw,
);
}
#[test]
fn test_unrequested_reply() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let action_id = 0x03;
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::Completed,
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
let reply = reply_testbench.next_unrequested_reply();
assert!(reply.is_some());
let reply = reply.unwrap();
assert_eq!(reply.message.action_id, action_id);
assert_eq!(reply.request_id, request_id);
assert_eq!(reply.message.variant, ActionReplyPus::Completed);
}
*/
}

View File

@@ -1,13 +1,13 @@
use crate::pus::{source_buffer_large_enough, EcssTmtcError}; use crate::pus::source_buffer_large_enough;
use spacepackets::ecss::tm::PusTmCreator; use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader; use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::{EcssEnumeration, PusError}; use spacepackets::ecss::EcssEnumeration;
use spacepackets::ByteConversionError; use spacepackets::ByteConversionError;
use spacepackets::{SpHeader, MAX_APID}; use spacepackets::{SpHeader, MAX_APID};
use crate::pus::EcssTmSenderCore;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub use alloc_mod::EventReporter; pub use alloc_mod::*;
pub use spacepackets::ecss::event::*; pub use spacepackets::ecss::event::*;
pub struct EventReportCreator { pub struct EventReportCreator {
@@ -16,117 +16,112 @@ pub struct EventReportCreator {
} }
impl EventReportCreator { impl EventReportCreator {
pub fn new(apid: u16) -> Option<Self> { pub fn new(apid: u16, dest_id: u16) -> Option<Self> {
if apid > MAX_APID { if apid > MAX_APID {
return None; return None;
} }
Some(Self { Some(Self { dest_id, apid })
// msg_count: 0,
dest_id: 0,
apid,
})
} }
pub fn event_info<'time, 'src_data>( pub fn event_info<'time, 'src_data>(
&self, &self,
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
src_data_buf,
Subservice::TmInfoReport, Subservice::TmInfoReport,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
src_data_buf,
) )
} }
pub fn event_low_severity<'time, 'src_data>( pub fn event_low_severity<'time, 'src_data>(
&self, &self,
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
src_data_buf,
Subservice::TmLowSeverityReport, Subservice::TmLowSeverityReport,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
src_data_buf,
) )
} }
pub fn event_medium_severity<'time, 'src_data>( pub fn event_medium_severity<'time, 'src_data>(
&self, &self,
buf: &'src_data mut [u8],
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
buf,
Subservice::TmMediumSeverityReport, Subservice::TmMediumSeverityReport,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
buf,
) )
} }
pub fn event_high_severity<'time, 'src_data>( pub fn event_high_severity<'time, 'src_data>(
&self, &self,
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
src_data_buf,
Subservice::TmHighSeverityReport, Subservice::TmHighSeverityReport,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
src_data_buf,
) )
} }
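All four severity methods now take the scratch buffer last and rename `aux_data` to `params`, and `dest_id` moved into the constructor. A hedged sketch of the new call shape; the `satrs::pus::event` path is assumed and the timestamp bytes are a placeholder:

```rust
use satrs::pus::event::EventReportCreator; // path assumed
use spacepackets::ecss::EcssEnumU16;

fn main() {
    // dest_id is now a constructor argument instead of a hard-coded zero.
    let creator = EventReportCreator::new(0x02, 0x00).expect("APID out of range");

    let mut buf = [0u8; 64];
    let time_stamp = [0u8; 7]; // placeholder timestamp bytes

    // New argument order: timestamp, event ID, optional params, buffer last.
    let tm = creator
        .event_info(&time_stamp, EcssEnumU16::new(5), None, &mut buf)
        .expect("event TM generation failed");
    let _ = tm;
}
```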
fn generate_and_send_generic_tm<'time, 'src_data>( fn generate_and_send_generic_tm<'time, 'src_data>(
&self, &self,
src_data_buf: &'src_data mut [u8],
subservice: Subservice, subservice: Subservice,
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_generic_event_tm(src_data_buf, subservice, time_stamp, event_id, aux_data) self.generate_generic_event_tm(subservice, time_stamp, event_id, params, src_data_buf)
} }
fn generate_generic_event_tm<'time, 'src_data>( fn generate_generic_event_tm<'time, 'src_data>(
&self, &self,
src_data_buf: &'src_data mut [u8],
subservice: Subservice, subservice: Subservice,
time_stamp: &'time [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&'src_data [u8]>, params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> { ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
let mut src_data_len = event_id.size(); let mut src_data_len = event_id.size();
if let Some(aux_data) = aux_data { if let Some(aux_data) = params {
src_data_len += aux_data.len(); src_data_len += aux_data.len();
} }
source_buffer_large_enough(src_data_buf.len(), src_data_len)?; source_buffer_large_enough(src_data_buf.len(), src_data_len)?;
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
let sec_header = let sec_header =
PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, Some(time_stamp)); PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, time_stamp);
let mut current_idx = 0; let mut current_idx = 0;
event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?; event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?;
current_idx += event_id.size(); current_idx += event_id.size();
if let Some(aux_data) = aux_data { if let Some(aux_data) = params {
src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data); src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
current_idx += aux_data.len(); current_idx += aux_data.len();
} }
Ok(PusTmCreator::new( Ok(PusTmCreator::new(
&mut sp_header, SpHeader::new_from_apid(self.apid),
sec_header, sec_header,
&src_data_buf[0..current_idx], &src_data_buf[0..current_idx],
true, true,
@@ -137,93 +132,130 @@ impl EventReportCreator {
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
mod alloc_mod { mod alloc_mod {
use super::*; use super::*;
use crate::pus::{EcssTmSender, EcssTmtcError};
use crate::ComponentId; use crate::ComponentId;
use alloc::vec; use alloc::vec;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::cell::RefCell; use core::cell::RefCell;
use spacepackets::ecss::PusError;
pub struct EventReporter { pub trait EventTmHookProvider {
fn modify_tm(&self, tm: &mut PusTmCreator);
}
#[derive(Default)]
pub struct DummyEventHook {}
impl EventTmHookProvider for DummyEventHook {
fn modify_tm(&self, _tm: &mut PusTmCreator) {}
}
pub struct EventReporter<EventTmHook: EventTmHookProvider = DummyEventHook> {
id: ComponentId, id: ComponentId,
// Uses the interior mutability pattern: this is just an intermediate buffer for PUS event packet // Uses the interior mutability pattern: this is just an intermediate buffer for PUS event packet
// generation. // generation.
source_data_buf: RefCell<Vec<u8>>, source_data_buf: RefCell<Vec<u8>>,
pub report_creator: EventReportCreator, pub report_creator: EventReportCreator,
pub tm_hook: EventTmHook,
} }
impl EventReporter { impl EventReporter<DummyEventHook> {
pub fn new( pub fn new(
id: ComponentId, id: ComponentId,
apid: u16, default_apid: u16,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize, max_event_id_and_aux_data_size: usize,
) -> Option<Self> { ) -> Option<Self> {
let reporter = EventReportCreator::new(apid)?; let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self { Some(Self {
id, id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]), source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter, report_creator: reporter,
tm_hook: DummyEventHook::default(),
})
}
}
impl<EventTmHook: EventTmHookProvider> EventReporter<EventTmHook> {
pub fn new_with_hook(
id: ComponentId,
default_apid: u16,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
tm_hook: EventTmHook,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook,
}) })
} }
pub fn event_info( pub fn event_info(
&self, &self,
sender: &(impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSender + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
let mut mut_buf = self.source_data_buf.borrow_mut(); let mut mut_buf = self.source_data_buf.borrow_mut();
let tm_creator = self let mut tm_creator = self
.report_creator .report_creator
.event_info(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .event_info(time_stamp, event_id, params, mut_buf.as_mut_slice())
.map_err(PusError::ByteConversion)?; .map_err(PusError::ByteConversion)?;
self.tm_hook.modify_tm(&mut tm_creator);
sender.send_tm(self.id, tm_creator.into())?; sender.send_tm(self.id, tm_creator.into())?;
Ok(()) Ok(())
} }
pub fn event_low_severity( pub fn event_low_severity(
&self, &self,
sender: &(impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSender + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
let mut mut_buf = self.source_data_buf.borrow_mut(); let mut mut_buf = self.source_data_buf.borrow_mut();
let tm_creator = self let mut tm_creator = self
.report_creator .report_creator
.event_low_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .event_low_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
.map_err(PusError::ByteConversion)?; .map_err(PusError::ByteConversion)?;
self.tm_hook.modify_tm(&mut tm_creator);
sender.send_tm(self.id, tm_creator.into())?; sender.send_tm(self.id, tm_creator.into())?;
Ok(()) Ok(())
} }
pub fn event_medium_severity( pub fn event_medium_severity(
&self, &self,
sender: &(impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSender + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
let mut mut_buf = self.source_data_buf.borrow_mut(); let mut mut_buf = self.source_data_buf.borrow_mut();
let tm_creator = self let mut tm_creator = self
.report_creator .report_creator
.event_medium_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .event_medium_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
.map_err(PusError::ByteConversion)?; .map_err(PusError::ByteConversion)?;
self.tm_hook.modify_tm(&mut tm_creator);
sender.send_tm(self.id, tm_creator.into())?; sender.send_tm(self.id, tm_creator.into())?;
Ok(()) Ok(())
} }
pub fn event_high_severity( pub fn event_high_severity(
&self, &self,
sender: &(impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSender + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
let mut mut_buf = self.source_data_buf.borrow_mut(); let mut mut_buf = self.source_data_buf.borrow_mut();
let tm_creator = self let mut tm_creator = self
.report_creator .report_creator
.event_high_severity(mut_buf.as_mut_slice(), time_stamp, event_id, aux_data) .event_high_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
.map_err(PusError::ByteConversion)?; .map_err(PusError::ByteConversion)?;
self.tm_hook.modify_tm(&mut tm_creator);
sender.send_tm(self.id, tm_creator.into())?; sender.send_tm(self.id, tm_creator.into())?;
Ok(()) Ok(())
} }
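The `EventTmHookProvider` hook introduced above lets users post-process every event packet right before it is sent, instead of relying on the `DummyEventHook` default. A minimal sketch of a custom hook; the counting logic is purely illustrative and not part of the crate:

    use core::cell::RefCell;

    /// Illustrative hook which counts every event TM routed through the reporter.
    #[derive(Default)]
    pub struct CountingEventHook {
        pub tm_count: RefCell<u32>,
    }

    impl EventTmHookProvider for CountingEventHook {
        fn modify_tm(&self, _tm: &mut PusTmCreator) {
            *self.tm_count.borrow_mut() += 1;
        }
    }

Such a hook is passed in via the new `EventReporter::new_with_hook` constructor shown above.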
@@ -234,10 +266,11 @@ mod alloc_mod {
 mod tests {
    use super::*;
    use crate::events::{EventU32, Severity};
-    use crate::pus::test_util::TEST_COMPONENT_ID;
+    use crate::pus::test_util::TEST_COMPONENT_ID_0;
    use crate::pus::tests::CommonTmInfo;
-    use crate::pus::{ChannelWithId, PusTmVariant};
+    use crate::pus::{ChannelWithId, EcssTmSender, EcssTmtcError, PusTmVariant};
    use crate::ComponentId;
+    use spacepackets::ecss::PusError;
    use spacepackets::ByteConversionError;
    use std::cell::RefCell;
    use std::collections::VecDeque;
@@ -268,7 +301,7 @@ mod tests {
        }
    }

-    impl EcssTmSenderCore for TestSender {
+    impl EcssTmSender for TestSender {
        fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
            match tm {
                PusTmVariant::InStore(_) => {
@@ -298,10 +331,10 @@ mod tests {
    fn severity_to_subservice(severity: Severity) -> Subservice {
        match severity {
-            Severity::INFO => Subservice::TmInfoReport,
-            Severity::LOW => Subservice::TmLowSeverityReport,
-            Severity::MEDIUM => Subservice::TmMediumSeverityReport,
-            Severity::HIGH => Subservice::TmHighSeverityReport,
+            Severity::Info => Subservice::TmInfoReport,
+            Severity::Low => Subservice::TmLowSeverityReport,
+            Severity::Medium => Subservice::TmMediumSeverityReport,
+            Severity::High => Subservice::TmHighSeverityReport,
        }
    }
@@ -314,22 +347,22 @@ mod tests {
        aux_data: Option<&[u8]>,
    ) {
        match severity {
-            Severity::INFO => {
+            Severity::Info => {
                reporter
                    .event_info(sender, time_stamp, event, aux_data)
                    .expect("Error reporting info event");
            }
-            Severity::LOW => {
+            Severity::Low => {
                reporter
                    .event_low_severity(sender, time_stamp, event, aux_data)
                    .expect("Error reporting low event");
            }
-            Severity::MEDIUM => {
+            Severity::Medium => {
                reporter
                    .event_medium_severity(sender, time_stamp, event, aux_data)
                    .expect("Error reporting medium event");
            }
-            Severity::HIGH => {
+            Severity::High => {
                reporter
                    .event_high_severity(sender, time_stamp, event, aux_data)
                    .expect("Error reporting high event");
@@ -343,8 +376,12 @@ mod tests {
        error_data: Option<&[u8]>,
    ) {
        let mut sender = TestSender::default();
-        let reporter =
-            EventReporter::new(TEST_COMPONENT_ID.id(), EXAMPLE_APID, max_event_aux_data_buf);
+        let reporter = EventReporter::new(
+            TEST_COMPONENT_ID_0.id(),
+            EXAMPLE_APID,
+            0,
+            max_event_aux_data_buf,
+        );
        assert!(reporter.is_some());
        let mut reporter = reporter.unwrap();
        let time_stamp_empty: [u8; 7] = [0; 7];
@@ -352,7 +389,7 @@ mod tests {
        if let Some(err_data) = error_data {
            error_copy.extend_from_slice(err_data);
        }
-        let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
+        let event = EventU32::new_checked(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
            .expect("Error creating example event");
        report_basic_event(
            &mut reporter,
@@ -370,45 +407,45 @@ mod tests {
            severity_to_subservice(severity) as u8
        );
        assert_eq!(tm_info.common.dest_id, 0);
-        assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
+        assert_eq!(tm_info.common.timestamp, time_stamp_empty);
        assert_eq!(tm_info.common.msg_counter, 0);
        assert_eq!(tm_info.common.apid, EXAMPLE_APID);
        assert_eq!(tm_info.event, event);
-        assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID.id());
+        assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID_0.id());
        assert_eq!(tm_info.aux_data, error_copy);
    }

    #[test]
    fn basic_info_event_generation() {
-        basic_event_test(4, Severity::INFO, None);
+        basic_event_test(4, Severity::Info, None);
    }

    #[test]
    fn basic_low_severity_event() {
-        basic_event_test(4, Severity::LOW, None);
+        basic_event_test(4, Severity::Low, None);
    }

    #[test]
    fn basic_medium_severity_event() {
-        basic_event_test(4, Severity::MEDIUM, None);
+        basic_event_test(4, Severity::Medium, None);
    }

    #[test]
    fn basic_high_severity_event() {
-        basic_event_test(4, Severity::HIGH, None);
+        basic_event_test(4, Severity::High, None);
    }

    #[test]
    fn event_with_info_string() {
        let info_string = "Test Information";
-        basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
+        basic_event_test(32, Severity::Info, Some(info_string.as_bytes()));
    }

    #[test]
    fn low_severity_with_raw_err_data() {
        let raw_err_param: i32 = -1;
        let raw_err = raw_err_param.to_be_bytes();
-        basic_event_test(8, Severity::LOW, Some(&raw_err))
+        basic_event_test(8, Severity::Low, Some(&raw_err))
    }

    fn check_buf_too_small(
@@ -417,7 +454,7 @@ mod tests {
        expected_found_len: usize,
    ) {
        let time_stamp_empty: [u8; 7] = [0; 7];
-        let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
+        let event = EventU32::new_checked(Severity::Info, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
            .expect("Error creating example event");
        let err = reporter.event_info(sender, &time_stamp_empty, event, None);
        assert!(err.is_err());
@@ -437,7 +474,7 @@ mod tests {
    fn insufficient_buffer() {
        let mut sender = TestSender::default();
        for i in 0..3 {
-            let reporter = EventReporter::new(0, EXAMPLE_APID, i);
+            let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
            assert!(reporter.is_some());
            let mut reporter = reporter.unwrap();
            check_buf_too_small(&mut reporter, &mut sender, i);


@@ -10,13 +10,11 @@ use hashbrown::HashSet;
 pub use crate::pus::event::EventReporter;
 use crate::pus::verification::TcStateToken;
 #[cfg(feature = "alloc")]
-use crate::pus::EcssTmSenderCore;
+use crate::pus::EcssTmSender;
 use crate::pus::EcssTmtcError;
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub use alloc_mod::*;
 #[cfg(feature = "heapless")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
 pub use heapless_mod::*;

 /// This trait allows the PUS event manager implementation to stay generic over various types
@@ -30,7 +28,7 @@ pub use heapless_mod::*;
 /// structure to track disabled events. A more primitive and embedded friendly
 /// solution could track this information in a static or pre-allocated list which contains
 /// the disabled events.
-pub trait PusEventMgmtBackendProvider<Event: GenericEvent> {
+pub trait PusEventReportingMapProvider<Event: GenericEvent> {
    type Error;

    fn event_enabled(&self, event: &Event) -> bool;
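As the doc comment suggests, the reporting map can also be backed by a pre-allocated list on heapless targets. A minimal sketch for `EventU32`, assuming the three methods visible in this diff make up the whole trait:

    /// Illustrative fixed-capacity map: disabled events live in a pre-allocated array.
    pub struct ArrayReportingMap<const N: usize> {
        disabled: [Option<EventU32>; N],
    }

    impl<const N: usize> ArrayReportingMap<N> {
        pub fn new() -> Self {
            Self { disabled: [None; N] }
        }
    }

    impl<const N: usize> PusEventReportingMapProvider<EventU32> for ArrayReportingMap<N> {
        type Error = ();

        fn event_enabled(&self, event: &EventU32) -> bool {
            !self.disabled.iter().flatten().any(|e| e == event)
        }

        fn enable_event_reporting(&mut self, event: &EventU32) -> Result<bool, Self::Error> {
            for slot in self.disabled.iter_mut() {
                if *slot == Some(*event) {
                    *slot = None;
                    return Ok(true);
                }
            }
            Ok(false)
        }

        fn disable_event_reporting(&mut self, event: &EventU32) -> Result<bool, Self::Error> {
            if !self.event_enabled(event) {
                return Ok(false);
            }
            for slot in self.disabled.iter_mut() {
                if slot.is_none() {
                    *slot = Some(*event);
                    return Ok(true);
                }
            }
            // The pre-allocated list is full.
            Err(())
        }
    }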
@@ -44,7 +42,6 @@ pub mod heapless_mod {
    use crate::events::LargestEventRaw;
    use core::marker::PhantomData;

-    #[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
    // TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
    // regular Event type again.
    #[derive(Default)]
@@ -53,13 +50,7 @@ pub mod heapless_mod {
        phantom: PhantomData<Provider>,
    }

-    /// Safety: All contained field are [Send] as well
-    unsafe impl<const N: usize, Event: GenericEvent + Send> Send
-        for HeaplessPusMgmtBackendProvider<N, Event>
-    {
-    }
-
-    impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
+    impl<const N: usize, Provider: GenericEvent> PusEventReportingMapProvider<Provider>
        for HeaplessPusMgmtBackendProvider<N, Provider>
    {
        type Error = ();
@@ -108,20 +99,24 @@ impl From<EcssTmtcError> for EventManError {
 pub mod alloc_mod {
    use core::marker::PhantomData;

-    use crate::events::EventU16;
+    use crate::{
+        events::EventU16,
+        params::{Params, WritableToBeBytes},
+        pus::event::{DummyEventHook, EventTmHookProvider},
+    };

    use super::*;

    /// Default backend provider which uses a hash set as the event reporting status container
-    /// like mentioned in the example of the [PusEventMgmtBackendProvider] documentation.
+    /// like mentioned in the example of the [PusEventReportingMapProvider] documentation.
    ///
    /// This provider is a good option for host systems or larger embedded systems where
    /// the expected occasional memory allocation performed by the [HashSet] is not an issue.
-    pub struct DefaultPusEventMgmtBackend<Event: GenericEvent = EventU32> {
+    pub struct DefaultPusEventReportingMap<Event: GenericEvent = EventU32> {
        disabled: HashSet<Event>,
    }

-    impl<Event: GenericEvent> Default for DefaultPusEventMgmtBackend<Event> {
+    impl<Event: GenericEvent> Default for DefaultPusEventReportingMap<Event> {
        fn default() -> Self {
            Self {
                disabled: HashSet::default(),
@@ -129,118 +124,176 @@ pub mod alloc_mod {
        }
    }

-    impl<EV: GenericEvent + PartialEq + Eq + Hash + Copy + Clone> PusEventMgmtBackendProvider<EV>
-        for DefaultPusEventMgmtBackend<EV>
+    impl<Event: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
+        PusEventReportingMapProvider<Event> for DefaultPusEventReportingMap<Event>
    {
        type Error = ();

-        fn event_enabled(&self, event: &EV) -> bool {
+        fn event_enabled(&self, event: &Event) -> bool {
            !self.disabled.contains(event)
        }

-        fn enable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
+        fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
            Ok(self.disabled.remove(event))
        }

-        fn disable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
+        fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
            Ok(self.disabled.insert(*event))
        }
    }
-    pub struct PusEventDispatcher<
-        B: PusEventMgmtBackendProvider<EV, Error = E>,
-        EV: GenericEvent,
-        E,
-    > {
-        reporter: EventReporter,
-        backend: B,
-        phantom: PhantomData<(E, EV)>,
+    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
+    pub struct EventGenerationResult {
+        pub event_was_enabled: bool,
+        pub params_were_propagated: bool,
    }

-    impl<B: PusEventMgmtBackendProvider<EV, Error = E>, EV: GenericEvent, E>
-        PusEventDispatcher<B, EV, E>
+    pub struct PusEventTmCreatorWithMap<
+        ReportingMap: PusEventReportingMapProvider<Event>,
+        Event: GenericEvent,
+        EventTmHook: EventTmHookProvider = DummyEventHook,
+    > {
+        pub reporter: EventReporter<EventTmHook>,
+        reporting_map: ReportingMap,
+        phantom: PhantomData<Event>,
+    }
+
+    impl<
+            ReportingMap: PusEventReportingMapProvider<Event>,
+            Event: GenericEvent,
+            EventTmHook: EventTmHookProvider,
+        > PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHook>
    {
-        pub fn new(reporter: EventReporter, backend: B) -> Self {
+        pub fn new(reporter: EventReporter<EventTmHook>, backend: ReportingMap) -> Self {
            Self {
                reporter,
-                backend,
+                reporting_map: backend,
                phantom: PhantomData,
            }
        }

-        pub fn enable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
-            self.backend.enable_event_reporting(event)
+        pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
+            self.reporting_map.enable_event_reporting(event)
        }

-        pub fn disable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
-            self.backend.disable_event_reporting(event)
+        pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
+            self.reporting_map.disable_event_reporting(event)
        }
        pub fn generate_pus_event_tm_generic(
            &self,
-            sender: &(impl EcssTmSenderCore + ?Sized),
+            sender: &(impl EcssTmSender + ?Sized),
            time_stamp: &[u8],
-            event: EV,
-            aux_data: Option<&[u8]>,
+            event: Event,
+            params: Option<&[u8]>,
        ) -> Result<bool, EventManError> {
-            if !self.backend.event_enabled(&event) {
+            if !self.reporting_map.event_enabled(&event) {
                return Ok(false);
            }
            match event.severity() {
-                Severity::INFO => self
+                Severity::Info => self
                    .reporter
-                    .event_info(sender, time_stamp, event, aux_data)
+                    .event_info(sender, time_stamp, event, params)
                    .map(|_| true)
                    .map_err(|e| e.into()),
-                Severity::LOW => self
+                Severity::Low => self
                    .reporter
-                    .event_low_severity(sender, time_stamp, event, aux_data)
+                    .event_low_severity(sender, time_stamp, event, params)
                    .map(|_| true)
                    .map_err(|e| e.into()),
-                Severity::MEDIUM => self
+                Severity::Medium => self
                    .reporter
-                    .event_medium_severity(sender, time_stamp, event, aux_data)
+                    .event_medium_severity(sender, time_stamp, event, params)
                    .map(|_| true)
                    .map_err(|e| e.into()),
-                Severity::HIGH => self
+                Severity::High => self
                    .reporter
-                    .event_high_severity(sender, time_stamp, event, aux_data)
+                    .event_high_severity(sender, time_stamp, event, params)
                    .map(|_| true)
                    .map_err(|e| e.into()),
            }
        }
+        pub fn generate_pus_event_tm_generic_with_generic_params(
+            &self,
+            sender: &(impl EcssTmSender + ?Sized),
+            time_stamp: &[u8],
+            event: Event,
+            small_data_buf: &mut [u8],
+            params: Option<&Params>,
+        ) -> Result<EventGenerationResult, EventManError> {
+            let mut result = EventGenerationResult {
+                event_was_enabled: false,
+                params_were_propagated: true,
+            };
+            if params.is_none() {
+                result.event_was_enabled =
+                    self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?;
+                return Ok(result);
+            }
+            let params = params.unwrap();
+            result.event_was_enabled = match params {
+                Params::Heapless(heapless_param) => {
+                    heapless_param
+                        .write_to_be_bytes(&mut small_data_buf[..heapless_param.written_len()])
+                        .map_err(EcssTmtcError::ByteConversion)?;
+                    self.generate_pus_event_tm_generic(
+                        sender,
+                        time_stamp,
+                        event,
+                        Some(small_data_buf),
+                    )?
+                }
+                Params::Vec(vec) => {
+                    self.generate_pus_event_tm_generic(sender, time_stamp, event, Some(vec))?
+                }
+                Params::String(string) => self.generate_pus_event_tm_generic(
+                    sender,
+                    time_stamp,
+                    event,
+                    Some(string.as_bytes()),
+                )?,
+                _ => {
+                    result.params_were_propagated = false;
+                    self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?
+                }
+            };
+            Ok(result)
+        }
    }
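The new `generate_pus_event_tm_generic_with_generic_params` serializes heapless parameters into the caller-provided scratch buffer, passes `Vec` and `String` parameters through as raw bytes, and flags every other `Params` variant as not propagated. A usage sketch modeled on the tests further down (`event_creator`, `event_tx`, `EMPTY_STAMP` and `INFO_EVENT` assumed as in those tests):

    let mut scratch_buf = [0u8; 128];
    let param: Params = "hello world".to_string().into();
    let result = event_creator
        .generate_pus_event_tm_generic_with_generic_params(
            &event_tx,
            &EMPTY_STAMP,
            INFO_EVENT.into(),
            &mut scratch_buf,
            Some(&param),
        )
        .expect("event generation failed");
    assert!(result.event_was_enabled && result.params_were_propagated);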
-    impl<EV: GenericEvent + Copy + PartialEq + Eq + Hash>
-        PusEventDispatcher<DefaultPusEventMgmtBackend<EV>, EV, ()>
+    impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHook: EventTmHookProvider>
+        PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHook>
    {
-        pub fn new_with_default_backend(reporter: EventReporter) -> Self {
+        pub fn new_with_default_backend(reporter: EventReporter<EventTmHook>) -> Self {
            Self {
                reporter,
-                backend: DefaultPusEventMgmtBackend::default(),
+                reporting_map: DefaultPusEventReportingMap::default(),
                phantom: PhantomData,
            }
        }
    }
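Wiring up a creator with the default hash-set map then only takes a reporter; a short sketch using the test constants defined further down:

    let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
        .expect("creating event reporter failed");
    let event_creator = PusEventTmCreatorWithMap::new_with_default_backend(reporter);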
-    impl<B: PusEventMgmtBackendProvider<EventU32, Error = E>, E> PusEventDispatcher<B, EventU32, E> {
+    impl<ReportingMap: PusEventReportingMapProvider<EventU32>>
+        PusEventTmCreatorWithMap<ReportingMap, EventU32>
+    {
        pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
            &mut self,
            event: &EventU32TypedSev<Severity>,
-        ) -> Result<bool, E> {
-            self.backend.enable_event_reporting(event.as_ref())
+        ) -> Result<bool, ReportingMap::Error> {
+            self.reporting_map.enable_event_reporting(event.as_ref())
        }

        pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
            &mut self,
            event: &EventU32TypedSev<Severity>,
-        ) -> Result<bool, E> {
-            self.backend.disable_event_reporting(event.as_ref())
+        ) -> Result<bool, ReportingMap::Error> {
+            self.reporting_map.disable_event_reporting(event.as_ref())
        }

        pub fn generate_pus_event_tm<Severity: HasSeverity>(
            &self,
-            sender: &(impl EcssTmSenderCore + ?Sized),
+            sender: &(impl EcssTmSender + ?Sized),
            time_stamp: &[u8],
            event: EventU32TypedSev<Severity>,
            aux_data: Option<&[u8]>,
@@ -249,42 +302,46 @@ pub mod alloc_mod {
        }
    }

-    pub type DefaultPusEventU16Dispatcher<E> =
-        PusEventDispatcher<DefaultPusEventMgmtBackend<EventU16>, EventU16, E>;
+    pub type DefaultPusEventU16TmCreator<EventTmHook = DummyEventHook> =
+        PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU16>, EventU16, EventTmHook>;
-    pub type DefaultPusEventU32Dispatcher<E> =
-        PusEventDispatcher<DefaultPusEventMgmtBackend<EventU32>, EventU32, E>;
+    pub type DefaultPusEventU32TmCreator<EventTmHook = DummyEventHook> =
+        PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU32>, EventU32, EventTmHook>;
 }

 #[cfg(test)]
 mod tests {
+    use alloc::string::{String, ToString};
+    use alloc::vec;
+    use spacepackets::ecss::event::Subservice;
+    use spacepackets::ecss::tm::PusTmReader;
+    use spacepackets::ecss::PusPacket;
    use super::*;
-    use crate::events::SeverityInfo;
-    use crate::pus::PusTmAsVec;
    use crate::request::UniqueApidTargetId;
+    use crate::{events::SeverityInfo, tmtc::PacketAsVec};
    use std::sync::mpsc::{self, TryRecvError};

-    const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
-        EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
-    const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
+    const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
+    const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
    const EMPTY_STAMP: [u8; 7] = [0; 7];
    const TEST_APID: u16 = 0x02;
    const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);

-    fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> {
-        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 128)
+    fn create_basic_man_1() -> DefaultPusEventU32TmCreator {
+        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
            .expect("Creating event reporter failed");
-        PusEventDispatcher::new_with_default_backend(reporter)
+        PusEventTmCreatorWithMap::new_with_default_backend(reporter)
    }

-    fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> {
-        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 128)
+    fn create_basic_man_2() -> DefaultPusEventU32TmCreator {
+        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
            .expect("Creating event reporter failed");
-        let backend = DefaultPusEventMgmtBackend::default();
-        PusEventDispatcher::new(reporter, backend)
+        let backend = DefaultPusEventReportingMap::default();
+        PusEventTmCreatorWithMap::new(reporter, backend)
    }

    #[test]
    fn test_basic() {
        let event_man = create_basic_man_1();
-        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
+        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let event_sent = event_man
            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
            .expect("Sending info event failed");
@@ -297,7 +354,7 @@ mod tests {
    #[test]
    fn test_disable_event() {
        let mut event_man = create_basic_man_2();
-        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
+        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        // let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
        let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
        assert!(res.is_ok());
@@ -320,7 +377,7 @@ mod tests {
    #[test]
    fn test_reenable_event() {
        let mut event_man = create_basic_man_1();
-        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
+        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
        assert!(res.is_ok());
        assert!(res.unwrap());
@@ -333,4 +390,70 @@ mod tests {
        assert!(event_sent);
        event_rx.try_recv().expect("No info event received");
    }

+    #[test]
+    fn test_event_with_generic_string_param() {
+        let event_man = create_basic_man_1();
+        let mut small_data_buf = [0; 128];
+        let param_data = "hello world";
+        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
+        let res = event_man.generate_pus_event_tm_generic_with_generic_params(
+            &event_tx,
+            &EMPTY_STAMP,
+            INFO_EVENT.into(),
+            &mut small_data_buf,
+            Some(&param_data.to_string().into()),
+        );
+        assert!(res.is_ok());
+        let res = res.unwrap();
+        assert!(res.event_was_enabled);
+        assert!(res.params_were_propagated);
+        let event_tm = event_rx.try_recv().expect("no event received");
+        let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
+        assert_eq!(tm.service(), 5);
+        assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
+        assert_eq!(tm.user_data().len(), 4 + param_data.len());
+        let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
+        assert_eq!(u32_event, INFO_EVENT.raw());
+        let string_data = String::from_utf8_lossy(&tm.user_data()[4..]);
+        assert_eq!(string_data, param_data);
+    }
+
+    #[test]
+    fn test_event_with_generic_vec_param() {
+        let event_man = create_basic_man_1();
+        let mut small_data_buf = [0; 128];
+        let param_data = vec![1, 2, 3, 4];
+        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
+        let res = event_man.generate_pus_event_tm_generic_with_generic_params(
+            &event_tx,
+            &EMPTY_STAMP,
+            INFO_EVENT.into(),
+            &mut small_data_buf,
+            Some(&param_data.clone().into()),
+        );
+        assert!(res.is_ok());
+        let res = res.unwrap();
+        assert!(res.event_was_enabled);
+        assert!(res.params_were_propagated);
+        let event_tm = event_rx.try_recv().expect("no event received");
+        let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
+        assert_eq!(tm.service(), 5);
+        assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
+        assert_eq!(tm.user_data().len(), 4 + param_data.len());
+        let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
+        assert_eq!(u32_event, INFO_EVENT.raw());
+        let vec_data = tm.user_data()[4..].to_vec();
+        assert_eq!(vec_data, param_data);
+    }
+
+    #[test]
+    fn test_event_with_generic_store_param_not_propagated() {
+        // TODO: Test this.
+    }
+
+    #[test]
+    fn test_event_with_generic_heapless_param() {
+        // TODO: Test this.
+    }
 }


@@ -1,7 +1,7 @@
 use crate::events::EventU32;
 use crate::pus::event_man::{EventRequest, EventRequestWithToken};
 use crate::pus::verification::TcStateToken;
-use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
+use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
 use crate::queue::GenericSendError;
 use spacepackets::ecss::event::Subservice;
 use spacepackets::ecss::PusPacket;
@@ -9,13 +9,13 @@ use std::sync::mpsc::Sender;
 use super::verification::VerificationReportingProvider;
 use super::{
-    EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericConversionError,
-    GenericRoutingError, PusServiceHelper,
+    EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, GenericConversionError,
+    GenericRoutingError, HandlingStatus, PusServiceHelper,
 };

 pub struct PusEventServiceHandler<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
    TcInMemConverter: EcssTcInMemConverter,
    VerificationReporter: VerificationReportingProvider,
 > {
@@ -25,8 +25,8 @@ pub struct PusEventServiceHandler<
 }

 impl<
-        TcReceiver: EcssTcReceiverCore,
-        TmSender: EcssTmSenderCore,
+        TcReceiver: EcssTcReceiver,
+        TmSender: EcssTmSender,
        TcInMemConverter: EcssTcInMemConverter,
        VerificationReporter: VerificationReportingProvider,
    > PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
@@ -46,13 +46,14 @@ impl<
        }
    }

-    pub fn poll_and_handle_next_tc(
+    pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
        &mut self,
+        mut error_callback: ErrorCb,
        time_stamp: &[u8],
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
        if possible_packet.is_none() {
-            return Ok(PusPacketHandlerResult::Empty);
+            return Ok(HandlingStatus::Empty.into());
        }
        let ecss_tc_and_token = possible_packet.unwrap();
        self.service_helper
@@ -62,13 +63,13 @@ impl<
        let subservice = tc.subservice();
        let srv = Subservice::try_from(subservice);
        if srv.is_err() {
-            return Ok(PusPacketHandlerResult::CustomSubservice(
+            return Ok(DirectPusPacketHandlerResult::CustomSubservice(
                tc.subservice(),
                ecss_tc_and_token.token,
            ));
        }
-        let handle_enable_disable_request =
-            |enable: bool| -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        let mut handle_enable_disable_request =
+            |enable: bool| -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
                if tc.user_data().len() < 4 {
                    return Err(GenericConversionError::NotEnoughAppData {
                        expected: 4,
@@ -79,22 +80,20 @@ impl<
                let user_data = tc.user_data();
                let event_u32 =
                    EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
-                let start_token = self
-                    .service_helper
-                    .common
-                    .verif_reporter
-                    .start_success(
-                        self.service_helper.common.id,
-                        &self.service_helper.common.tm_sender,
-                        ecss_tc_and_token.token,
-                        time_stamp,
-                    )
-                    .map_err(|_| PartialPusHandlingError::Verification);
-                let partial_error = start_token.clone().err();
                let mut token: TcStateToken = ecss_tc_and_token.token.into();
-                if let Ok(start_token) = start_token {
-                    token = start_token.into();
+                match self.service_helper.common.verif_reporter.start_success(
+                    &self.service_helper.common.tm_sender,
+                    ecss_tc_and_token.token,
+                    time_stamp,
+                ) {
+                    Ok(start_token) => {
+                        token = start_token.into();
+                    }
+                    Err(e) => {
+                        error_callback(&PartialPusHandlingError::Verification(e));
+                    }
                }
                let event_req_with_token = if enable {
                    EventRequestWithToken {
                        request: EventRequest::Enable(event_u32),
@@ -113,12 +112,7 @@ impl<
                        GenericSendError::RxDisconnected,
                    ))
                })?;
-                if let Some(partial_error) = partial_error {
-                    return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
-                        partial_error,
-                    ));
-                }
-                Ok(PusPacketHandlerResult::RequestHandled)
+                Ok(HandlingStatus::HandledOne.into())
            };

        match srv.unwrap() {
@@ -137,14 +131,14 @@ impl<
                handle_enable_disable_request(false)?;
            }
            Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
-                return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
+                return Ok(DirectPusPacketHandlerResult::SubserviceNotImplemented(
                    subservice,
                    ecss_tc_and_token.token,
                ));
            }
        }

-        Ok(PusPacketHandlerResult::RequestHandled)
+        Ok(HandlingStatus::HandledOne.into())
    }
 }
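Verification failures no longer downgrade the return value to a partial-success variant; they are surfaced through the new error callback instead. Driving the handler could look like the sketch below; the `log` crate is an assumption, and the timestamp line mirrors the tests further down:

    let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
    handler
        .poll_and_handle_next_tc(
            |e| log::warn!("partial PUS 5 handling error: {e:?}"),
            &time_stamp,
        )
        .expect("PUS 5 handling failed");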
@@ -159,7 +153,7 @@ mod tests {
        tc::{PusTcCreator, PusTcSecondaryHeader},
        tm::PusTmReader,
    },
-    SequenceFlags, SpHeader,
+    SpHeader,
 };
 use std::sync::mpsc::{self, Sender};
@@ -168,26 +162,27 @@ mod tests {
 use crate::pus::verification::{
    RequestId, VerificationReporter, VerificationReportingProvider,
 };
-use crate::pus::{GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSenderBounded};
+use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
+use crate::tmtc::PacketSenderWithSharedPool;
 use crate::{
    events::EventU32,
    pus::{
        event_man::EventRequestWithToken,
        tests::PusServiceHandlerWithSharedStoreCommon,
        verification::{TcStateAccepted, VerificationToken},
-        EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
+        DirectPusPacketHandlerResult, EcssTcInSharedStoreConverter, PusPacketHandlingError,
    },
 };

 use super::PusEventServiceHandler;

-const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
+const TEST_EVENT_0: EventU32 = EventU32::new(crate::events::Severity::Info, 5, 25);

 struct Pus5HandlerWithStoreTester {
    common: PusServiceHandlerWithSharedStoreCommon,
    handler: PusEventServiceHandler<
        MpscTcReceiver,
-        MpscTmInSharedPoolSenderBounded,
+        PacketSenderWithSharedPool,
        EcssTcInSharedStoreConverter,
        VerificationReporter,
    >,
@@ -209,18 +204,17 @@ mod tests {
        self.handler
            .service_helper
            .verif_reporter()
-            .acceptance_success(
-                self.handler.service_helper.id(),
-                self.handler.service_helper.tm_sender(),
-                init_token,
-                &[0; 7],
-            )
+            .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
            .expect("acceptance success failure")
    }

+    fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
+        self.common
+            .send_tc(self.handler.service_helper.id(), token, tc);
+    }
+
    delegate! {
        to self.common {
-            fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
            fn read_next_tm(&mut self) -> PusTmReader<'_>;
            fn check_no_tm_available(&self) -> bool;
            fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
@@ -230,9 +224,11 @@ mod tests {
 }

 impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
-    fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    fn handle_one_tc(
+        &mut self,
+    ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
        let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
-        self.handler.poll_and_handle_next_tc(&time_stamp)
+        self.handler.poll_and_handle_next_tc(|_| {}, &time_stamp)
    }
 }
@@ -242,13 +238,13 @@ mod tests {
        expected_event_req: EventRequest,
        event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
    ) {
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
        let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
        let mut app_data = [0; 4];
        TEST_EVENT_0
            .write_to_be_bytes(&mut app_data)
            .expect("writing test event failed");
-        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
+        let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
        let token = test_harness.init_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let request_id = token.request_id();
@@ -294,25 +290,28 @@ mod tests {
        let result = test_harness.handle_one_tc();
        assert!(result.is_ok());
        let result = result.unwrap();
-        if let PusPacketHandlerResult::Empty = result {
-        } else {
-            panic!("unexpected result type {result:?}")
-        }
+        assert!(
+            matches!(
+                result,
+                DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty)
+            ),
+            "unexpected result type {result:?}"
+        )
    }

    #[test]
    fn test_sending_custom_subservice() {
        let (event_request_tx, _) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
        let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
        let token = test_harness.init_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let result = test_harness.handle_one_tc();
        assert!(result.is_ok());
        let result = result.unwrap();
-        if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
+        if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
            assert_eq!(subservice, 200);
        } else {
            panic!("unexpected result type {result:?}")
@@ -323,10 +322,10 @@ mod tests {
    fn test_sending_invalid_app_data() {
        let (event_request_tx, _) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
        let sec_header =
            PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
-        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
+        let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true);
        let token = test_harness.init_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let result = test_harness.handle_one_tc();

(File diff suppressed because it is too large.)

@@ -26,11 +26,9 @@ pub enum Subservice {
 }

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod alloc_mod {}

 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod std_mod {}

 #[cfg(test)]


@@ -14,7 +14,7 @@ use spacepackets::{ByteConversionError, CcsdsPacket};
 #[cfg(feature = "std")]
 use std::error::Error;

-use crate::pool::{PoolProvider, StoreError};
+use crate::pool::{PoolError, PoolProvider};
 #[cfg(feature = "alloc")]
 pub use alloc_mod::*;
@@ -151,7 +151,7 @@ pub enum ScheduleError {
    },
    /// Nested time-tagged commands are not allowed.
    NestedScheduledTc,
-    StoreError(StoreError),
+    StoreError(PoolError),
    TcDataEmpty,
    TimestampError(TimestampError),
    WrongSubservice(u8),
@@ -206,8 +206,8 @@ impl From<PusError> for ScheduleError {
    }
 }

-impl From<StoreError> for ScheduleError {
-    fn from(e: StoreError) -> Self {
+impl From<PoolError> for ScheduleError {
+    fn from(e: PoolError) -> Self {
        Self::StoreError(e)
    }
 }
@@ -240,7 +240,7 @@ impl Error for ScheduleError {
 pub trait PusSchedulerProvider {
    type TimeProvider: CcsdsTimeProvider + TimeReader;

-    fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError>;
+    fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError>;

    fn is_enabled(&self) -> bool;
@@ -345,12 +345,9 @@ pub mod alloc_mod {
        },
        vec::Vec,
    };
-    use spacepackets::time::{
-        cds::{self, DaysLen24Bits},
-        UnixTime,
-    };
+    use spacepackets::time::cds::{self, DaysLen24Bits};

-    use crate::pool::StoreAddr;
+    use crate::pool::PoolAddr;

    use super::*;
@@ -371,8 +368,8 @@ pub mod alloc_mod {
    }

    enum DeletionResult {
-        WithoutStoreDeletion(Option<StoreAddr>),
-        WithStoreDeletion(Result<bool, StoreError>),
+        WithoutStoreDeletion(Option<PoolAddr>),
+        WithStoreDeletion(Result<bool, PoolError>),
    }

    /// This is the core data structure for scheduling PUS telecommands with [alloc] support.
@@ -381,7 +378,7 @@ pub mod alloc_mod {
    /// a [crate::pool::PoolProvider] API. This data structure just tracks the store
    /// addresses and their release times and offers a convenient API to insert and release
    /// telecommands and perform other functionality specified by the ECSS standard in section 6.11.
-    /// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to
+    /// The time is tracked as a [spacepackets::time::UnixTime] but the only requirement to
    /// the timekeeping of the user is that it is convertible to that timestamp.
    ///
    /// The standard also specifies that the PUS scheduler can be enabled and disabled.
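In practice the scheduler is paired with a pool that holds the raw telecommand bytes; insertion then follows the pattern from the tests further down (pool setup and `tc_info` creation elided):

    let mut scheduler =
        PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
    scheduler
        .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info)
        .expect("inserting scheduled TC failed");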
@@ -426,7 +423,6 @@ pub mod alloc_mod {
        /// Like [Self::new], but sets the init_current_time parameter to the current system time.
        #[cfg(feature = "std")]
-        #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
        pub fn new_with_current_init_time(time_margin: Duration) -> Result<Self, SystemTimeError> {
            Ok(Self::new(UnixTime::now()?, time_margin))
        }
@@ -528,7 +524,7 @@ pub mod alloc_mod {
            &mut self,
            time_window: TimeWindow<TimeProvider>,
            pool: &mut (impl PoolProvider + ?Sized),
-        ) -> Result<u64, (u64, StoreError)> {
+        ) -> Result<u64, (u64, PoolError)> {
            let range = self.retrieve_by_time_filter(time_window);
            let mut del_packets = 0;
            let mut res_if_fails = None;
@@ -558,7 +554,7 @@ pub mod alloc_mod {
        pub fn delete_all(
            &mut self,
            pool: &mut (impl PoolProvider + ?Sized),
-        ) -> Result<u64, (u64, StoreError)> {
+        ) -> Result<u64, (u64, PoolError)> {
            self.delete_by_time_filter(TimeWindow::<cds::CdsTime>::new_select_all(), pool)
        }
@@ -604,7 +600,7 @@ pub mod alloc_mod {
        /// Please note that this function will stop on the first telecommand with a request ID match.
        /// In case of duplicate IDs (which should generally not happen), this function needs to be
        /// called repeatedly.
-        pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option<StoreAddr> {
+        pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option<PoolAddr> {
            if let DeletionResult::WithoutStoreDeletion(v) =
                self.delete_by_request_id_internal_without_store_deletion(req_id)
            {
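Because the lookup stops at the first match, clearing duplicate request IDs takes a loop; a minimal sketch:

    // Repeatedly delete until no entry with this request ID remains.
    while scheduler.delete_by_request_id(&req_id).is_some() {}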
@@ -618,7 +614,7 @@ pub mod alloc_mod {
            &mut self,
            req_id: &RequestId,
            pool: &mut (impl PoolProvider + ?Sized),
-        ) -> Result<bool, StoreError> {
+        ) -> Result<bool, PoolError> {
            if let DeletionResult::WithStoreDeletion(v) =
                self.delete_by_request_id_internal_with_store_deletion(req_id, pool)
            {
@@ -670,7 +666,6 @@ pub mod alloc_mod {
        }

        #[cfg(feature = "std")]
-        #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
        pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError> {
            self.current_time = UnixTime::now()?;
            Ok(())
@@ -696,7 +691,7 @@ pub mod alloc_mod {
            releaser: R,
            tc_store: &mut (impl PoolProvider + ?Sized),
            tc_buf: &mut [u8],
-        ) -> Result<u64, (u64, StoreError)> {
+        ) -> Result<u64, (u64, PoolError)> {
            self.release_telecommands_internal(releaser, tc_store, Some(tc_buf))
        }
@@ -710,7 +705,7 @@ pub mod alloc_mod {
            &mut self,
            releaser: R,
            tc_store: &mut (impl PoolProvider + ?Sized),
-        ) -> Result<u64, (u64, StoreError)> {
+        ) -> Result<u64, (u64, PoolError)> {
            self.release_telecommands_internal(releaser, tc_store, None)
        }
@@ -719,7 +714,7 @@ pub mod alloc_mod {
            mut releaser: R,
            tc_store: &mut (impl PoolProvider + ?Sized),
            mut tc_buf: Option<&mut [u8]>,
-        ) -> Result<u64, (u64, StoreError)> {
+        ) -> Result<u64, (u64, PoolError)> {
            let tcs_to_release = self.telecommands_to_release();
            let mut released_tcs = 0;
            let mut store_error = Ok(());
@@ -765,7 +760,7 @@ pub mod alloc_mod {
            mut releaser: R,
            tc_store: &(impl PoolProvider + ?Sized),
            tc_buf: &mut [u8],
-        ) -> Result<alloc::vec::Vec<TcInfo>, (alloc::vec::Vec<TcInfo>, StoreError)> {
+        ) -> Result<alloc::vec::Vec<TcInfo>, (alloc::vec::Vec<TcInfo>, PoolError)> {
            let tcs_to_release = self.telecommands_to_release();
            let mut released_tcs = alloc::vec::Vec::new();
            for tc in tcs_to_release {
@@ -796,7 +791,7 @@ pub mod alloc_mod {
        /// The holding store for the telecommands needs to be passed so all the stored telecommands
        /// can be deleted to avoid a memory leak. If at least one deletion operation fails, the error
        /// will be returned but the method will still try to delete all the commands in the schedule.
-        fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
+        fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError> {
            self.enabled = false;
            let mut deletion_ok = Ok(());
            for tc_lists in &mut self.tc_map {
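A sketch of a full wipe, with the backing pool passed in so the stored telecommands are freed along with the schedule entries:

    scheduler
        .reset(&mut tc_pool)
        .expect("resetting scheduler failed");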
@@ -854,7 +849,7 @@ pub mod alloc_mod {
 mod tests {
    use super::*;
    use crate::pool::{
-        PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr, StoreError,
+        PoolAddr, PoolError, PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig,
    };
    use alloc::collections::btree_map::Range;
    use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
@@ -871,28 +866,28 @@ mod tests {
            cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
                .unwrap();
        let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
-        let len_packet = base_ping_tc_simple_ctor(0, None)
+        let len_packet = base_ping_tc_simple_ctor(0, &[])
            .write_to_bytes(&mut buf[len_time_stamp..])
            .unwrap();
        (
-            SpHeader::tc_unseg(0x02, 0x34, len_packet as u16).unwrap(),
+            SpHeader::new_for_unseg_tc(0x02, 0x34, len_packet as u16),
            len_packet + len_time_stamp,
        )
    }

    fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
-        let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
-        PusTcCreator::new_simple(&mut sph, 11, 4, Some(&buf[..len_app_data]), true)
+        let (sph, len_app_data) = pus_tc_base(timestamp, buf);
+        PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], true)
    }

    fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
-        let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
-        PusTcCreator::new_simple(&mut sph, 12, 4, Some(&buf[..len_app_data]), true)
+        let (sph, len_app_data) = pus_tc_base(timestamp, buf);
+        PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], true)
    }

    fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
-        let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
-        PusTcCreator::new_simple(&mut sph, 11, 5, Some(&buf[..len_app_data]), true)
+        let (sph, len_app_data) = pus_tc_base(timestamp, buf);
+        PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], true)
    }

    fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
@@ -900,40 +895,31 @@ mod tests {
cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent) cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
.unwrap(); .unwrap();
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap(); let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap(); let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 0);
// app data should not matter, double wrapped time-tagged commands should be rejected right // app data should not matter, double wrapped time-tagged commands should be rejected right
// away // away
let inner_time_tagged_tc = PusTcCreator::new_simple(&mut sph, 11, 4, None, true); let inner_time_tagged_tc = PusTcCreator::new_simple(sph, 11, 4, &[], true);
let packet_len = inner_time_tagged_tc let packet_len = inner_time_tagged_tc
.write_to_bytes(&mut buf[len_time_stamp..]) .write_to_bytes(&mut buf[len_time_stamp..])
.expect("writing inner time tagged tc failed"); .expect("writing inner time tagged tc failed");
PusTcCreator::new_simple( PusTcCreator::new_simple(sph, 11, 4, &buf[..len_time_stamp + packet_len], true)
&mut sph,
11,
4,
Some(&buf[..len_time_stamp + packet_len]),
true,
)
} }
fn invalid_time_tagged_cmd() -> PusTcCreator<'static> { fn invalid_time_tagged_cmd() -> PusTcCreator<'static> {
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 1).unwrap(); let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 1);
PusTcCreator::new_simple(&mut sph, 11, 4, None, true) PusTcCreator::new_simple(sph, 11, 4, &[], true)
} }
fn base_ping_tc_simple_ctor( fn base_ping_tc_simple_ctor(seq_count: u16, app_data: &'static [u8]) -> PusTcCreator<'static> {
seq_count: u16, let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0);
app_data: Option<&'static [u8]>, PusTcCreator::new_simple(sph, 17, 1, app_data, true)
) -> PusTcCreator<'static> {
let mut sph = SpHeader::tc_unseg(0x02, seq_count, 0).unwrap();
PusTcCreator::new_simple(&mut sph, 17, 1, app_data, true)
} }
fn ping_tc_to_store( fn ping_tc_to_store(
pool: &mut StaticMemoryPool, pool: &mut StaticMemoryPool,
buf: &mut [u8], buf: &mut [u8],
seq_count: u16, seq_count: u16,
app_data: Option<&'static [u8]>, app_data: &'static [u8],
) -> TcInfo { ) -> TcInfo {
let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data); let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data);
let ping_size = ping_tc.write_to_bytes(buf).expect("writing ping TC failed"); let ping_size = ping_tc.write_to_bytes(buf).expect("writing ping TC failed");
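The hunks above migrate the scheduler test helpers to the newer spacepackets constructor API: `SpHeader::new_for_unseg_tc` is infallible and returns the header by value, `PusTcCreator::new_simple` consumes the header instead of mutating it through `&mut`, and optional application data becomes a plain byte slice with `&[]` standing in for the old `None`. A minimal sketch of the migrated pattern, using only calls visible in this diff:

    use spacepackets::ecss::tc::PusTcCreator;
    use spacepackets::SpHeader;

    // Minimal sketch of the migrated constructor pattern. APID 0x02 and the
    // service/subservice pair 17/1 (ping) mirror the test helpers above.
    fn ping_tc(seq_count: u16) -> PusTcCreator<'static> {
        // Header construction is now infallible, so there is no .unwrap() anymore.
        let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0);
        // The header is moved in, and "no app data" is just an empty slice.
        PusTcCreator::new_simple(sph, 17, 1, &[], true)
    }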
@@ -957,7 +943,7 @@ mod tests {
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(
@@ -967,7 +953,7 @@ mod tests {
             .unwrap();
         let app_data = &[0, 1, 2];
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, Some(app_data));
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, app_data);
         scheduler
             .insert_unwrapped_and_stored_tc(
                 UnixTime::new_only_secs(200),
@@ -976,7 +962,7 @@ mod tests {
             .unwrap();
         let app_data = &[0, 1, 2];
-        let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, Some(app_data));
+        let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, app_data);
         scheduler
             .insert_unwrapped_and_stored_tc(
                 UnixTime::new_only_secs(300),
@@ -1002,7 +988,7 @@ mod tests {
             .insert_unwrapped_and_stored_tc(
                 UnixTime::new_only_secs(100),
                 TcInfo::new(
-                    StoreAddr::from(StaticPoolAddr {
+                    PoolAddr::from(StaticPoolAddr {
                         pool_idx: 0,
                         packet_idx: 1,
                     }),
@@ -1019,7 +1005,7 @@ mod tests {
             .insert_unwrapped_and_stored_tc(
                 UnixTime::new_only_secs(100),
                 TcInfo::new(
-                    StoreAddr::from(StaticPoolAddr {
+                    PoolAddr::from(StaticPoolAddr {
                         pool_idx: 0,
                         packet_idx: 2,
                     }),
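These hunks track the rename of the pool addressing type from `StoreAddr` to `PoolAddr`. A static pool address still converts into the generic address type through `From`, as in this short sketch (assuming the `satrs::pool` paths used by these tests):

    use satrs::pool::{PoolAddr, StaticPoolAddr};

    // The indices are placeholders; the conversion mirrors the TcInfo
    // construction in the hunks above.
    fn example_addr() -> PoolAddr {
        PoolAddr::from(StaticPoolAddr {
            pool_idx: 0,
            packet_idx: 1,
        })
    }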
@@ -1063,8 +1049,8 @@ mod tests {
     fn common_check(
         enabled: bool,
-        store_addr: &StoreAddr,
-        expected_store_addrs: Vec<StoreAddr>,
+        store_addr: &PoolAddr,
+        expected_store_addrs: Vec<PoolAddr>,
         counter: &mut usize,
     ) {
         assert!(enabled);
@@ -1073,8 +1059,8 @@ mod tests {
     }
     fn common_check_disabled(
         enabled: bool,
-        store_addr: &StoreAddr,
-        expected_store_addrs: Vec<StoreAddr>,
+        store_addr: &PoolAddr,
+        expected_store_addrs: Vec<PoolAddr>,
         counter: &mut usize,
     ) {
         assert!(!enabled);
@@ -1087,10 +1073,10 @@ mod tests {
         let src_id_to_set = 12;
         let apid_to_set = 0x22;
         let seq_count = 105;
-        let mut sp_header = SpHeader::tc_unseg(apid_to_set, 105, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, 105, 0);
         let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
         sec_header.source_id = src_id_to_set;
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
         let req_id = RequestId::from_tc(&ping_tc);
         assert_eq!(req_id.source_id(), src_id_to_set);
         assert_eq!(req_id.apid(), apid_to_set);
@@ -1106,13 +1092,13 @@ mod tests {
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
             .expect("insertion failed");
@@ -1171,13 +1157,13 @@ mod tests {
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
             .expect("insertion failed");
@@ -1230,13 +1216,13 @@ mod tests {
         scheduler.disable();
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
             .expect("insertion failed");
@@ -1294,7 +1280,7 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         let info = scheduler
             .insert_unwrapped_tc(
@@ -1309,7 +1295,7 @@ mod tests {
         let mut read_buf: [u8; 64] = [0; 64];
         pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
         let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
-        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
+        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
         assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1332,7 +1318,7 @@ mod tests {
         let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
         let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
         assert_eq!(read_len, check_tc.1);
-        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
+        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
     }
     #[test]
@@ -1356,7 +1342,7 @@ mod tests {
         let read_len = pool.read(&info.addr, &mut buf).unwrap();
         let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
         assert_eq!(read_len, check_tc.1);
-        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
+        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
         assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1381,7 +1367,7 @@ mod tests {
         let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
         let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
         assert_eq!(read_len, check_tc.1);
-        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
+        assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
     }
     #[test]
@@ -1506,7 +1492,7 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
@@ -1528,7 +1514,7 @@ mod tests {
         // TC could not even be read..
         assert_eq!(err.0, 0);
         match err.1 {
-            StoreError::DataDoesNotExist(addr) => {
+            PoolError::DataDoesNotExist(addr) => {
                 assert_eq!(tc_info_0.addr(), addr);
             }
             _ => panic!("unexpected error {}", err.1),
@@ -1540,7 +1526,7 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
@@ -1551,7 +1537,7 @@ mod tests {
         assert!(reset_res.is_err());
         let err = reset_res.unwrap_err();
         match err {
-            StoreError::DataDoesNotExist(addr) => {
+            PoolError::DataDoesNotExist(addr) => {
                 assert_eq!(addr, tc_info_0.addr());
             }
            _ => panic!("unexpected error {err}"),
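The error type follows the same rename: `StoreError` becomes `PoolError`. Code releasing or resetting scheduled telecommands can still tell a missing packet apart from a full store, as in this sketch (assuming `satrs::pool::PoolError` with the two variants exercised by the tests above):

    use satrs::pool::PoolError;

    // Sketch of classifying the pool errors checked in the tests above.
    fn describe(err: &PoolError) -> &'static str {
        match err {
            PoolError::DataDoesNotExist(_addr) => "stored TC was already deleted",
            PoolError::StoreFull(_subpool) => "no free slot left in the pool",
            _ => "other pool error",
        }
    }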
@@ -1563,7 +1549,7 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("inserting tc failed");
@@ -1581,7 +1567,7 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("inserting tc failed");
@@ -1599,15 +1585,15 @@ mod tests {
         let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("inserting tc failed");
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
             .expect("inserting tc failed");
-        let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, None);
+        let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_2)
             .expect("inserting tc failed");
@@ -1653,7 +1639,7 @@ mod tests {
         let err = insert_res.unwrap_err();
         match err {
             ScheduleError::StoreError(e) => match e {
-                StoreError::StoreFull(_) => {}
+                PoolError::StoreFull(_) => {}
                 _ => panic!("unexpected store error {e}"),
             },
             _ => panic!("unexpected error {err}"),
@@ -1667,7 +1653,7 @@ mod tests {
         release_secs: u64,
     ) -> TcInfo {
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, None);
+        let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(release_secs as i64), tc_info)
@@ -1915,13 +1901,13 @@ mod tests {
         let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
         let mut buf: [u8; 32] = [0; 32];
-        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
+        let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
             .expect("insertion failed");
-        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
+        let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
         scheduler
             .insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
             .expect("insertion failed");
@@ -1949,13 +1935,13 @@ mod tests {
     #[test]
     fn test_generic_insert_app_data_test() {
         let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
-        let mut sph = SpHeader::new(
-            PacketId::const_new(PacketType::Tc, true, 0x002),
-            PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
+        let sph = SpHeader::new(
+            PacketId::new(PacketType::Tc, true, 0x002),
+            PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
             0,
         );
         let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
         let mut buf: [u8; 64] = [0; 64];
         let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
         assert!(result.is_ok());
@@ -1971,13 +1957,13 @@ mod tests {
     #[test]
     fn test_generic_insert_app_data_test_byte_conv_error() {
         let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
-        let mut sph = SpHeader::new(
-            PacketId::const_new(PacketType::Tc, true, 0x002),
-            PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
+        let sph = SpHeader::new(
+            PacketId::new(PacketType::Tc, true, 0x002),
+            PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
             0,
         );
         let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
         let mut buf: [u8; 16] = [0; 16];
         let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
         assert!(result.is_err());
@@ -2000,13 +1986,13 @@ mod tests {
     #[test]
     fn test_generic_insert_app_data_test_as_vec() {
         let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
-        let mut sph = SpHeader::new(
-            PacketId::const_new(PacketType::Tc, true, 0x002),
-            PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
+        let sph = SpHeader::new(
+            PacketId::new(PacketType::Tc, true, 0x002),
+            PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
             0,
         );
         let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
         let mut buf: [u8; 64] = [0; 64];
         generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc).unwrap();
         let vec = generate_insert_telecommand_app_data_as_vec(&time_writer, &ping_tc)
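The last hunks of this file exercise the helpers which assemble Service 11 insert-activity application data: a release time stamp produced by a `TimeWriter`, followed by the raw telecommand. A sketch of the buffer-based variant; the import path of the helper is an assumption, since the tests above use it from their own module:

    use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
    use spacepackets::time::cds;
    use spacepackets::{PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader};

    // Assumed path of the helper exercised in the tests above.
    use satrs::pus::scheduler::generate_insert_telecommand_app_data;

    fn build_insert_app_data(buf: &mut [u8]) {
        let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
        let sph = SpHeader::new(
            PacketId::new(PacketType::Tc, true, 0x002),
            PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
            0,
        );
        let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
        let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
        // Writes the CDS time tag followed by the raw ping TC into buf.
        generate_insert_telecommand_app_data(buf, &time_writer, &ping_tc)
            .expect("buffer too small for time stamp plus TC");
    }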

@@ -1,12 +1,13 @@
 use super::scheduler::PusSchedulerProvider;
 use super::verification::{VerificationReporter, VerificationReportingProvider};
 use super::{
-    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore,
-    EcssTmSenderCore, MpscTcReceiver, MpscTmInSharedPoolSender, MpscTmInSharedPoolSenderBounded,
-    PusServiceHelper, PusTmAsVec,
+    DirectPusPacketHandlerResult, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
+    EcssTcInVecConverter, EcssTcReceiver, EcssTmSender, HandlingStatus, MpscTcReceiver,
+    PartialPusHandlingError, PusServiceHelper,
 };
 use crate::pool::PoolProvider;
-use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
+use crate::pus::PusPacketHandlingError;
+use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use alloc::string::ToString;
 use spacepackets::ecss::{scheduling, PusPacket};
 use spacepackets::time::cds::CdsTime;
@@ -21,8 +22,8 @@ use std::sync::mpsc;
 /// [Self::scheduler] and [Self::scheduler_mut] function and then use the scheduler API to release
 /// telecommands when applicable.
 pub struct PusSchedServiceHandler<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
     PusScheduler: PusSchedulerProvider,
@@ -33,8 +34,8 @@ pub struct PusSchedServiceHandler<
 }
 impl<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
     Scheduler: PusSchedulerProvider,
@@ -64,14 +65,15 @@ impl<
         &self.scheduler
     }
-    pub fn poll_and_handle_next_tc(
+    pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
         &mut self,
+        mut error_callback: ErrorCb,
         time_stamp: &[u8],
         sched_tc_pool: &mut (impl PoolProvider + ?Sized),
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
         let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
         if possible_packet.is_none() {
-            return Ok(PusPacketHandlerResult::Empty);
+            return Ok(HandlingStatus::Empty.into());
         }
         let ecss_tc_and_token = possible_packet.unwrap();
         self.service_helper
@@ -81,36 +83,34 @@ impl<
         let subservice = PusPacket::subservice(&tc);
         let standard_subservice = scheduling::Subservice::try_from(subservice);
         if standard_subservice.is_err() {
-            return Ok(PusPacketHandlerResult::CustomSubservice(
+            return Ok(DirectPusPacketHandlerResult::CustomSubservice(
                 subservice,
                 ecss_tc_and_token.token,
             ));
         }
-        let partial_error = None;
         match standard_subservice.unwrap() {
             scheduling::Subservice::TcEnableScheduling => {
-                let start_token = self
-                    .service_helper
-                    .verif_reporter()
-                    .start_success(
-                        self.service_helper.common.id,
-                        &self.service_helper.common.tm_sender,
-                        ecss_tc_and_token.token,
-                        time_stamp,
-                    )
-                    .expect("Error sending start success");
+                let opt_started_token = match self.service_helper.verif_reporter().start_success(
+                    &self.service_helper.common.tm_sender,
+                    ecss_tc_and_token.token,
+                    time_stamp,
+                ) {
+                    Ok(started_token) => Some(started_token),
+                    Err(e) => {
+                        error_callback(&PartialPusHandlingError::Verification(e));
+                        None
+                    }
+                };
                 self.scheduler.enable();
-                if self.scheduler.is_enabled() {
-                    self.service_helper
-                        .verif_reporter()
-                        .completion_success(
-                            self.service_helper.common.id,
-                            &self.service_helper.common.tm_sender,
-                            start_token,
-                            time_stamp,
-                        )
-                        .expect("Error sending completion success");
+                if self.scheduler.is_enabled() && opt_started_token.is_some() {
+                    if let Err(e) = self.service_helper.verif_reporter().completion_success(
+                        &self.service_helper.common.tm_sender,
+                        opt_started_token.unwrap(),
+                        time_stamp,
+                    ) {
+                        error_callback(&PartialPusHandlingError::Verification(e));
+                    }
                 } else {
                     return Err(PusPacketHandlingError::Other(
                         "failed to enabled scheduler".to_string(),
@@ -118,28 +118,27 @@ impl<
                 }
             }
             scheduling::Subservice::TcDisableScheduling => {
-                let start_token = self
-                    .service_helper
-                    .verif_reporter()
-                    .start_success(
-                        self.service_helper.common.id,
-                        &self.service_helper.common.tm_sender,
-                        ecss_tc_and_token.token,
-                        time_stamp,
-                    )
-                    .expect("Error sending start success");
+                let opt_started_token = match self.service_helper.verif_reporter().start_success(
+                    &self.service_helper.common.tm_sender,
+                    ecss_tc_and_token.token,
+                    time_stamp,
+                ) {
+                    Ok(started_token) => Some(started_token),
+                    Err(e) => {
+                        error_callback(&PartialPusHandlingError::Verification(e));
+                        None
+                    }
+                };
                 self.scheduler.disable();
-                if !self.scheduler.is_enabled() {
-                    self.service_helper
-                        .verif_reporter()
-                        .completion_success(
-                            self.service_helper.common.id,
-                            &self.service_helper.common.tm_sender,
-                            start_token,
-                            time_stamp,
-                        )
-                        .expect("Error sending completion success");
+                if !self.scheduler.is_enabled() && opt_started_token.is_some() {
+                    if let Err(e) = self.service_helper.verif_reporter().completion_success(
+                        &self.service_helper.common.tm_sender,
+                        opt_started_token.unwrap(),
+                        time_stamp,
+                    ) {
+                        error_callback(&PartialPusHandlingError::Verification(e));
+                    }
                 } else {
                     return Err(PusPacketHandlingError::Other(
                         "failed to disable scheduler".to_string(),
@@ -151,7 +150,6 @@ impl<
                     .service_helper
                     .verif_reporter()
                     .start_success(
-                        self.service_helper.common.id,
                         &self.service_helper.common.tm_sender,
                         ecss_tc_and_token.token,
                         time_stamp,
@@ -165,7 +163,6 @@ impl<
                 self.service_helper
                     .verif_reporter()
                     .completion_success(
-                        self.service_helper.common.id,
                         &self.service_helper.common.tm_sender,
                         start_token,
                         time_stamp,
@@ -178,7 +175,6 @@ impl<
                     .common
                     .verif_reporter
                     .start_success(
-                        self.service_helper.common.id,
                        &self.service_helper.common.tm_sender,
                        ecss_tc_and_token.token,
                        time_stamp,
@@ -193,7 +189,6 @@ impl<
                 self.service_helper
                     .verif_reporter()
                     .completion_success(
-                        self.service_helper.common.id,
                        &self.service_helper.common.tm_sender,
                        start_token,
                        time_stamp,
@@ -202,25 +197,20 @@ impl<
             }
             _ => {
                 // Treat unhandled standard subservices as custom subservices for now.
-                return Ok(PusPacketHandlerResult::CustomSubservice(
+                return Ok(DirectPusPacketHandlerResult::CustomSubservice(
                     subservice,
                     ecss_tc_and_token.token,
                 ));
             }
         }
-        if let Some(partial_error) = partial_error {
-            return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
-                partial_error,
-            ));
-        }
-        Ok(PusPacketHandlerResult::RequestHandled)
+        Ok(HandlingStatus::HandledOne.into())
     }
 }
 /// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and regular
 /// mpsc queues.
 pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHandler<
     MpscTcReceiver,
-    mpsc::Sender<PusTmAsVec>,
+    mpsc::Sender<PacketAsVec>,
     EcssTcInVecConverter,
     VerificationReporter,
     PusScheduler,
@@ -229,7 +219,7 @@ pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHand
 /// queues.
 pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
     MpscTcReceiver,
-    mpsc::SyncSender<PusTmAsVec>,
+    mpsc::SyncSender<PacketAsVec>,
     EcssTcInVecConverter,
     VerificationReporter,
     PusScheduler,
@@ -238,7 +228,7 @@ pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServ
 /// mpsc queues.
 pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceHandler<
     MpscTcReceiver,
-    MpscTmInSharedPoolSender,
+    PacketSenderWithSharedPool,
     EcssTcInSharedStoreConverter,
     VerificationReporter,
     PusScheduler,
@@ -247,7 +237,7 @@ pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceH
 /// mpsc queues.
 pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
     MpscTcReceiver,
-    MpscTmInSharedPoolSenderBounded,
+    PacketSenderWithSharedPool,
     EcssTcInSharedStoreConverter,
     VerificationReporter,
     PusScheduler,
@@ -265,10 +255,8 @@ mod tests {
         verification::{RequestId, TcStateAccepted, VerificationToken},
         EcssTcInSharedStoreConverter,
     };
-    use crate::pus::{
-        MpscTcReceiver, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult,
-        PusPacketHandlingError,
-    };
+    use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
+    use crate::tmtc::PacketSenderWithSharedPool;
     use alloc::collections::VecDeque;
     use delegate::delegate;
     use spacepackets::ecss::scheduling::Subservice;
@@ -287,7 +275,7 @@ mod tests {
         common: PusServiceHandlerWithSharedStoreCommon,
         handler: PusSchedServiceHandler<
             MpscTcReceiver,
-            MpscTmInSharedPoolSenderBounded,
+            PacketSenderWithSharedPool,
             EcssTcInSharedStoreConverter,
             VerificationReporter,
             TestScheduler,
@@ -308,10 +296,12 @@ mod tests {
             }
         }
-        pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        pub fn handle_one_tc(
+            &mut self,
+        ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
             let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
             self.handler
-                .poll_and_handle_next_tc(&time_stamp, &mut self.sched_tc_pool)
+                .poll_and_handle_next_tc(|_| {}, &time_stamp, &mut self.sched_tc_pool)
         }
     }
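With this change the scheduler service no longer aggregates a single partial error internally: every partial failure is reported through the caller-supplied callback while the return value becomes `DirectPusPacketHandlerResult`. A usage sketch of a polling loop over the new signature; it is deliberately not self-contained, and `handler`, `time_stamp` and `sched_tc_pool` are assumed to be set up as in the test harness above:

    use satrs::pus::{DirectPusPacketHandlerResult, HandlingStatus};

    // Drain the TC queue, logging partial errors instead of aborting on them.
    loop {
        let result = handler
            .poll_and_handle_next_tc(
                |e| eprintln!("partial PUS 11 handling error: {e:?}"),
                &time_stamp,
                &mut sched_tc_pool,
            )
            .expect("PUS 11 TC handling failed");
        if let DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty) = result {
            break;
        }
    }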
@@ -321,18 +311,17 @@ mod tests {
             self.handler
                 .service_helper
                 .verif_reporter()
-                .acceptance_success(
-                    self.handler.service_helper.id(),
-                    self.handler.service_helper.tm_sender(),
-                    init_token,
-                    &[0; 7],
-                )
+                .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
                 .expect("acceptance success failure")
         }
+        fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
+            self.common
+                .send_tc(self.handler.service_helper.id(), token, tc);
+        }
         delegate! {
             to self.common {
-                fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
                 fn read_next_tm(&mut self) -> PusTmReader<'_>;
                 fn check_no_tm_available(&self) -> bool;
                 fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
@@ -355,7 +344,7 @@ mod tests {
         fn reset(
             &mut self,
             _store: &mut (impl crate::pool::PoolProvider + ?Sized),
-        ) -> Result<(), crate::pool::StoreError> {
+        ) -> Result<(), crate::pool::PoolError> {
             self.reset_count += 1;
             Ok(())
         }
@@ -388,9 +377,9 @@ mod tests {
         test_harness: &mut Pus11HandlerWithStoreTester,
         subservice: Subservice,
     ) {
-        let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
+        let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, 0, 0);
         let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
-        let enable_scheduling = PusTcCreator::new(&mut reply_header, tc_header, &[0; 7], true);
+        let enable_scheduling = PusTcCreator::new(reply_header, tc_header, &[0; 7], true);
         let token = test_harness.init_verification(&enable_scheduling);
         test_harness.send_tc(&token, &enable_scheduling);
@@ -398,7 +387,7 @@ mod tests {
         let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
         test_harness
             .handler
-            .poll_and_handle_next_tc(&time_stamp, &mut test_harness.sched_tc_pool)
+            .poll_and_handle_next_tc(|_| {}, &time_stamp, &mut test_harness.sched_tc_pool)
             .unwrap();
         test_harness.check_next_verification_tm(1, request_id);
         test_harness.check_next_verification_tm(3, request_id);
@@ -435,9 +424,9 @@ mod tests {
     #[test]
     fn test_insert_activity_tc() {
         let mut test_harness = Pus11HandlerWithStoreTester::new();
-        let mut reply_header = SpHeader::tm_unseg(TEST_APID, 0, 0).unwrap();
+        let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
+        let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], true);
         let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
         let stamper = cds::CdsTime::now_with_u16_days().expect("time provider failed");
         let mut sched_app_data: [u8; 64] = [0; 64];
@@ -445,10 +434,10 @@ mod tests {
         let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
         sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
         written_len += ping_raw.len();
-        reply_header = SpHeader::tm_unseg(TEST_APID, 1, 0).unwrap();
+        reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0);
         sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
         let enable_scheduling = PusTcCreator::new(
-            &mut reply_header,
+            reply_header,
             sec_header,
             &sched_app_data[..written_len],
             true,

@@ -1,7 +1,7 @@
 use crate::pus::{
-    PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError, PusTmAsVec,
-    PusTmInPool, PusTmVariant,
+    DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError, PusTmVariant,
 };
+use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
 use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
 use spacepackets::ecss::PusPacket;
 use spacepackets::SpHeader;
@@ -9,16 +9,15 @@ use std::sync::mpsc;
 use super::verification::{VerificationReporter, VerificationReportingProvider};
 use super::{
-    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiverCore,
-    EcssTmSenderCore, GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSender,
-    MpscTmInSharedPoolSenderBounded, PusServiceHelper,
+    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcReceiver,
+    EcssTmSender, GenericConversionError, HandlingStatus, MpscTcReceiver, PusServiceHelper,
 };
 /// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
 /// This handler only processes ping requests and generates a ping reply for them accordingly.
 pub struct PusService17TestHandler<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
 > {
@@ -27,8 +26,8 @@ pub struct PusService17TestHandler<
 }
 impl<
-    TcReceiver: EcssTcReceiverCore,
-    TmSender: EcssTmSenderCore,
+    TcReceiver: EcssTcReceiver,
+    TmSender: EcssTmSender,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
 > PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
@@ -44,13 +43,14 @@ impl<
         Self { service_helper }
     }
-    pub fn poll_and_handle_next_tc(
+    pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
         &mut self,
+        mut error_callback: ErrorCb,
         time_stamp: &[u8],
-    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
         let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
         if possible_packet.is_none() {
-            return Ok(PusPacketHandlerResult::Empty);
+            return Ok(HandlingStatus::Empty.into());
         }
         let ecss_tc_and_token = possible_packet.unwrap();
         self.service_helper
@@ -61,65 +61,48 @@ impl<
             return Err(GenericConversionError::WrongService(tc.service()).into());
         }
         if tc.subservice() == 1 {
-            let mut partial_error = None;
-            let result = self
-                .service_helper
-                .verif_reporter()
-                .start_success(
-                    self.service_helper.common.id,
-                    &self.service_helper.common.tm_sender,
-                    ecss_tc_and_token.token,
-                    time_stamp,
-                )
-                .map_err(|_| PartialPusHandlingError::Verification);
-            let start_token = if let Ok(result) = result {
-                Some(result)
-            } else {
-                partial_error = Some(result.unwrap_err());
-                None
+            let opt_started_token = match self.service_helper.verif_reporter().start_success(
+                &self.service_helper.common.tm_sender,
+                ecss_tc_and_token.token,
+                time_stamp,
+            ) {
+                Ok(token) => Some(token),
+                Err(e) => {
+                    error_callback(&PartialPusHandlingError::Verification(e));
+                    None
+                }
             };
             // Sequence count will be handled centrally in TM funnel.
-            let mut reply_header =
-                SpHeader::tm_unseg(self.service_helper.verif_reporter().apid(), 0, 0).unwrap();
+            // It is assumed that the verification reporter was built with a valid APID, so we use
+            // the unchecked API here.
+            let reply_header =
+                SpHeader::new_for_unseg_tm(self.service_helper.verif_reporter().apid(), 0, 0);
             let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp);
-            let ping_reply = PusTmCreator::new(&mut reply_header, tc_header, &[], true);
-            let result = self
+            let ping_reply = PusTmCreator::new(reply_header, tc_header, &[], true);
+            if let Err(e) = self
                 .service_helper
                 .common
                 .tm_sender
                 .send_tm(self.service_helper.id(), PusTmVariant::Direct(ping_reply))
-                .map_err(PartialPusHandlingError::TmSend);
-            if let Err(err) = result {
-                partial_error = Some(err);
+            {
+                error_callback(&PartialPusHandlingError::TmSend(e));
             }
-            if let Some(start_token) = start_token {
-                if self
-                    .service_helper
-                    .verif_reporter()
-                    .completion_success(
-                        self.service_helper.common.id,
-                        &self.service_helper.common.tm_sender,
-                        start_token,
-                        time_stamp,
-                    )
-                    .is_err()
-                {
-                    partial_error = Some(PartialPusHandlingError::Verification)
+            if let Some(start_token) = opt_started_token {
+                if let Err(e) = self.service_helper.verif_reporter().completion_success(
+                    &self.service_helper.common.tm_sender,
+                    start_token,
+                    time_stamp,
+                ) {
+                    error_callback(&PartialPusHandlingError::Verification(e));
                 }
             }
-            if let Some(partial_error) = partial_error {
-                return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
-                    partial_error,
-                ));
-            };
         } else {
-            return Ok(PusPacketHandlerResult::CustomSubservice(
+            return Ok(DirectPusPacketHandlerResult::CustomSubservice(
                 tc.subservice(),
                 ecss_tc_and_token.token,
             ));
         }
-        Ok(PusPacketHandlerResult::RequestHandled)
+        Ok(HandlingStatus::HandledOne.into())
     }
 }
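The ping reply is now built with the by-value packet constructors as well. A self-contained sketch of the reply construction shown above, with a hypothetical APID and a zeroed placeholder time stamp (the real handler takes the APID from its verification reporter and the time stamp from the caller):

    use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
    use spacepackets::SpHeader;

    fn main() {
        let time_stamp = [0u8; 7]; // placeholder CDS time stamp
        let reply_header = SpHeader::new_for_unseg_tm(0x02, 0, 0); // hypothetical APID
        // Service 17, subservice 2: the ping reply.
        let tm_header = PusTmSecondaryHeader::new_simple(17, 2, &time_stamp);
        let ping_reply = PusTmCreator::new(reply_header, tm_header, &[], true);
        // The handler above wraps this as PusTmVariant::Direct and hands it to
        // the TM sender; sequence counting happens centrally in the TM funnel.
        let _ = ping_reply;
    }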
@@ -127,7 +110,7 @@ impl<
 /// mpsc queues.
 pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
     MpscTcReceiver,
-    mpsc::Sender<PusTmAsVec>,
+    mpsc::Sender<PacketAsVec>,
     EcssTcInVecConverter,
     VerificationReporter,
 >;
@@ -135,23 +118,15 @@ pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
 /// queues.
 pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
     MpscTcReceiver,
-    mpsc::SyncSender<PusTmInPool>,
+    mpsc::SyncSender<PacketAsVec>,
     EcssTcInVecConverter,
     VerificationReporter,
 >;
-/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and regular
-/// mpsc queues.
-pub type PusService17TestHandlerStaticWithMpsc = PusService17TestHandler<
-    MpscTcReceiver,
-    MpscTmInSharedPoolSender,
-    EcssTcInSharedStoreConverter,
-    VerificationReporter,
->;
 /// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded
 /// mpsc queues.
 pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler<
     MpscTcReceiver,
-    MpscTmInSharedPoolSenderBounded,
+    PacketSenderWithSharedPool,
     EcssTcInSharedStoreConverter,
     VerificationReporter,
 >;
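After this cleanup the remaining static-pool alias uses `PacketSenderWithSharedPool`, while the dynamic variants exchange owned packets as `PacketAsVec`. A small sketch of creating the TM channel a dynamic handler variant expects, using the `satrs::tmtc` path introduced in this diff:

    use std::sync::mpsc;

    use satrs::tmtc::PacketAsVec;

    fn main() {
        // The dynamic handler aliases are generic over an mpsc sender of owned packets.
        let (tm_tx, tm_rx) = mpsc::channel::<PacketAsVec>();
        // tm_tx would be passed into the service helper; tm_rx feeds the TM funnel.
        let _ = (tm_tx, tm_rx);
    }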
@@ -167,17 +142,18 @@ mod tests {
     };
     use crate::pus::verification::{TcStateAccepted, VerificationToken};
     use crate::pus::{
-        EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericConversionError, MpscTcReceiver,
-        MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult,
-        PusPacketHandlingError,
+        DirectPusPacketHandlerResult, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
+        GenericConversionError, HandlingStatus, MpscTcReceiver, MpscTmAsVecSender,
+        PartialPusHandlingError, PusPacketHandlingError,
     };
+    use crate::tmtc::PacketSenderWithSharedPool;
     use crate::ComponentId;
     use delegate::delegate;
     use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
     use spacepackets::ecss::tm::PusTmReader;
     use spacepackets::ecss::PusPacket;
     use spacepackets::time::{cds, TimeWriter};
-    use spacepackets::{SequenceFlags, SpHeader};
+    use spacepackets::SpHeader;
     use super::PusService17TestHandler;
@@ -185,7 +161,7 @@ mod tests {
         common: PusServiceHandlerWithSharedStoreCommon,
         handler: PusService17TestHandler<
             MpscTcReceiver,
-            MpscTmInSharedPoolSenderBounded,
+            PacketSenderWithSharedPool,
             EcssTcInSharedStoreConverter,
             VerificationReporter,
         >,
@@ -208,19 +184,18 @@ mod tests {
             self.handler
                 .service_helper
                 .verif_reporter()
-                .acceptance_success(
-                    self.handler.service_helper.id(),
-                    self.handler.service_helper.tm_sender(),
-                    init_token,
-                    &[0; 7],
-                )
+                .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
                 .expect("acceptance success failure")
         }
+        fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
+            self.common
+                .send_tc(self.handler.service_helper.id(), token, tc);
+        }
         delegate! {
             to self.common {
                 fn read_next_tm(&mut self) -> PusTmReader<'_>;
-                fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
                 fn check_no_tm_available(&self) -> bool;
                 fn check_next_verification_tm(
                     &self,
@@ -231,9 +206,12 @@ mod tests {
         }
     }
     impl SimplePusPacketHandler for Pus17HandlerWithStoreTester {
-        fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        fn handle_one_tc(
+            &mut self,
+        ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
             let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
-            self.handler.poll_and_handle_next_tc(&time_stamp)
+            self.handler
+                .poll_and_handle_next_tc(|_partial_error: &PartialPusHandlingError| {}, &time_stamp)
         }
     }
@@ -264,18 +242,17 @@ mod tests {
             self.handler
                 .service_helper
                 .verif_reporter()
-                .acceptance_success(
-                    self.handler.service_helper.id(),
-                    self.handler.service_helper.tm_sender(),
-                    init_token,
-                    &[0; 7],
-                )
+                .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
                 .expect("acceptance success failure")
         }
+        fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
+            self.common
+                .send_tc(self.handler.service_helper.id(), token, tc);
+        }
         delegate! {
             to self.common {
-                fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
                 fn read_next_tm(&mut self) -> PusTmReader<'_>;
                 fn check_no_tm_available(&self) -> bool;
                 fn check_next_verification_tm(
@@ -287,17 +264,20 @@ mod tests {
         }
     }
     impl SimplePusPacketHandler for Pus17HandlerWithVecTester {
-        fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+        fn handle_one_tc(
+            &mut self,
+        ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
             let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
-            self.handler.poll_and_handle_next_tc(&time_stamp)
+            self.handler
+                .poll_and_handle_next_tc(|_partial_error: &PartialPusHandlingError| {}, &time_stamp)
         }
     }
     fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
         // Create a ping TC, verify acceptance.
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
         let token = test_harness.init_verification(&ping_tc);
         test_harness.send_tc(&token, &ping_tc);
         let request_id = token.request_id();
@@ -339,19 +319,20 @@ mod tests {
         let mut test_harness = Pus17HandlerWithStoreTester::new(0);
         let result = test_harness.handle_one_tc();
         assert!(result.is_ok());
-        let result = result.unwrap();
-        if let PusPacketHandlerResult::Empty = result {
-        } else {
-            panic!("unexpected result type {result:?}")
+        match result.unwrap() {
+            DirectPusPacketHandlerResult::Handled(handled) => {
+                assert_eq!(handled, HandlingStatus::Empty);
+            }
+            _ => panic!("unexpected result"),
         }
     }
     #[test]
     fn test_sending_unsupported_service() {
         let mut test_harness = Pus17HandlerWithStoreTester::new(0);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
         let token = test_harness.init_verification(&ping_tc);
         test_harness.send_tc(&token, &ping_tc);
         let result = test_harness.handle_one_tc();
@@ -370,15 +351,15 @@ mod tests {
     #[test]
     fn test_sending_custom_subservice() {
         let mut test_harness = Pus17HandlerWithStoreTester::new(0);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
         let token = test_harness.init_verification(&ping_tc);
         test_harness.send_tc(&token, &ping_tc);
         let result = test_harness.handle_one_tc();
         assert!(result.is_ok());
         let result = result.unwrap();
-        if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
+        if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
             assert_eq!(subservice, 200);
         } else {
             panic!("unexpected result type {result:?}")

File diff suppressed because it is too large.

@@ -3,16 +3,14 @@ use core::{fmt, marker::PhantomData};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*; pub use alloc_mod::*;
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub use std_mod::*; pub use std_mod::*;
use spacepackets::{ use spacepackets::{
ecss::{tc::IsPusTelecommand, PusPacket}, ecss::{tc::IsPusTelecommand, PusPacket},
ByteConversionError, CcsdsPacket, ByteConversionError,
}; };
use crate::{queue::GenericTargetedMessagingError, ComponentId}; use crate::{queue::GenericTargetedMessagingError, ComponentId};
@@ -49,7 +47,7 @@ impl UniqueApidTargetId {
/// This function attempts to build the ID from a PUS telecommand by extracting the APID /// This function attempts to build the ID from a PUS telecommand by extracting the APID
/// and the first four bytes of the application data field as the target field. /// and the first four bytes of the application data field as the target field.
pub fn from_pus_tc( pub fn from_pus_tc(
tc: &(impl CcsdsPacket + PusPacket + IsPusTelecommand), tc: &(impl PusPacket + IsPusTelecommand),
) -> Result<Self, ByteConversionError> { ) -> Result<Self, ByteConversionError> {
if tc.user_data().len() < 4 { if tc.user_data().len() < 4 {
return Err(ByteConversionError::FromSliceTooSmall { return Err(ByteConversionError::FromSliceTooSmall {
@@ -89,6 +87,10 @@ impl fmt::Display for UniqueApidTargetId {
     }
 }
+/// This contains metadata information which might be useful when used together with a
+/// generic message type.
+///
+/// This could for example be used to build request/reply patterns or state tracking for requests.
 #[derive(Debug, Copy, PartialEq, Eq, Clone)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
 pub struct MessageMetadata {
@@ -113,17 +115,16 @@ impl MessageMetadata {
     }
 }
-/// Generic message type which is associated with a sender using a [ChannelId] and associated
-/// with a request using a [RequestId].
+/// Generic message type which adds [metadata][MessageMetadata] to a generic message type.
 #[derive(Debug, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
-pub struct GenericMessage<MSG> {
+pub struct GenericMessage<Message> {
     pub requestor_info: MessageMetadata,
-    pub message: MSG,
+    pub message: Message,
 }
-impl<MSG> GenericMessage<MSG> {
-    pub fn new(requestor_info: MessageMetadata, message: MSG) -> Self {
+impl<Message> GenericMessage<Message> {
+    pub fn new(requestor_info: MessageMetadata, message: Message) -> Self {
         Self {
             requestor_info,
             message,
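For readers following the rename from `MSG` to `Message`, a minimal sketch of how the two types compose. The `satrs::request` module path and the `MessageMetadata::new(request_id, sender_channel_id)` argument order are assumptions based on usage later in this diff:

```rust
use satrs::request::{GenericMessage, MessageMetadata};

// User-defined payload; the renamed `Message` parameter can be any type.
#[derive(Debug, PartialEq, Eq)]
enum DeviceCmd {
    PowerOn,
}

fn main() {
    // Assumed constructor order: request ID first, then the sender's channel ID.
    let metadata = MessageMetadata::new(12, 5);
    let msg = GenericMessage::new(metadata, DeviceCmd::PowerOn);
    assert_eq!(msg.message, DeviceCmd::PowerOn);
    assert_eq!(msg.requestor_info.request_id(), 12);
}
```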
@@ -191,10 +192,7 @@ impl<MSG, R: MessageReceiver<MSG>> MessageReceiverWithId<MSG, R> {
     }
 }
 #[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
 pub mod alloc_mod {
-    use core::marker::PhantomData;
     use crate::queue::GenericSendError;
     use super::*;
@@ -328,13 +326,12 @@ pub mod alloc_mod {
     }
 }
 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod std_mod {
     use super::*;
     use std::sync::mpsc;
-    use crate::queue::{GenericReceiveError, GenericSendError, GenericTargetedMessagingError};
+    use crate::queue::{GenericReceiveError, GenericSendError};
     impl<MSG: Send> MessageSender<MSG> for mpsc::Sender<GenericMessage<MSG>> {
         fn send(&self, message: GenericMessage<MSG>) -> Result<(), GenericTargetedMessagingError> {
@@ -418,9 +415,9 @@ mod tests {
     #[test]
     fn test_basic_target_id_with_apid_from_pus_tc() {
-        let mut sp_header = SpHeader::tc_unseg(0x111, 5, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
         let app_data = 1_u32.to_be_bytes();
-        let pus_tc = PusTcCreator::new_simple(&mut sp_header, 17, 1, Some(&app_data), true);
+        let pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &app_data, true);
         let id = UniqueApidTargetId::from_pus_tc(&pus_tc).unwrap();
         assert_eq!(id.apid, 0x111);
         assert_eq!(id.unique_id, 1);
@@ -428,9 +425,9 @@ mod tests {
     #[test]
     fn test_basic_target_id_with_apid_from_pus_tc_invalid_app_data() {
-        let mut sp_header = SpHeader::tc_unseg(0x111, 5, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
-        let pus_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
+        let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
         let error = UniqueApidTargetId::from_pus_tc(&pus_tc);
         assert!(error.is_err());
         let error = error.unwrap_err();


@@ -32,7 +32,7 @@ dyn_clone::clone_trait_object!(SequenceCountProvider<u16>);
 #[cfg(feature = "alloc")]
 impl<T, Raw> SequenceCountProvider<Raw> for T where T: SequenceCountProviderCore<Raw> + Clone {}
-#[derive(Default, Clone)]
+#[derive(Clone)]
 pub struct SeqCountProviderSimple<T: Copy> {
     seq_count: Cell<T>,
     max_val: T,
@@ -43,13 +43,12 @@ macro_rules! impl_for_primitives {
     $(
         paste! {
             impl SeqCountProviderSimple<$ty> {
-                pub fn [<new_ $ty _max_val>](max_val: $ty) -> Self {
+                pub fn [<new_custom_max_val_ $ty>](max_val: $ty) -> Self {
                     Self {
                         seq_count: Cell::new(0),
                         max_val,
                     }
                 }
                 pub fn [<new_ $ty>]() -> Self {
                     Self {
                         seq_count: Cell::new(0),
@@ -58,6 +57,12 @@ macro_rules! impl_for_primitives {
                 }
             }
+            impl Default for SeqCountProviderSimple<$ty> {
+                fn default() -> Self {
+                    Self::[<new_ $ty>]()
+                }
+            }
             impl SequenceCountProviderCore<$ty> for SeqCountProviderSimple<$ty> {
                 fn get(&self) -> $ty {
                     self.seq_count.get()
@@ -86,21 +91,16 @@ macro_rules! impl_for_primitives {
 impl_for_primitives!(u8, u16, u32, u64,);
 /// This is a sequence count provider which wraps around at [MAX_SEQ_COUNT].
+#[derive(Clone)]
 pub struct CcsdsSimpleSeqCountProvider {
     provider: SeqCountProviderSimple<u16>,
 }
-impl CcsdsSimpleSeqCountProvider {
-    pub fn new() -> Self {
-        Self {
-            provider: SeqCountProviderSimple::new_u16_max_val(MAX_SEQ_COUNT),
-        }
-    }
-}
 impl Default for CcsdsSimpleSeqCountProvider {
     fn default() -> Self {
-        Self::new()
+        Self {
+            provider: SeqCountProviderSimple::new_custom_max_val_u16(MAX_SEQ_COUNT),
+        }
     }
 }
@@ -187,7 +187,7 @@ mod tests {
     #[test]
     fn test_u8_counter() {
-        let u8_counter = SeqCountProviderSimple::new_u8();
+        let u8_counter = SeqCountProviderSimple::<u8>::default();
         assert_eq!(u8_counter.get(), 0);
         assert_eq!(u8_counter.get_and_increment(), 0);
         assert_eq!(u8_counter.get_and_increment(), 1);
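A minimal sketch of the renamed constructors, assuming the `satrs::seq_count` module path and that the counter wraps back to zero after `max_val` has been handed out, as the `MAX_SEQ_COUNT` doc comment above implies:

```rust
use satrs::seq_count::{
    CcsdsSimpleSeqCountProvider, SeqCountProviderSimple, SequenceCountProviderCore,
};

fn main() {
    // `Default` now routes through the typed `new_*` constructor added above.
    let counter = SeqCountProviderSimple::<u8>::default();
    assert_eq!(counter.get_and_increment(), 0);
    assert_eq!(counter.get(), 1);

    // Renamed constructor for a custom wrap-around value: with max_val = 1,
    // the counter is assumed to yield 0, 1, 0, 1, ...
    let wrapping = SeqCountProviderSimple::new_custom_max_val_u8(1);
    assert_eq!(wrapping.get_and_increment(), 0);
    assert_eq!(wrapping.get_and_increment(), 1);
    assert_eq!(wrapping.get(), 0);

    // The CCSDS provider wraps at the 14-bit CCSDS sequence count limit.
    let _ccsds_counter = CcsdsSimpleSeqCountProvider::default();
}
```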


@@ -1,391 +0,0 @@
//! CCSDS packet routing components.
//!
//! The routing components consist of two core components:
//! 1. [CcsdsDistributor] component which dispatches received packets to a user-provided handler
//! 2. [CcsdsPacketHandler] trait which should be implemented by the user-provided packet handler.
//!
//! The [CcsdsDistributor] implements the [ReceivesCcsdsTc] and [ReceivesTcCore] trait which allows to
//! pass raw or CCSDS packets to it. Upon receiving a packet, it performs the following steps:
//!
//! 1. It tries to identify the target Application Process Identifier (APID) based on the
//! respective CCSDS space packet header field. If that process fails, a [ByteConversionError] is
//! returned to the user
//! 2. If a valid APID is found and matches one of the APIDs provided by
//! [CcsdsPacketHandler::valid_apids], it will pass the packet to the user provided
//! [CcsdsPacketHandler::handle_known_apid] function. If no valid APID is found, the packet
//! will be passed to the [CcsdsPacketHandler::handle_unknown_apid] function.
//!
//! # Example
//!
//! ```rust
//! use satrs::tmtc::ccsds_distrib::{CcsdsPacketHandler, CcsdsDistributor};
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::ecss::WritablePusPacket;
//! use spacepackets::ecss::tc::PusTcCreator;
//!
//! #[derive (Default)]
//! struct ConcreteApidHandler {
//! known_call_count: u32,
//! unknown_call_count: u32
//! }
//!
//! impl ConcreteApidHandler {
//! fn mutable_foo(&mut self) {}
//! }
//!
//! impl CcsdsPacketHandler for ConcreteApidHandler {
//! type Error = ();
//! fn valid_apids(&self) -> &'static [u16] { &[0x002] }
//! fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
//! assert_eq!(sp_header.apid(), 0x002);
//! assert_eq!(tc_raw.len(), 13);
//! self.known_call_count += 1;
//! Ok(())
//! }
//! fn handle_unknown_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
//! assert_eq!(sp_header.apid(), 0x003);
//! assert_eq!(tc_raw.len(), 13);
//! self.unknown_call_count += 1;
//! Ok(())
//! }
//! }
//!
//! let apid_handler = ConcreteApidHandler::default();
//! let mut ccsds_distributor = CcsdsDistributor::new(apid_handler);
//!
//! // Create and pass PUS telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true);
//! let mut test_buf: [u8; 32] = [0; 32];
//! let mut size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
//!
//! // Now pass a packet with an unknown APID to the distributor
//! pus_tc.set_apid(0x003);
//! size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
//!
//! // Retrieve the APID handler.
//! let handler_ref = ccsds_distributor.packet_handler();
//! assert_eq!(handler_ref.known_call_count, 1);
//! assert_eq!(handler_ref.unknown_call_count, 1);
//!
//! // Mutable access to the handler.
//! let mutable_handler_ref = ccsds_distributor.packet_handler_mut();
//! mutable_handler_ref.mutable_foo();
//! ```
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use core::fmt::{Display, Formatter};
use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader};
#[cfg(feature = "std")]
use std::error::Error;
/// Generic trait for a handler or dispatcher object handling CCSDS packets.
///
/// Users should implement this trait on their custom CCSDS packet handler and then pass a boxed
/// instance of this handler to the [CcsdsDistributor]. The distributor will use the trait
/// interface to dispatch received packets to the user based on the Application Process Identifier
/// (APID) field of the CCSDS packet.
pub trait CcsdsPacketHandler {
type Error;
// TODO: Rework this to return a boolean based on u16 input..
fn valid_apids(&self) -> &'static [u16];
fn handle_known_apid(&mut self, sp_header: &SpHeader, tc_raw: &[u8])
-> Result<(), Self::Error>;
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error>;
}
/// The CCSDS distributor dispatches received CCSDS packets to a user provided packet handler.
pub struct CcsdsDistributor<PacketHandler: CcsdsPacketHandler<Error = E>, E> {
/// User provided APID handler stored as a generic trait object.
/// It can be cast back to the original concrete type using [Self::packet_handler] or
/// the [Self::packet_handler_mut] method.
packet_handler: PacketHandler,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CcsdsError<E> {
CustomError(E),
ByteConversionError(ByteConversionError),
}
impl<E: Display> Display for CcsdsError<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
Self::CustomError(e) => write!(f, "{e}"),
Self::ByteConversionError(e) => write!(f, "{e}"),
}
}
}
#[cfg(feature = "std")]
impl<E: Error> Error for CcsdsError<E> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::CustomError(e) => e.source(),
Self::ByteConversionError(e) => e.source(),
}
}
}
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> ReceivesCcsdsTc
for CcsdsDistributor<PacketHandler, E>
{
type Error = CcsdsError<E>;
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.dispatch_ccsds(header, tc_raw)
}
}
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> ReceivesTcCore
for CcsdsDistributor<PacketHandler, E>
{
type Error = CcsdsError<E>;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
if tc_raw.len() < 7 {
return Err(CcsdsError::ByteConversionError(
ByteConversionError::FromSliceTooSmall {
found: tc_raw.len(),
expected: 7,
},
));
}
let (sp_header, _) =
SpHeader::from_be_bytes(tc_raw).map_err(|e| CcsdsError::ByteConversionError(e))?;
self.dispatch_ccsds(&sp_header, tc_raw)
}
}
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> CcsdsDistributor<PacketHandler, E> {
pub fn new(packet_handler: PacketHandler) -> Self {
CcsdsDistributor { packet_handler }
}
pub fn packet_handler(&self) -> &PacketHandler {
&self.packet_handler
}
pub fn packet_handler_mut(&mut self) -> &mut PacketHandler {
&mut self.packet_handler
}
fn dispatch_ccsds(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), CcsdsError<E>> {
let apid = sp_header.apid();
let valid_apids = self.packet_handler.valid_apids();
for &valid_apid in valid_apids {
if valid_apid == apid {
return self
.packet_handler
.handle_known_apid(sp_header, tc_raw)
.map_err(|e| CcsdsError::CustomError(e));
}
}
self.packet_handler
.handle_unknown_apid(sp_header, tc_raw)
.map_err(|e| CcsdsError::CustomError(e))
}
}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::CcsdsPacket;
use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::vec::Vec;
fn is_send<T: Send>(_: &T) {}
pub fn generate_ping_tc(buf: &mut [u8]) -> &[u8] {
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let size = pus_tc
.write_to_bytes(buf)
.expect("Error writing TC to buffer");
assert_eq!(size, 13);
&buf[0..size]
}
pub fn generate_ping_tc_as_vec() -> Vec<u8> {
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
PusTcCreator::new_simple(&mut sph, 17, 1, None, true)
.to_vec()
.unwrap()
}
type SharedPacketQueue = Arc<Mutex<VecDeque<(u16, Vec<u8>)>>>;
pub struct BasicApidHandlerSharedQueue {
pub known_packet_queue: SharedPacketQueue,
pub unknown_packet_queue: SharedPacketQueue,
}
#[derive(Default)]
pub struct BasicApidHandlerOwnedQueue {
pub known_packet_queue: VecDeque<(u16, Vec<u8>)>,
pub unknown_packet_queue: VecDeque<(u16, Vec<u8>)>,
}
impl CcsdsPacketHandler for BasicApidHandlerSharedQueue {
type Error = ();
fn valid_apids(&self) -> &'static [u16] {
&[0x000, 0x002]
}
fn handle_known_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
self.known_packet_queue
.lock()
.unwrap()
.push_back((sp_header.apid(), vec));
Ok(())
}
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
self.unknown_packet_queue
.lock()
.unwrap()
.push_back((sp_header.apid(), vec));
Ok(())
}
}
impl CcsdsPacketHandler for BasicApidHandlerOwnedQueue {
type Error = ();
fn valid_apids(&self) -> &'static [u16] {
&[0x000, 0x002]
}
fn handle_known_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
self.known_packet_queue.push_back((sp_header.apid(), vec));
Ok(())
}
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
self.unknown_packet_queue.push_back((sp_header.apid(), vec));
Ok(())
}
}
#[test]
fn test_distribs_known_apid() {
let known_packet_queue = Arc::new(Mutex::default());
let unknown_packet_queue = Arc::new(Mutex::default());
let apid_handler = BasicApidHandlerSharedQueue {
known_packet_queue: known_packet_queue.clone(),
unknown_packet_queue: unknown_packet_queue.clone(),
};
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
is_send(&ccsds_distrib);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
ccsds_distrib.pass_tc(tc_slice).expect("Passing TC failed");
let recvd = known_packet_queue.lock().unwrap().pop_front();
assert!(unknown_packet_queue.lock().unwrap().is_empty());
assert!(recvd.is_some());
let (apid, packet) = recvd.unwrap();
assert_eq!(apid, 0x002);
assert_eq!(packet, tc_slice);
}
#[test]
fn test_unknown_apid_handling() {
let apid_handler = BasicApidHandlerOwnedQueue::default();
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut sph = SpHeader::tc_unseg(0x004, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let mut test_buf: [u8; 32] = [0; 32];
pus_tc
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
ccsds_distrib.pass_tc(&test_buf).expect("Passing TC failed");
assert!(ccsds_distrib.packet_handler().known_packet_queue.is_empty());
let apid_handler = ccsds_distrib.packet_handler_mut();
let recvd = apid_handler.unknown_packet_queue.pop_front();
assert!(recvd.is_some());
let (apid, packet) = recvd.unwrap();
assert_eq!(apid, 0x004);
assert_eq!(packet.as_slice(), test_buf);
}
#[test]
fn test_ccsds_distribution() {
let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default());
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let tc_vec = pus_tc.to_vec().unwrap();
ccsds_distrib
.pass_ccsds(&sph, &tc_vec)
.expect("passing CCSDS TC failed");
let recvd = ccsds_distrib
.packet_handler_mut()
.known_packet_queue
.pop_front();
assert!(recvd.is_some());
let recvd = recvd.unwrap();
assert_eq!(recvd.0, 0x002);
assert_eq!(recvd.1, tc_vec);
}
#[test]
fn test_distribution_short_packet_fails() {
let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default());
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let tc_vec = pus_tc.to_vec().unwrap();
let result = ccsds_distrib.pass_tc(&tc_vec[0..6]);
assert!(result.is_err());
let error = result.unwrap_err();
if let CcsdsError::ByteConversionError(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = error
{
assert_eq!(found, 6);
assert_eq!(expected, 7);
} else {
panic!("Unexpected error variant");
}
}
}


@@ -1,115 +1,651 @@
 //! Telemetry and Telecommanding (TMTC) module. Contains packet routing components with special
 //! support for CCSDS and ECSS packets.
 //!
-//! The distributor modules provided by this module use trait objects provided by the user to
-//! directly dispatch received packets to packet listeners based on packet fields like the CCSDS
-//! Application Process ID (APID) or the ECSS PUS service type. This allows for fast packet
-//! routing without the overhead and complication of using message queues. However, it also requires
+//! It is recommended to read the [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/communication.html)
+//! about communication first. The TMTC abstractions provided by this framework are based on the
+//! assumption that all telemetry is sent to a special handler object called the TM sink, while
+//! all received telecommands are sent to a special handler object called the TC source. Using
+//! a design like this makes it simpler to add new TC packet sources or new telemetry generators:
+//! they only need to send the received and generated data to these objects.
+use crate::queue::GenericSendError;
+use crate::{
+    pool::{PoolAddr, PoolError},
+    ComponentId,
+};
+#[cfg(feature = "std")]
+pub use alloc_mod::*;
 #[cfg(feature = "alloc")]
 use downcast_rs::{impl_downcast, Downcast};
-use spacepackets::SpHeader;
+use spacepackets::{
+    ecss::{
+        tc::PusTcReader,
+        tm::{PusTmCreator, PusTmReader},
+    },
+    SpHeader,
+};
+#[cfg(feature = "std")]
+use std::sync::mpsc;
+#[cfg(feature = "std")]
+pub use std_mod::*;
-#[cfg(feature = "alloc")]
-pub mod ccsds_distrib;
-#[cfg(feature = "alloc")]
-pub mod pus_distrib;
 pub mod tm_helper;
-#[cfg(feature = "alloc")]
-pub use ccsds_distrib::{CcsdsDistributor, CcsdsError, CcsdsPacketHandler};
-#[cfg(feature = "alloc")]
-pub use pus_distrib::{PusDistributor, PusServiceDistributor};
+/// Simple type modelling a packet stored inside a pool structure. This structure is intended to
+/// be used when sending a packet via a message queue, so it also contains the sender ID.
+#[derive(Debug, PartialEq, Eq, Clone)]
+pub struct PacketInPool {
+    pub sender_id: ComponentId,
+    pub store_addr: PoolAddr,
+}
+impl PacketInPool {
+    pub fn new(sender_id: ComponentId, store_addr: PoolAddr) -> Self {
+        Self {
+            sender_id,
+            store_addr,
+        }
+    }
+}
-/// Generic trait for object which can receive any telecommands in form of a raw bytestream, with
+/// Generic trait for an object which can send any packets in the form of a raw bytestream, with
 /// no assumptions about the received protocol.
-///
-/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
-/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows to pass the respective packets in
-/// raw byte format into them.
-pub trait ReceivesTcCore {
+pub trait PacketSenderRaw: Send {
     type Error;
-    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error>;
+    fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error>;
 }
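A minimal sketch of a custom implementor of the new sender-style trait; `ByteCountingSender` is hypothetical and stands in for any TC source or statistics hook:

```rust
use satrs::tmtc::PacketSenderRaw;
use satrs::ComponentId;
use std::cell::Cell;

// Hypothetical sender which only counts forwarded bytes, e.g. for link statistics.
#[derive(Default)]
struct ByteCountingSender {
    bytes_sent: Cell<usize>,
}

impl PacketSenderRaw for ByteCountingSender {
    type Error = ();

    fn send_packet(&self, _sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
        // A real TC source would forward the bytes to a queue or pool here.
        self.bytes_sent.set(self.bytes_sent.get() + packet.len());
        Ok(())
    }
}

fn main() {
    let sender = ByteCountingSender::default();
    sender.send_packet(0, &[0x18, 0x00]).unwrap();
    assert_eq!(sender.bytes_sent.get(), 2);
}
```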
-/// Extension trait of [ReceivesTcCore] which allows downcasting by implementing [Downcast] and
-/// is also sendable.
+/// Extension trait of [PacketSenderRaw] which allows downcasting by implementing [Downcast].
 #[cfg(feature = "alloc")]
-pub trait ReceivesTc: ReceivesTcCore + Downcast + Send {
+pub trait PacketSenderRawExt: PacketSenderRaw + Downcast {
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error>;
+    fn upcast(&self) -> &dyn PacketSenderRaw<Error = Self::Error>;
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error>;
+    fn upcast_mut(&mut self) -> &mut dyn PacketSenderRaw<Error = Self::Error>;
 }
-/// Blanket implementation to automatically implement [ReceivesTc] when the [alloc] feature
-/// is enabled.
+/// Blanket implementation to automatically implement [PacketSenderRawExt] when the [alloc]
+/// feature is enabled.
 #[cfg(feature = "alloc")]
-impl<T> ReceivesTc for T
+impl<T> PacketSenderRawExt for T
 where
-    T: ReceivesTcCore + Send + 'static,
+    T: PacketSenderRaw + Send + 'static,
 {
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error> {
+    fn upcast(&self) -> &dyn PacketSenderRaw<Error = Self::Error> {
         self
     }
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error> {
+    fn upcast_mut(&mut self) -> &mut dyn PacketSenderRaw<Error = Self::Error> {
         self
     }
 }
 #[cfg(feature = "alloc")]
-impl_downcast!(ReceivesTc assoc Error);
+impl_downcast!(PacketSenderRawExt assoc Error);
-/// Generic trait for object which can receive CCSDS space packets, for example ECSS PUS packets
-/// for CCSDS File Delivery Protocol (CFDP) packets.
-///
-/// This trait is implemented by both the [crate::tmtc::pus_distrib::PusDistributor] and the
-/// [crate::tmtc::ccsds_distrib::CcsdsDistributor] which allows
-/// to pass the respective packets in raw byte format or in CCSDS format into them.
-pub trait ReceivesCcsdsTc {
+/// Generic trait for an object which can send CCSDS space packets, for example ECSS PUS packets
+/// or CCSDS File Delivery Protocol (CFDP) packets wrapped in space packets.
+pub trait PacketSenderCcsds: Send {
     type Error;
-    fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>;
+    fn send_ccsds(
+        &self,
+        sender_id: ComponentId,
+        header: &SpHeader,
+        tc_raw: &[u8],
+    ) -> Result<(), Self::Error>;
 }
+#[cfg(feature = "std")]
+impl PacketSenderCcsds for mpsc::Sender<PacketAsVec> {
+    type Error = GenericSendError;
+    fn send_ccsds(
+        &self,
+        sender_id: ComponentId,
+        _: &SpHeader,
+        tc_raw: &[u8],
+    ) -> Result<(), Self::Error> {
+        self.send(PacketAsVec::new(sender_id, tc_raw.to_vec()))
+            .map_err(|_| GenericSendError::RxDisconnected)
+    }
+}
+#[cfg(feature = "std")]
+impl PacketSenderCcsds for mpsc::SyncSender<PacketAsVec> {
+    type Error = GenericSendError;
+    fn send_ccsds(
+        &self,
+        sender_id: ComponentId,
+        _: &SpHeader,
+        packet_raw: &[u8],
+    ) -> Result<(), Self::Error> {
+        self.try_send(PacketAsVec::new(sender_id, packet_raw.to_vec()))
+            .map_err(|e| match e {
+                mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
+                mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
+            })
+    }
+}
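A short usage sketch for the two mpsc-based impls above. Note that both ignore the pre-parsed `SpHeader` argument and forward only the raw bytes:

```rust
use satrs::tmtc::{PacketAsVec, PacketSenderCcsds};
use spacepackets::SpHeader;
use std::sync::mpsc;

fn main() {
    let (tx, rx) = mpsc::channel::<PacketAsVec>();
    let header = SpHeader::new_for_unseg_tc(0x002, 0, 0);
    // The header is only passed for trait compatibility; the raw bytes travel.
    tx.send_ccsds(42, &header, &[0x18, 0x02, 0xC0, 0x00])
        .expect("sending CCSDS packet failed");
    let received = rx.try_recv().unwrap();
    assert_eq!(received.sender_id, 42);
    assert_eq!(received.packet, [0x18, 0x02, 0xC0, 0x00]);
}
```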
-/// Generic trait for a TM packet source, with no restrictions on the type of TM.
+/// Generic trait for a packet receiver, with no restrictions on the type of packet.
 /// Implementors write the telemetry into the provided buffer and return the size of the telemetry.
-pub trait TmPacketSourceCore {
+pub trait PacketSource: Send {
     type Error;
     fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>;
 }
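A minimal sketch of a `PacketSource` implementor; `QueuedTmSource` is hypothetical and drains packets from an internal FIFO:

```rust
use satrs::tmtc::PacketSource;
use std::collections::VecDeque;

// Hypothetical TM source backed by an in-memory queue.
#[derive(Default)]
struct QueuedTmSource {
    queue: VecDeque<Vec<u8>>,
}

impl PacketSource for QueuedTmSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        match self.queue.pop_front() {
            Some(tm) if tm.len() <= buffer.len() => {
                buffer[..tm.len()].copy_from_slice(&tm);
                Ok(tm.len())
            }
            // Queue empty (or buffer too small, dropped for simplicity): 0 bytes.
            _ => Ok(0),
        }
    }
}

fn main() {
    let mut source = QueuedTmSource::default();
    source.queue.push_back(vec![1, 2, 3]);
    let mut buf = [0u8; 16];
    assert_eq!(source.retrieve_packet(&mut buf).unwrap(), 3);
}
```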
-/// Extension trait of [TmPacketSourceCore] which allows downcasting by implementing [Downcast] and
-/// is also sendable.
+/// Extension trait of [PacketSource] which allows downcasting by implementing [Downcast].
 #[cfg(feature = "alloc")]
-pub trait TmPacketSource: TmPacketSourceCore + Downcast + Send {
+pub trait PacketSourceExt: PacketSource + Downcast {
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error>;
+    fn upcast(&self) -> &dyn PacketSource<Error = Self::Error>;
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error>;
+    fn upcast_mut(&mut self) -> &mut dyn PacketSource<Error = Self::Error>;
 }
-/// Blanket implementation to automatically implement [ReceivesTc] when the [alloc] feature
+/// Blanket implementation to automatically implement [PacketSourceExt] when the [alloc] feature
 /// is enabled.
 #[cfg(feature = "alloc")]
-impl<T> TmPacketSource for T
+impl<T> PacketSourceExt for T
 where
-    T: TmPacketSourceCore + Send + 'static,
+    T: PacketSource + 'static,
 {
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error> {
+    fn upcast(&self) -> &dyn PacketSource<Error = Self::Error> {
         self
     }
     // Remove this once trait upcasting coercion has been implemented.
     // Tracking issue: https://github.com/rust-lang/rust/issues/65991
-    fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error> {
+    fn upcast_mut(&mut self) -> &mut dyn PacketSource<Error = Self::Error> {
         self
     }
 }
/// Helper trait for any generic (static) store which allows storing raw or CCSDS packets.
pub trait CcsdsPacketPool {
fn add_ccsds_tc(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<PoolAddr, PoolError> {
self.add_raw_tc(tc_raw)
}
fn add_raw_tc(&mut self, tc_raw: &[u8]) -> Result<PoolAddr, PoolError>;
}
/// Helper trait for any generic (static) store which allows storing ECSS PUS Telecommand packets.
pub trait PusTcPool {
fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<PoolAddr, PoolError>;
}
/// Helper trait for any generic (static) store which allows storing ECSS PUS Telemetry packets.
pub trait PusTmPool {
fn add_pus_tm_from_reader(&mut self, pus_tm: &PusTmReader) -> Result<PoolAddr, PoolError>;
fn add_pus_tm_from_creator(&mut self, pus_tm: &PusTmCreator) -> Result<PoolAddr, PoolError>;
}
/// Generic trait for any sender component able to send packets stored inside a pool structure.
pub trait PacketInPoolSender: Send {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: PoolAddr,
) -> Result<(), GenericSendError>;
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use alloc::vec::Vec;
use super::*;
/// Simple type modelling a packet stored in the heap. This structure is intended to
/// be used when sending a packet via a message queue, so it also contains the sender ID.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct PacketAsVec {
pub sender_id: ComponentId,
pub packet: Vec<u8>,
}
impl PacketAsVec {
pub fn new(sender_id: ComponentId, packet: Vec<u8>) -> Self {
Self { sender_id, packet }
}
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use core::cell::RefCell;
#[cfg(feature = "crossbeam")]
use crossbeam_channel as cb;
use spacepackets::ecss::WritablePusPacket;
use thiserror::Error;
use crate::pool::PoolProvider;
use crate::pus::{EcssTmSender, EcssTmtcError, PacketSenderPusTc};
use super::*;
/// Newtype wrapper around the [SharedStaticMemoryPool] to enable extension helper traits on
/// top of the regular shared memory pool API.
#[derive(Clone)]
pub struct SharedPacketPool(pub SharedStaticMemoryPool);
impl SharedPacketPool {
pub fn new(pool: &SharedStaticMemoryPool) -> Self {
Self(pool.clone())
}
}
impl PusTcPool for SharedPacketPool {
fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<PoolAddr, PoolError> {
let mut pg = self.0.write().map_err(|_| PoolError::LockError)?;
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
})?;
Ok(addr)
}
}
impl PusTmPool for SharedPacketPool {
fn add_pus_tm_from_reader(&mut self, pus_tm: &PusTmReader) -> Result<PoolAddr, PoolError> {
let mut pg = self.0.write().map_err(|_| PoolError::LockError)?;
let addr = pg.free_element(pus_tm.len_packed(), |buf| {
buf[0..pus_tm.len_packed()].copy_from_slice(pus_tm.raw_data());
})?;
Ok(addr)
}
fn add_pus_tm_from_creator(
&mut self,
pus_tm: &PusTmCreator,
) -> Result<PoolAddr, PoolError> {
let mut pg = self.0.write().map_err(|_| PoolError::LockError)?;
let mut result = Ok(0);
let addr = pg.free_element(pus_tm.len_written(), |buf| {
result = pus_tm.write_to_bytes(buf);
})?;
result?;
Ok(addr)
}
}
impl CcsdsPacketPool for SharedPacketPool {
fn add_raw_tc(&mut self, tc_raw: &[u8]) -> Result<PoolAddr, PoolError> {
let mut pg = self.0.write().map_err(|_| PoolError::LockError)?;
let addr = pg.free_element(tc_raw.len(), |buf| {
buf[0..tc_raw.len()].copy_from_slice(tc_raw);
})?;
Ok(addr)
}
}
#[cfg(feature = "std")]
impl PacketSenderRaw for mpsc::Sender<PacketAsVec> {
type Error = GenericSendError;
fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
self.send(PacketAsVec::new(sender_id, packet.to_vec()))
.map_err(|_| GenericSendError::RxDisconnected)
}
}
#[cfg(feature = "std")]
impl PacketSenderRaw for mpsc::SyncSender<PacketAsVec> {
type Error = GenericSendError;
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.try_send(PacketAsVec::new(sender_id, tc_raw.to_vec()))
.map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})
}
}
#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum StoreAndSendError {
#[error("Store error: {0}")]
Store(#[from] PoolError),
#[error("Genreric send error: {0}")]
Send(#[from] GenericSendError),
}
pub use crate::pool::SharedStaticMemoryPool;
impl PacketInPoolSender for mpsc::Sender<PacketInPool> {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: PoolAddr,
) -> Result<(), GenericSendError> {
self.send(PacketInPool::new(sender_id, store_addr))
.map_err(|_| GenericSendError::RxDisconnected)
}
}
impl PacketInPoolSender for mpsc::SyncSender<PacketInPool> {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: PoolAddr,
) -> Result<(), GenericSendError> {
self.try_send(PacketInPool::new(sender_id, store_addr))
.map_err(|e| match e {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})
}
}
#[cfg(feature = "crossbeam")]
impl PacketInPoolSender for cb::Sender<PacketInPool> {
fn send_packet(
&self,
sender_id: ComponentId,
store_addr: PoolAddr,
) -> Result<(), GenericSendError> {
self.try_send(PacketInPool::new(sender_id, store_addr))
.map_err(|e| match e {
cb::TrySendError::Full(_) => GenericSendError::QueueFull(None),
cb::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
})
}
}
/// This is the primary structure used to send packets stored in a dedicated memory pool
/// structure.
#[derive(Clone)]
pub struct PacketSenderWithSharedPool<
Sender: PacketInPoolSender = mpsc::SyncSender<PacketInPool>,
PacketPool: CcsdsPacketPool = SharedPacketPool,
> {
pub sender: Sender,
pub shared_pool: RefCell<PacketPool>,
}
impl<Sender: PacketInPoolSender> PacketSenderWithSharedPool<Sender, SharedPacketPool> {
pub fn new_with_shared_packet_pool(
packet_sender: Sender,
shared_pool: &SharedStaticMemoryPool,
) -> Self {
Self {
sender: packet_sender,
shared_pool: RefCell::new(SharedPacketPool::new(shared_pool)),
}
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool>
PacketSenderWithSharedPool<Sender, PacketStore>
{
pub fn new(packet_sender: Sender, shared_pool: PacketStore) -> Self {
Self {
sender: packet_sender,
shared_pool: RefCell::new(shared_pool),
}
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Clone>
PacketSenderWithSharedPool<Sender, PacketStore>
{
pub fn shared_packet_store(&self) -> PacketStore {
let pool = self.shared_pool.borrow();
pool.clone()
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Send> PacketSenderRaw
for PacketSenderWithSharedPool<Sender, PacketStore>
{
type Error = StoreAndSendError;
fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
let mut shared_pool = self.shared_pool.borrow_mut();
let store_addr = shared_pool.add_raw_tc(packet)?;
drop(shared_pool);
self.sender
.send_packet(sender_id, store_addr)
.map_err(StoreAndSendError::Send)?;
Ok(())
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + PusTcPool + Send>
PacketSenderPusTc for PacketSenderWithSharedPool<Sender, PacketStore>
{
type Error = StoreAndSendError;
fn send_pus_tc(
&self,
sender_id: ComponentId,
_: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut shared_pool = self.shared_pool.borrow_mut();
let store_addr = shared_pool.add_raw_tc(pus_tc.raw_data())?;
drop(shared_pool);
self.sender
.send_packet(sender_id, store_addr)
.map_err(StoreAndSendError::Send)?;
Ok(())
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + Send> PacketSenderCcsds
for PacketSenderWithSharedPool<Sender, PacketStore>
{
type Error = StoreAndSendError;
fn send_ccsds(
&self,
sender_id: ComponentId,
_sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
self.send_packet(sender_id, tc_raw)
}
}
impl<Sender: PacketInPoolSender, PacketStore: CcsdsPacketPool + PusTmPool + Send> EcssTmSender
for PacketSenderWithSharedPool<Sender, PacketStore>
{
fn send_tm(
&self,
sender_id: crate::ComponentId,
tm: crate::pus::PusTmVariant,
) -> Result<(), crate::pus::EcssTmtcError> {
let send_addr = |store_addr: PoolAddr| {
self.sender
.send_packet(sender_id, store_addr)
.map_err(EcssTmtcError::Send)
};
match tm {
crate::pus::PusTmVariant::InStore(store_addr) => send_addr(store_addr),
crate::pus::PusTmVariant::Direct(tm_creator) => {
let mut pool = self.shared_pool.borrow_mut();
let store_addr = pool.add_pus_tm_from_creator(&tm_creator)?;
send_addr(store_addr)
}
}
}
}
}
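The tests below exercise the same path; for reference, a condensed standalone sketch of the store-then-send flow, using only APIs visible in this diff:

```rust
use satrs::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use satrs::tmtc::{
    PacketSenderRaw, PacketSenderWithSharedPool, SharedPacketPool, SharedStaticMemoryPool,
};
use std::sync::{mpsc, RwLock};

fn main() {
    let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
    let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
        StaticMemoryPool::new(pool_cfg),
    )));
    let (tx, rx) = mpsc::sync_channel(4);
    let sender = PacketSenderWithSharedPool::new(tx, shared_pool.clone());
    // The payload lands in the pool; only the pool address crosses the channel.
    sender.send_packet(1, &[1, 2, 3]).expect("send failed");
    let in_pool = rx.try_recv().unwrap();
    let mut pool = shared_pool.0.write().unwrap();
    let read_guard = pool.read_with_guard(in_pool.store_addr);
    assert_eq!(read_guard.read_as_vec().unwrap(), [1, 2, 3]);
}
```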
#[cfg(test)]
pub(crate) mod tests {
use alloc::vec;
use std::sync::RwLock;
use crate::pool::{PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use super::*;
use std::sync::mpsc;
pub(crate) fn send_with_sender<SendError>(
sender_id: ComponentId,
packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
packet: &[u8],
) -> Result<(), SendError> {
packet_sender.send_packet(sender_id, packet)
}
#[test]
fn test_basic_mpsc_channel_sender_bounded() {
let (tx, rx) = mpsc::channel();
let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(1, &tx, &some_packet).expect("failed to send packet");
let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet.packet);
assert_eq!(1, rx_packet.sender_id);
}
#[test]
fn test_basic_mpsc_channel_receiver_dropped() {
let (tx, rx) = mpsc::channel();
let some_packet = vec![1, 2, 3, 4, 5];
drop(rx);
let result = send_with_sender(2, &tx, &some_packet);
assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::RxDisconnected);
}
#[test]
fn test_basic_mpsc_sync_sender() {
let (tx, rx) = mpsc::sync_channel(3);
let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(3, &tx, &some_packet).expect("failed to send packet");
let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet.packet);
assert_eq!(3, rx_packet.sender_id);
}
#[test]
fn test_basic_mpsc_sync_sender_receiver_dropped() {
let (tx, rx) = mpsc::sync_channel(3);
let some_packet = vec![1, 2, 3, 4, 5];
drop(rx);
let result = send_with_sender(0, &tx, &some_packet);
assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::RxDisconnected);
}
#[test]
fn test_basic_mpsc_sync_sender_queue_full() {
let (tx, rx) = mpsc::sync_channel(1);
let some_packet = vec![1, 2, 3, 4, 5];
send_with_sender(0, &tx, &some_packet).expect("failed to send packet");
let result = send_with_sender(1, &tx, &some_packet);
assert!(result.is_err());
matches!(result.unwrap_err(), GenericSendError::QueueFull(None));
let rx_packet = rx.try_recv().unwrap();
assert_eq!(some_packet, rx_packet.packet);
}
#[test]
fn test_basic_shared_store_sender_unbounded_sender() {
let (tc_tx, tc_rx) = mpsc::channel();
let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(5, &tc_sender, &some_packet).expect("failed to send packet");
let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 5)
}
#[test]
fn test_basic_shared_store_sender() {
let (tc_tx, tc_rx) = mpsc::sync_channel(10);
let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(5, &tc_sender, &some_packet).expect("failed to send packet");
let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 5)
}
#[test]
fn test_basic_shared_store_sender_rx_dropped() {
let (tc_tx, tc_rx) = mpsc::sync_channel(10);
let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
drop(tc_rx);
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
let result = send_with_sender(2, &tc_sender, &some_packet);
assert!(result.is_err());
matches!(
result.unwrap_err(),
StoreAndSendError::Send(GenericSendError::RxDisconnected)
);
}
#[test]
fn test_basic_shared_store_sender_queue_full() {
let (tc_tx, tc_rx) = mpsc::sync_channel(1);
let pool_cfg = StaticPoolConfig::new(vec![(2, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(3, &tc_sender, &some_packet).expect("failed to send packet");
let result = send_with_sender(3, &tc_sender, &some_packet);
assert!(result.is_err());
matches!(
result.unwrap_err(),
StoreAndSendError::Send(GenericSendError::RxDisconnected)
);
let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 3);
}
#[test]
fn test_basic_shared_store_store_error() {
let (tc_tx, tc_rx) = mpsc::sync_channel(1);
let pool_cfg = StaticPoolConfig::new(vec![(1, 8)], true);
let shared_pool = SharedPacketPool::new(&SharedStaticMemoryPool::new(RwLock::new(
StaticMemoryPool::new(pool_cfg),
)));
let some_packet = vec![1, 2, 3, 4, 5];
let tc_sender = PacketSenderWithSharedPool::new(tc_tx, shared_pool.clone());
send_with_sender(4, &tc_sender, &some_packet).expect("failed to send packet");
let result = send_with_sender(4, &tc_sender, &some_packet);
assert!(result.is_err());
matches!(
result.unwrap_err(),
StoreAndSendError::Store(PoolError::StoreFull(..))
);
let packet_in_pool = tc_rx.try_recv().unwrap();
let mut pool = shared_pool.0.write().unwrap();
let read_guard = pool.read_with_guard(packet_in_pool.store_addr);
assert_eq!(read_guard.read_as_vec().unwrap(), some_packet);
assert_eq!(packet_in_pool.sender_id, 4);
}
}


@@ -1,405 +0,0 @@
//! ECSS PUS packet routing components.
//!
//! The routing components consist of two core components:
//! 1. [PusDistributor] component which dispatches received packets to a user-provided handler.
//! 2. [PusServiceDistributor] trait which should be implemented by the user-provided PUS packet
//! handler.
//!
//! The [PusDistributor] implements the [ReceivesEcssPusTc], [ReceivesCcsdsTc] and the
//! [ReceivesTcCore] trait which allows to pass raw packets, CCSDS packets and PUS TC packets into
//! it. Upon receiving a packet, it performs the following steps:
//!
//! 1. It tries to extract the [SpHeader] and [spacepackets::ecss::tc::PusTcReader] objects from
//! the raw bytestream. If this process fails, a [PusDistribError::PusError] is returned to the
//! user.
//! 2. If it was possible to extract both components, the packet will be passed to the
//! [PusServiceDistributor::distribute_packet] method provided by the user.
//!
//! # Example
//!
//! ```rust
//! use spacepackets::ecss::WritablePusPacket;
//! use satrs::tmtc::pus_distrib::{PusDistributor, PusServiceDistributor};
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::SpHeader;
//! use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
//!
//! struct ConcretePusHandler {
//! handler_call_count: u32
//! }
//!
//! // This is a very simple possible service provider. It increments an internal call count field,
//! // which is used to verify the handler was called
//! impl PusServiceDistributor for ConcretePusHandler {
//! type Error = ();
//! fn distribute_packet(&mut self, service: u8, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
//! assert_eq!(service, 17);
//! assert_eq!(pus_tc.len_packed(), 13);
//! self.handler_call_count += 1;
//! Ok(())
//! }
//! }
//!
//! let service_handler = ConcretePusHandler {
//! handler_call_count: 0
//! };
//! let mut pus_distributor = PusDistributor::new(service_handler);
//!
//! // Create and pass PUS ping telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
//! let mut pus_tc = PusTcCreator::new_simple(&mut space_packet_header, 17, 1, None, true);
//! let mut test_buf: [u8; 32] = [0; 32];
//! let mut size = pus_tc
//! .write_to_bytes(test_buf.as_mut_slice())
//! .expect("Error writing TC to buffer");
//! let tc_slice = &test_buf[0..size];
//!
//! pus_distributor.pass_tc(tc_slice).expect("Passing PUS telecommand failed");
//!
//! // User helper function to retrieve concrete class. We check the call count here to verify
//! // that the PUS ping telecommand was routed successfully.
//! let concrete_handler = pus_distributor.service_distributor();
//! assert_eq!(concrete_handler.handler_call_count, 1);
//! ```
use crate::pus::ReceivesEcssPusTc;
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use core::fmt::{Display, Formatter};
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::{PusError, PusPacket};
use spacepackets::SpHeader;
#[cfg(feature = "std")]
use std::error::Error;
/// Trait for a generic distributor object which can distribute PUS packets based on packet
/// properties like the PUS service, space packet header or any other content of the PUS packet.
pub trait PusServiceDistributor {
type Error;
fn distribute_packet(
&mut self,
service: u8,
header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error>;
}
/// Generic distributor object which dispatches received packets to a user provided handler.
pub struct PusDistributor<ServiceDistributor: PusServiceDistributor<Error = E>, E> {
service_distributor: ServiceDistributor,
}
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E>
PusDistributor<ServiceDistributor, E>
{
pub fn new(service_provider: ServiceDistributor) -> Self {
PusDistributor {
service_distributor: service_provider,
}
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum PusDistribError<E> {
CustomError(E),
PusError(PusError),
}
impl<E: Display> Display for PusDistribError<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusDistribError::CustomError(e) => write!(f, "pus distribution error: {e}"),
PusDistribError::PusError(e) => write!(f, "pus distribution error: {e}"),
}
}
}
#[cfg(feature = "std")]
impl<E: Error> Error for PusDistribError<E> {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
Self::CustomError(e) => e.source(),
Self::PusError(e) => e.source(),
}
}
}
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesTcCore
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_tc(&mut self, tm_raw: &[u8]) -> Result<(), Self::Error> {
// Convert to ccsds and call pass_ccsds
let (sp_header, _) = SpHeader::from_be_bytes(tm_raw)
.map_err(|e| PusDistribError::PusError(PusError::ByteConversion(e)))?;
self.pass_ccsds(&sp_header, tm_raw)
}
}
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesCcsdsTc
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_ccsds(&mut self, header: &SpHeader, tm_raw: &[u8]) -> Result<(), Self::Error> {
let (tc, _) = PusTcReader::new(tm_raw).map_err(|e| PusDistribError::PusError(e))?;
self.pass_pus_tc(header, &tc)
}
}
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesEcssPusTc
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
self.service_distributor
.distribute_packet(pus_tc.service(), header, pus_tc)
.map_err(|e| PusDistribError::CustomError(e))
}
}
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static>
PusDistributor<ServiceDistributor, E>
{
pub fn service_distributor(&self) -> &ServiceDistributor {
&self.service_distributor
}
pub fn service_distributor_mut(&mut self) -> &mut ServiceDistributor {
&mut self.service_distributor
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::queue::GenericSendError;
use crate::tmtc::ccsds_distrib::tests::{
generate_ping_tc, generate_ping_tc_as_vec, BasicApidHandlerOwnedQueue,
BasicApidHandlerSharedQueue,
};
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
use alloc::format;
use alloc::vec::Vec;
use spacepackets::ecss::PusError;
use spacepackets::CcsdsPacket;
#[cfg(feature = "std")]
use std::collections::VecDeque;
#[cfg(feature = "std")]
use std::sync::{Arc, Mutex};
fn is_send<T: Send>(_: &T) {}
pub struct PacketInfo {
pub service: u8,
pub apid: u16,
pub packet: Vec<u8>,
}
struct PusHandlerSharedQueue(Arc<Mutex<VecDeque<PacketInfo>>>);
#[derive(Default)]
struct PusHandlerOwnedQueue(VecDeque<PacketInfo>);
impl PusServiceDistributor for PusHandlerSharedQueue {
type Error = PusError;
fn distribute_packet(
&mut self,
service: u8,
sp_header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut packet: Vec<u8> = Vec::new();
packet.extend_from_slice(pus_tc.raw_data());
self.0
.lock()
.expect("Mutex lock failed")
.push_back(PacketInfo {
service,
apid: sp_header.apid(),
packet,
});
Ok(())
}
}
impl PusServiceDistributor for PusHandlerOwnedQueue {
type Error = PusError;
fn distribute_packet(
&mut self,
service: u8,
sp_header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut packet: Vec<u8> = Vec::new();
packet.extend_from_slice(pus_tc.raw_data());
self.0.push_back(PacketInfo {
service,
apid: sp_header.apid(),
packet,
});
Ok(())
}
}
struct ApidHandlerShared {
pub pus_distrib: PusDistributor<PusHandlerSharedQueue, PusError>,
pub handler_base: BasicApidHandlerSharedQueue,
}
struct ApidHandlerOwned {
pub pus_distrib: PusDistributor<PusHandlerOwnedQueue, PusError>,
handler_base: BasicApidHandlerOwnedQueue,
}
macro_rules! apid_handler_impl {
() => {
type Error = PusError;
fn valid_apids(&self) -> &'static [u16] {
&[0x000, 0x002]
}
fn handle_known_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
self.handler_base
.handle_known_apid(&sp_header, tc_raw)
.ok()
.expect("Unexpected error");
match self.pus_distrib.pass_ccsds(&sp_header, tc_raw) {
Ok(_) => Ok(()),
Err(e) => match e {
PusDistribError::CustomError(_) => Ok(()),
PusDistribError::PusError(e) => Err(e),
},
}
}
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
self.handler_base
.handle_unknown_apid(&sp_header, tc_raw)
.ok()
.expect("Unexpected error");
Ok(())
}
};
}
impl CcsdsPacketHandler for ApidHandlerOwned {
apid_handler_impl!();
}
impl CcsdsPacketHandler for ApidHandlerShared {
apid_handler_impl!();
}
#[test]
fn test_pus_distribution_as_raw_packet() {
let mut pus_distrib = PusDistributor::new(PusHandlerOwnedQueue::default());
let tc = generate_ping_tc_as_vec();
let result = pus_distrib.pass_tc(&tc);
assert!(result.is_ok());
assert_eq!(pus_distrib.service_distributor_mut().0.len(), 1);
let packet_info = pus_distrib.service_distributor_mut().0.pop_front().unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc);
}
#[test]
fn test_pus_distribution_combined_handler() {
let known_packet_queue = Arc::new(Mutex::default());
let unknown_packet_queue = Arc::new(Mutex::default());
let pus_queue = Arc::new(Mutex::default());
let pus_handler = PusHandlerSharedQueue(pus_queue.clone());
let handler_base = BasicApidHandlerSharedQueue {
known_packet_queue: known_packet_queue.clone(),
unknown_packet_queue: unknown_packet_queue.clone(),
};
let pus_distrib = PusDistributor::new(pus_handler);
is_send(&pus_distrib);
let apid_handler = ApidHandlerShared {
pus_distrib,
handler_base,
};
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
// Pass packet to distributor
ccsds_distrib
.pass_tc(tc_slice)
.expect("Passing TC slice failed");
let recvd_ccsds = known_packet_queue.lock().unwrap().pop_front();
assert!(unknown_packet_queue.lock().unwrap().is_empty());
assert!(recvd_ccsds.is_some());
let (apid, packet) = recvd_ccsds.unwrap();
assert_eq!(apid, 0x002);
assert_eq!(packet.as_slice(), tc_slice);
let recvd_pus = pus_queue.lock().unwrap().pop_front();
assert!(recvd_pus.is_some());
let packet_info = recvd_pus.unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc_slice);
}
#[test]
fn test_accessing_combined_distributor() {
let pus_handler = PusHandlerOwnedQueue::default();
let handler_base = BasicApidHandlerOwnedQueue::default();
let pus_distrib = PusDistributor::new(pus_handler);
let apid_handler = ApidHandlerOwned {
pus_distrib,
handler_base,
};
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
ccsds_distrib
.pass_tc(tc_slice)
.expect("Passing TC slice failed");
let apid_handler_casted_back = ccsds_distrib.packet_handler_mut();
assert!(!apid_handler_casted_back
.handler_base
.known_packet_queue
.is_empty());
let handler_owned_queue = apid_handler_casted_back
.pus_distrib
.service_distributor_mut();
assert!(!handler_owned_queue.0.is_empty());
let packet_info = handler_owned_queue.0.pop_front().unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc_slice);
}
#[test]
fn test_pus_distrib_error_custom_error() {
let error = PusDistribError::CustomError(GenericSendError::RxDisconnected);
let error_string = format!("{}", error);
assert_eq!(
error_string,
"pus distribution error: rx side has disconnected"
);
}
#[test]
fn test_pus_distrib_error_pus_error() {
let error = PusDistribError::<GenericSendError>::PusError(PusError::CrcCalculationMissing);
let error_string = format!("{}", error);
assert_eq!(
error_string,
"pus distribution error: crc16 was not calculated"
);
}
}


@@ -3,50 +3,6 @@ use spacepackets::time::cds::CdsTime;
 use spacepackets::time::TimeWriter;
 use spacepackets::SpHeader;
 
-#[cfg(feature = "std")]
-pub use std_mod::*;
-
-#[cfg(feature = "std")]
-pub mod std_mod {
-    use crate::pool::{
-        PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StoreAddr, StoreError,
-    };
-    use crate::pus::EcssTmtcError;
-    use spacepackets::ecss::tm::PusTmCreator;
-    use spacepackets::ecss::WritablePusPacket;
-    use std::sync::{Arc, RwLock};
-
-    #[derive(Clone)]
-    pub struct SharedTmPool(pub SharedStaticMemoryPool);
-
-    impl SharedTmPool {
-        pub fn new(shared_pool: StaticMemoryPool) -> Self {
-            Self(Arc::new(RwLock::new(shared_pool)))
-        }
-
-        pub fn clone_backing_pool(&self) -> SharedStaticMemoryPool {
-            self.0.clone()
-        }
-
-        pub fn shared_pool(&self) -> &SharedStaticMemoryPool {
-            &self.0
-        }
-
-        pub fn shared_pool_mut(&mut self) -> &mut SharedStaticMemoryPool {
-            &mut self.0
-        }
-
-        pub fn add_pus_tm(&self, pus_tm: &PusTmCreator) -> Result<StoreAddr, EcssTmtcError> {
-            let mut pg = self.0.write().map_err(|_| StoreError::LockError)?;
-            let addr = pg.free_element(pus_tm.len_written(), |buf| {
-                pus_tm
-                    .write_to_bytes(buf)
-                    .expect("writing PUS TM to store failed");
-            })?;
-            Ok(addr)
-        }
-    }
-}
-
 pub struct PusTmWithCdsShortHelper {
     apid: u16,
     cds_short_buf: [u8; 7],
@@ -92,9 +48,9 @@ impl PusTmWithCdsShortHelper {
         source_data: &'a [u8],
         seq_count: u16,
     ) -> PusTmCreator {
-        let mut reply_header = SpHeader::tm_unseg(self.apid, seq_count, 0).unwrap();
+        let reply_header = SpHeader::new_for_unseg_tm(self.apid, seq_count, 0);
         let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
-        PusTmCreator::new(&mut reply_header, tc_header, source_data, true)
+        PusTmCreator::new(reply_header, tc_header, source_data, true)
     }
 }
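
The second hunk captures two spacepackets API migrations at once: SpHeader::new_for_unseg_tm is now infallible (no more .unwrap()), and PusTmCreator::new takes the space packet header by value instead of by mutable reference. A minimal, self-contained sketch of the new call sequence follows; the APID, service, and subservice values are illustrative only, and the 7-byte buffer stands in for the CDS short timestamp that PusTmWithCdsShortHelper maintains.

use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::WritablePusPacket;
use spacepackets::SpHeader;

// Sketch of the post-migration TM creation path with made-up field values.
fn write_ping_reply_tm(buf: &mut [u8], timestamp: &[u8; 7]) -> usize {
    // Infallible now: no .unwrap() as in the removed line above.
    let reply_header = SpHeader::new_for_unseg_tm(0x002, 0, 0);
    let sec_header = PusTmSecondaryHeader::new_simple(17, 2, timestamp);
    // The header is passed by value instead of via &mut.
    let tm = PusTmCreator::new(reply_header, sec_header, &[], true);
    tm.write_to_bytes(buf).expect("writing PUS TM failed")
}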

View File

@@ -79,7 +79,7 @@ impl ModeRequestHandler for TestDevice {
         Ok(())
     }
 
-    fn announce_mode(&self, _requestor_info: MessageMetadata, _recursive: bool) {
+    fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
         println!(
             "{}: announcing mode: {:?}",
             self.name, self.mode_and_submode
@@ -150,9 +150,11 @@ impl TestAssembly {
                         ModeReply::ModeReply(self.mode_and_submode),
                     )
                     .unwrap(),
-                ModeRequest::AnnounceMode => self.announce_mode(request.requestor_info, false),
+                ModeRequest::AnnounceMode => {
+                    self.announce_mode(Some(request.requestor_info), false)
+                }
                 ModeRequest::AnnounceModeRecursive => {
-                    self.announce_mode(request.requestor_info, true)
+                    self.announce_mode(Some(request.requestor_info), true)
                 }
                 ModeRequest::ModeInfo(_) => todo!(),
             }
@@ -197,7 +199,7 @@ impl ModeRequestHandler for TestAssembly {
         Ok(())
     }
 
-    fn announce_mode(&self, requestor_info: MessageMetadata, recursive: bool) {
+    fn announce_mode(&self, requestor_info: Option<MessageMetadata>, recursive: bool) {
         println!(
             "TestAssembly: Announcing mode (recursively: {}): {:?}",
             recursive, self.mode_and_submode
@@ -207,6 +209,7 @@ impl ModeRequestHandler for TestAssembly {
         if recursive {
             mode_request = ModeRequest::AnnounceModeRecursive;
         }
+        let request_id = requestor_info.map_or(0, |info| info.request_id());
         self.mode_node
             .request_sender_map
             .0
@@ -214,10 +217,7 @@ impl ModeRequestHandler for TestAssembly {
             .for_each(|(_, sender)| {
                 sender
                     .send(GenericMessage::new(
-                        MessageMetadata::new(
-                            requestor_info.request_id(),
-                            self.mode_node.local_channel_id_generic(),
-                        ),
+                        MessageMetadata::new(request_id, self.mode_node.local_channel_id_generic()),
                         mode_request,
                     ))
                     .expect("sending mode request failed");

View File

@@ -1,4 +1,4 @@
-use satrs::pool::{PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig, StoreAddr};
+use satrs::pool::{PoolAddr, PoolGuard, PoolProvider, StaticMemoryPool, StaticPoolConfig};
 use std::ops::DerefMut;
 use std::sync::mpsc;
 use std::sync::mpsc::{Receiver, Sender};
@@ -12,7 +12,7 @@ fn threaded_usage() {
     let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
     let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
     let shared_clone = shared_pool.clone();
-    let (tx, rx): (Sender<StoreAddr>, Receiver<StoreAddr>) = mpsc::channel();
+    let (tx, rx): (Sender<PoolAddr>, Receiver<PoolAddr>) = mpsc::channel();
     let jh0 = thread::spawn(move || {
         let mut dummy = shared_pool.write().unwrap();
         let addr = dummy.add(&DUMMY_DATA).expect("Writing data failed");
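
The rename from StoreAddr to PoolAddr leaves the test's overall shape intact: packets live in the shared static pool while only the small address token travels through the channel. Below is a condensed sketch of that pattern under the new naming. The consumer-side read call into a caller-provided buffer is an assumption about the PoolProvider trait, mirroring the add call visible in the hunk.

use std::sync::{mpsc, Arc, RwLock};
use std::thread;

use satrs::pool::{PoolAddr, PoolProvider, StaticMemoryPool, StaticPoolConfig};

const DUMMY_DATA: [u8; 4] = [0, 1, 2, 3];

fn main() {
    // Pool config taken verbatim from the test above: (block size, count).
    let pool_cfg = StaticPoolConfig::new(vec![(16, 6), (32, 3), (8, 12)], false);
    let shared_pool = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
    let pool_clone = shared_pool.clone();
    let (tx, rx) = mpsc::channel::<PoolAddr>();

    let producer = thread::spawn(move || {
        // Store the packet once; only the PoolAddr crosses the channel.
        let addr = shared_pool
            .write()
            .unwrap()
            .add(&DUMMY_DATA)
            .expect("writing data failed");
        tx.send(addr).expect("sending pool address failed");
    });

    let addr = rx.recv().expect("receiving pool address failed");
    let mut buf = [0u8; 16];
    // Assumed PoolProvider::read signature: copies into the given buffer
    // and returns the number of bytes read.
    let read_len = pool_clone
        .read()
        .unwrap()
        .read(&addr, &mut buf)
        .expect("reading data failed");
    assert_eq!(&buf[..read_len], &DUMMY_DATA);
    producer.join().unwrap();
}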

View File

@@ -21,7 +21,7 @@ struct EventIntrospection {
 }
 
 //#[event(descr="This is some info event")]
-const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
+const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
 const INFO_EVENT_0_ERASED: EventU32 = EventU32::const_from_info(INFO_EVENT_0);
 
 // This is ideally auto-generated
@@ -36,7 +36,7 @@ const INFO_EVENT_0_INTROSPECTION: EventIntrospection = EventIntrospection {
 };
 
 //#[event(descr="This is some low severity event")]
-const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::const_new(0, 12);
+const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::new(0, 12);
 
 //const EVENT_LIST: [&'static Event; 2] = [&INFO_EVENT_0, &SOME_LOW_SEV_EVENT];
@@ -47,7 +47,7 @@ const TEST_GROUP_NAME_NAME: &str = "TEST_GROUP_NAME";
 //#[event(desc="Some medium severity event")]
 const MEDIUM_SEV_EVENT_IN_OTHER_GROUP: EventU32TypedSev<SeverityMedium> =
-    EventU32TypedSev::const_new(TEST_GROUP_NAME, 0);
+    EventU32TypedSev::new(TEST_GROUP_NAME, 0);
 
 const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED: EventU32 =
     EventU32::const_from_medium(MEDIUM_SEV_EVENT_IN_OTHER_GROUP);
@@ -62,7 +62,7 @@ const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION: EventIntrospection = EventI
     info: "Some medium severity event",
 };
 
-const CONST_SLICE: &'static [u8] = &[0, 1, 2, 3];
+const CONST_SLICE: &[u8] = &[0, 1, 2, 3];
 
 const INTROSPECTION_FOR_TEST_GROUP_0: [&EventIntrospection; 2] =
     [&INFO_EVENT_0_INTROSPECTION, &INFO_EVENT_0_INTROSPECTION];

Some files were not shown because too many files have changed in this diff.