Compare commits

...

148 Commits

Author SHA1 Message Date
a819feeaa2 Merge pull request 'Update embedded examples' (#194) from update-embedded-examples into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #194
2024-05-25 13:07:29 +02:00
46ce3fc772 update folder name
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-05-25 12:35:26 +02:00
8d27bdf3bf Update embedded examples
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
2024-05-25 12:32:46 +02:00
3d2a46f044 Merge pull request 'flight heritage segment in docs' (#193) from add-flight-heritage-docs into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #193
2024-05-22 19:13:18 +02:00
1f192af262
improve phrasing and links again 2024-05-22 19:11:43 +02:00
3f78c200ad
improve phrasing
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build queued...
2024-05-22 19:09:43 +02:00
d73dfcdd67
flight heritage segment in docs
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-05-22 19:05:05 +02:00
5cae0f7036 Merge pull request 'Heapless memory pool' (#191) from heapless-mem-pool into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #191
2024-05-22 13:16:29 +02:00
832250d211
minor improvements
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-05-22 12:55:50 +02:00
3c3b4349e8
that should do the job
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-05-21 19:48:56 +02:00
acf73e93b1
Introduce heapless memory pools
Some checks failed
Rust/sat-rs/pipeline/pr-main There was a failure building this commit
2024-05-21 18:31:19 +02:00
0b2d4f6187 Merge pull request 'satrs v0.2.1' (#190) from prep-satrs-v0.2.1 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #190
2024-05-19 13:19:23 +02:00
f7016b940a
changelog
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
Rust/sat-rs/pipeline/pr-main There was a failure building this commit
2024-05-19 08:14:31 +02:00
397ecd0c40
prep patch release 2024-05-19 08:13:32 +02:00
422f2c11ab Merge pull request 'removed unsafe block which is not necessary' (#189) from remove-unnecessary-unsafe-block into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #189
2024-05-18 12:46:58 +02:00
37e945fd91 Merge branch 'main' into remove-unnecessary-unsafe-block 2024-05-18 12:46:44 +02:00
45379858f0 Merge pull request 'TCP server config default improvements' (#187) from tcp-server-cfg-improvements into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #187
2024-05-18 12:44:53 +02:00
7c194ab543 Merge branch 'main' into tcp-server-cfg-improvements
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build queued...
2024-05-18 12:44:42 +02:00
bca1d7292a
removed unsafe block which is not necessary
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-05-13 17:01:26 +02:00
cdcb9cae1c Merge pull request 'cross ref docs for events' (#188) from cross-ref-docs-for-events into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #188
2024-05-13 15:57:00 +02:00
9dcbd42862
cross ref docs
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-05-13 15:36:09 +02:00
da05efc16d
TCP server config default improvements
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-05-13 15:31:51 +02:00
e38e25a998 Merge pull request 'update the satrs example graph' (#186) from satrs-example-graph-update into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #186
2024-05-13 15:29:39 +02:00
14b381cf4a update the satrs example graph
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-05-10 17:03:46 +02:00
3746e9ebb0 Merge pull request 'Add timestamp to SimRequest' (#140) from add-timestamp-to-sim-request into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #140
2024-05-08 14:58:13 +02:00
d2fc783562 Merge remote-tracking branch 'origin/main' into add-timestamp-to-sim-request
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build queued...
2024-05-08 14:57:12 +02:00
282f799203 Merge pull request 'prep v0.2.0' (#184) from prep_v0.2.0 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #184
2024-05-02 14:57:14 +02:00
46dbb4309b
new clippy check
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-05-02 14:44:22 +02:00
42d1257e83
prepare next release v0.2.0
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-05-02 14:39:30 +02:00
583f6ce4d2 Merge pull request 'small robustness fix' (#183) from robustness-fix into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #183
2024-05-02 13:41:55 +02:00
408803fe86
small robustness fix
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
2024-05-02 13:41:27 +02:00
9ffe4d0ae0 Merge pull request 'smaller improvements' (#182) from smaller-improvements into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #182
2024-05-02 12:28:39 +02:00
e37061dcf0
smaller improvements 2024-05-02 12:28:09 +02:00
3a2ac11407 Merge pull request 'bounded the PUS stack hot loop' (#181) from pus-hot-loop-bounding into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #181
2024-05-02 12:02:02 +02:00
23327a7786
bounded the PUS stack hot loop
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
2024-05-02 12:01:24 +02:00
89d5a1022f Merge pull request 'optimize PUS stack code' (#180) from optimize-pus-stack-code into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #180
2024-05-02 11:59:26 +02:00
a00c843698
optimize PUS stack code
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-05-02 11:58:46 +02:00
c586fd7fef Merge pull request 'try unifying some direct PUS handler code' (#179) from unify-some-example-code into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #179
2024-05-02 11:29:11 +02:00
7e78e70a17
try unifying some direct PUS handler code
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-05-02 11:14:05 +02:00
424dfc439c Merge pull request 'simplified PUS stack' (#178) from simplify-pus-stack into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #178
2024-05-02 10:01:16 +02:00
45eb2f1343
cargo fmt
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-05-01 21:16:26 +02:00
736eb74e66
simplified PUS stack
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-05-01 21:13:08 +02:00
29f71c2a57 Merge pull request 'Reworked generic parameter handling for PUS service 1 and 5' (#175) from rework-generic-params-for-pus into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #175
2024-04-30 15:42:05 +02:00
f0d08b65a4 Merge branch 'main' into rework-generic-params-for-pus
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-30 13:35:08 +02:00
c7a74a844c Merge pull request 'renamed thread name' (#176) from small-tweak into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #176
2024-04-30 13:31:46 +02:00
9c60427f89 Reworked generic parameter handling for PUS service 1 and 5
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-30 13:29:55 +02:00
958ab9bab6
renamed thread name
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-25 11:11:31 +02:00
312849bddb Merge pull request 'More improvements for Event API' (#173) from improve-event-api into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #173
2024-04-24 19:34:33 +02:00
b0159a3ba7
prep next release candidate
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-24 19:18:45 +02:00
c477739f6d
more improvements for API, tests for example event module
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-24 18:50:08 +02:00
b7ce039406
add optional defmt support for events
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-24 18:36:00 +02:00
4736d40997 Merge pull request 'simplified event management' (#172) from simplify-event-management into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #172
2024-04-24 15:58:00 +02:00
5ec5124ea3
Updated events modules and docs
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-24 14:30:45 +02:00
5e43259d4f Merge branch 'main' into add-timestamp-to-sim-request
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-23 16:36:30 +02:00
bfaddd0ebb Merge pull request 'prep next release' (#171) from pre-v0.2.0-rc.4 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #171
2024-04-23 16:32:03 +02:00
423a068736 prep next release
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-23 14:55:19 +02:00
8022af1bf2 Merge pull request 'update Python client for example' (#170) from update-example-pyclient into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #170
2024-04-23 14:52:04 +02:00
acd2260dfd update Python client for example
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-23 14:22:50 +02:00
e5ee698dc4 Merge pull request 'TCP server improvements' (#169) from tcp-ip-improvements into main
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
Reviewed-on: #169
2024-04-23 13:21:41 +02:00
e8907c74d4
changelog
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-23 11:23:00 +02:00
536051e05b improvements and fixes
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-22 20:29:14 +02:00
701db659e9 Merge pull request 'formatting' (#168) from fmt into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #168
2024-04-22 15:47:58 +02:00
4b8e54b91b
formatting
Some checks are pending
Rust/sat-rs/pipeline/head This commit looks good
Rust/sat-rs/pipeline/pr-main Build started...
2024-04-22 10:42:49 +02:00
870d60cfd6 Merge pull request 'bugfix and improvements for CCSDS SP decoder' (#167) from ccsds-decoder-bugfix into main
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
Reviewed-on: #167
2024-04-22 10:23:12 +02:00
9e62e4292c
bugfix and improvements for CCSDS SP decoder
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-04-20 11:19:46 +02:00
b2e77fbc09 Merge pull request 'requires another hotfix' (#166) from and-another-docs-rs-hotfix into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #166
2024-04-17 20:42:09 +02:00
5371928496
docs_rs build argument hotfix
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build queued...
2024-04-17 20:41:30 +02:00
31cddbd99b Merge pull request 'bump msrv version' (#165) from bump-msrv into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #165
2024-04-17 18:56:21 +02:00
7c00e13e70 bump msrv version
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-17 18:10:32 +02:00
aa72063454 Merge pull request 'prepare next release candidate' (#164) from prep-v0.2.0-rc.2 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #164
2024-04-17 18:03:28 +02:00
7b37b76695 prepare next release candidate
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-17 17:19:38 +02:00
ea5d95c12d Merge pull request 'why is this an issue for docs-rs?' (#163) from fix-for-docs-build-docs-rs into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #163
2024-04-17 17:09:36 +02:00
c62adbb300 Merge branch 'main' into fix-for-docs-build-docs-rs
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-04-17 16:41:45 +02:00
9242b8a607 Merge pull request 'prepare MIB release' (#162) from prepare-mib-release into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #162
2024-04-17 16:37:34 +02:00
4a27d2605d
why is this an issue for docs-rs?
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-17 16:34:56 +02:00
8195245481
prepare MIB release
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
Rust/sat-rs/pipeline/pr-main Build queued...
2024-04-17 16:17:30 +02:00
f6f7519625 Merge pull request 'small cleanup' (#161) from small-cargo-toml-cleaning into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #161
2024-04-17 16:03:03 +02:00
0f0fbc1a18
small cleanup
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-17 15:17:46 +02:00
6e55e2ac95 Merge pull request 'Prepare next releases' (#160) from prep-next-satrs-releases into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #160
2024-04-17 14:58:01 +02:00
2f96bfe992
changelog sat-rs
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-17 10:03:17 +02:00
52aafb3aab
prep next releases
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-17 10:01:46 +02:00
6ce9cb5ead Merge pull request 'use released satrs-shared' (#159) from use-released-satrs-shared into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #159
2024-04-16 21:31:21 +02:00
273f79d1e6
use release satrs-shared
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-16 21:07:51 +02:00
622221835e Merge pull request 'allow sat-rs shared spacepackets range' (#158) from satrs-shared-spacepackets-range into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #158
2024-04-16 20:54:59 +02:00
e396ad2e7a
small fix
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-16 19:52:32 +02:00
772927d50b
allow spacepackets range
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-16 19:50:46 +02:00
be9a45e55f Merge pull request 'changelog satrs-shared v0.1.3' (#157) from changelog-satrs-shared into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #157
2024-04-16 19:48:12 +02:00
eee8a69550
changelog satrs-shared v0.1.3
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
2024-04-16 19:47:36 +02:00
f7a6d3ce47 Merge pull request 'bump spacepackets to v0.11.0' (#156) from bump-spacepackets into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #156
2024-04-16 19:46:17 +02:00
df97a3a93e
small adjustment
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-04-16 19:39:07 +02:00
42750e08c0
bump spacepackets to v0.11.0
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-16 19:26:46 +02:00
786671bbd7 Merge pull request 're-worked TMTC modules' (#155) from rework-tmtc-modules into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #155
2024-04-16 11:10:52 +02:00
63f37f0917 Re-worked TMTC modules
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-16 11:04:22 +02:00
8cfe3b81e7 Merge pull request 'bugfix for targeted services' (#154) from bugfix-targeted-services into main
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
Reviewed-on: #154
2024-04-13 15:10:14 +02:00
de50bec562
bugfix for targeted services
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-10 17:18:53 +02:00
39ab9fa27b Merge pull request 'closure param name tweak' (#153) from small-example-tweak into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #153
2024-04-10 17:17:13 +02:00
1dbc81a8f5
closure param name tweak
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-10 15:51:08 +02:00
1ad74ee1d5 Merge pull request 'this makes a bit more sense' (#152) from naming-improvement-pus-actions into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #152
2024-04-10 15:37:39 +02:00
f96fe6bdc0
this makes a bit more sense
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-04-10 15:19:08 +02:00
d43a8eb571 Merge pull request 'improve example structure' (#151) from improve-example-structure into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #151
2024-04-10 13:19:41 +02:00
0bbada90ef improve example structure
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-04-10 12:58:51 +02:00
3375780e00 Merge pull request 'Refactor and improve TCP servers' (#150) from refactor-tcp-server into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #150
2024-04-10 12:29:23 +02:00
de028ed827
bugfix in example
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build started...
2024-04-10 11:54:05 +02:00
d27ac5dfc9
refactored TCP server
Some checks are pending
Rust/sat-rs/pipeline/head Build queued...
2024-04-10 11:28:16 +02:00
c67b7cb93a
this is non-trivial
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
2024-04-09 19:40:55 +02:00
f71ba3e8d8 Merge pull request 'introduce stop signal handling for TCP' (#149) from tcp-server-stop-signal into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #149
2024-04-09 18:11:29 +02:00
975cd927f4 Merge branch 'main' into add-timestamp-to-sim-request
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-09 17:27:57 +02:00
3cc9dd3c48 introduce stop signal handling
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-09 17:21:43 +02:00
0fec994028 Merge pull request 'Update STM32F3 example' (#148) from update-stm32f3-example-tmtc-handling into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #148
2024-04-04 18:33:00 +02:00
226a134aff
Update STM32F3 example
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
1. New command to change blinky frequency.
2. Bump used sat-rs version.
2024-04-04 18:21:30 +02:00
aac59ec7c1 Merge pull request 'Major refactoring and update of PUS module' (#146) from pus-modules-update into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #146
2024-04-04 15:27:29 +02:00
ce7eb8650f changelog
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-04-04 15:20:07 +02:00
df2733a176
Major refactoring and update of PUS module 2024-04-04 15:18:53 +02:00
344fe6a4c0 Merge pull request 'Simplify low level PUS API' (#145) from simplify-low-level-pus-api into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #145
2024-03-29 23:41:14 +01:00
a5941751d7
Simplify low-level PUS API for verification and events
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-29 16:22:40 +01:00
977e29894b Merge pull request 'STM32 defmt + RTT support' (#144) from stm32-defmt-support into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #144
2024-03-29 12:34:02 +01:00
dd1417a368
Upgrade example to use defmt/RTT/probe-rs
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-03-28 23:47:07 +01:00
3195cf5111
update config.toml template file
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-03-28 23:06:16 +01:00
8280c70682 Merge pull request 'Framework to Library' (#143) from framework-to-library into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #143
2024-03-27 14:33:02 +01:00
19c5aa9b83
update rust book as well
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-03-27 14:17:59 +01:00
713b4e097b
update the README 2024-03-27 14:14:45 +01:00
9039c1b59a Merge branch 'main' into add-timestamp-to-sim-request
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-25 16:14:04 +01:00
746b31ec5d Merge pull request 'satrs-example RTIC v2' (#142) from satrs-example-rtic-v2 into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #142
2024-03-25 16:09:06 +01:00
2318cd4293
Update satrs-example for the STM32F3
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
- Update RTIC to v2
- Update Python client version
2024-03-25 14:26:07 +01:00
a6b57d3eb9 Merge pull request 'Update STM32F3 example' (#141) from update-stm32f3-example into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #141
2024-03-22 15:19:58 +01:00
bddd3132d4 added some more instructions for Python
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-22 13:18:20 +01:00
6a6ffba754 why have two files
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
2024-03-22 13:09:27 +01:00
d27a41e4de Start updating the STM32F3 Discovery example
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-03-22 13:08:01 +01:00
972bf19188
cargo fmt
Some checks are pending
Rust/sat-rs/pipeline/head Build started...
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-13 12:03:11 +01:00
9d711d2b73
add fern logging
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
2024-03-13 10:49:24 +01:00
d0005cdd63
this works
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
2024-03-13 10:36:08 +01:00
f00e6cf50c
we require an asynchronix update here I guess
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
2024-03-12 18:25:21 +01:00
128df9a813 Merge pull request 'First version of asynchronix based mini simulator' (#139) from init-minisim into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #139
2024-03-11 10:41:24 +01:00
7387be3bc3
new request/response API
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-11 10:26:48 +01:00
d3fb504545
clean up manifest file
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-03-09 15:14:15 +01:00
ae8e39f626
First version of asynchronix based mini simulator
Some checks are pending
Rust/sat-rs/pipeline/pr-main Build queued...
- Basic simulator with 3 devices
- Can be driven via a UDP interface
- The design allows driving the simulation via different interfaces in the future
  by using Request/Reply messaging, as sketched below.
2024-03-09 15:11:11 +01:00
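The commit body above outlines the mini simulator design: an interface-agnostic request/reply core that is currently driven over UDP. The following is a minimal sketch of that pattern using only the standard library; the message variants, the wire format, and the port are hypothetical and do not reproduce the actual satrs-minisim `SimRequest`/`SimReply` definitions.

```rust
use std::net::UdpSocket;

// Hypothetical request/reply messages; the real satrs-minisim types are richer
// (SimRequest, for instance, also carries a timestamp as of PR #140).
#[derive(Debug)]
enum SimRequest {
    Ping,
    ReadTemperature,
}

#[derive(Debug)]
enum SimReply {
    Pong,
    Temperature(f32),
}

// The request/reply core does not care which transport drives it.
fn handle_request(request: SimRequest) -> SimReply {
    match request {
        SimRequest::Ping => SimReply::Pong,
        // A real simulator would query the simulated device model here.
        SimRequest::ReadTemperature => SimReply::Temperature(21.5),
    }
}

fn main() -> std::io::Result<()> {
    // UDP is just one possible front-end for the simulator.
    let socket = UdpSocket::bind("127.0.0.1:7301")?;
    let mut buf = [0u8; 1024];
    loop {
        let (len, peer) = socket.recv_from(&mut buf)?;
        // Toy wire format (a single command byte) instead of a real serialization scheme.
        let request = match buf[..len].first() {
            Some(&0) => SimRequest::Ping,
            _ => SimRequest::ReadTemperature,
        };
        let reply = handle_request(request);
        socket.send_to(format!("{reply:?}").as_bytes(), peer)?;
    }
}
```

Driving the same `handle_request` core from a different interface, as the commit message suggests, would only require swapping out the transport loop.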
ab3d907d4e Merge pull request 'Refactor TMTC distribution modules' (#138) from ccsds-distrib-refactoring into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #138
2024-03-04 16:53:23 +01:00
3de5954898
Refactor TMTC distribution modules
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-03-04 16:26:34 +01:00
5600aa576c Merge pull request 'use generics for the PUS stack' (#134) from pus-stack-use-generics into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #134
2024-02-26 15:46:47 +01:00
88793cfa87
add some helper types
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-02-26 15:34:20 +01:00
223b637eb8
use generics for the PUS stack
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
2024-02-26 15:18:15 +01:00
cf9b115e1e Merge pull request 'Refactored Verification Reporter Module' (#132) from refactor-verification-mod into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #132
2024-02-26 11:58:57 +01:00
eea9b11b39
refactored verification reporter
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
- Use generics instead of trait objects where applicable (illustrated by the sketch below).
2024-02-26 11:41:42 +01:00
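The bullet above (use generics instead of trait objects where applicable) is a recurring theme in this change set, see also PR #134 ('use generics for the PUS stack'). The contrast below is purely illustrative: the trait and type names are hypothetical and do not reproduce the actual sat-rs verification reporter API.

```rust
// Hypothetical telemetry sender abstraction, not the actual sat-rs trait.
trait TmSender {
    fn send_tm(&self, tm: &[u8]);
}

struct UdpTmSender;

impl TmSender for UdpTmSender {
    fn send_tm(&self, tm: &[u8]) {
        println!("sending {} bytes via UDP", tm.len());
    }
}

// Trait-object variant: dynamic dispatch plus a heap allocation for the boxed sender.
struct DynReporter {
    sender: Box<dyn TmSender>,
}

impl DynReporter {
    fn report_success(&self, tm: &[u8]) {
        self.sender.send_tm(tm);
    }
}

// Generic variant: static dispatch, monomorphized per sender type, no allocator required.
struct GenericReporter<Sender: TmSender> {
    sender: Sender,
}

impl<Sender: TmSender> GenericReporter<Sender> {
    fn report_success(&self, tm: &[u8]) {
        self.sender.send_tm(tm);
    }
}

fn main() {
    let boxed = DynReporter { sender: Box::new(UdpTmSender) };
    boxed.report_success(b"ack");

    let generic = GenericReporter { sender: UdpTmSender };
    generic.report_success(b"ack");
}
```

The trade-off is the usual one: the generic form avoids `Box` and vtable calls, which matters for `no_std` targets, at the cost of an extra type parameter that propagates through the owning types.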
f21ab0017e Merge pull request 'fixed for scheduler' (#133) from scheduler-fixes into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #133
2024-02-26 11:15:50 +01:00
a7ca00317f
cargo fmt
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-26 11:00:48 +01:00
75fda42f4f
fixed for scheduler
Some checks failed
Rust/sat-rs/pipeline/head There was a failure building this commit
2024-02-26 10:53:33 +01:00
faf0f6f6c6 Merge pull request 'refactored event manager' (#131) from refactor-event-man into main
All checks were successful
Rust/sat-rs/pipeline/head This commit looks good
Reviewed-on: #131
2024-02-23 14:31:48 +01:00
a690c7720d Refactored event manager
All checks were successful
Rust/sat-rs/pipeline/pr-main This commit looks good
2024-02-23 14:19:30 +01:00
174 changed files with 167175 additions and 10181 deletions

.github/workflows/ci.yml (new file, 64 lines)

@ -0,0 +1,64 @@
name: ci
on: [push, pull_request]

jobs:
  check:
    name: Check build
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo check --release

  test:
    name: Run Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install nextest
        uses: taiki-e/install-action@nextest
      - run: cargo nextest run --all-features
      - run: cargo test --doc --all-features

  cross-check:
    name: Check Cross-Compilation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - armv7-unknown-linux-gnueabihf
          - thumbv7em-none-eabihf
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
      - run: cargo check -p satrs --release --target=${{matrix.target}} --no-default-features

  fmt:
    name: Check formatting
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo fmt --all -- --check

  docs:
    name: Check Documentation Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly
      - run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo clippy -- -D warnings
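The docs job above passes `--cfg docs_rs` to rustdoc via `build.rustdocflags`, which ties in with the 'fix-for-docs-build-docs-rs' and 'docs_rs build argument hotfix' commits in the list. As a hedged illustration (it is an assumption that sat-rs uses exactly this pattern, and the feature name below is made up), such a cfg flag is typically consumed in the crate root like this:

```rust
// Only enable the nightly-only doc_cfg feature when rustdoc is invoked with
// `--cfg docs_rs`, as the docs job above does.
#![cfg_attr(docs_rs, feature(doc_cfg))]

/// Hypothetical feature-gated item; the doc(cfg(...)) attribute makes the rendered
/// documentation show that the `alloc` feature is required for it.
#[cfg(feature = "alloc")]
#[cfg_attr(docs_rs, doc(cfg(feature = "alloc")))]
pub fn alloc_only_helper() {}
```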

.gitignore (7 changed lines)

@ -1,5 +1,10 @@
/target
target/
output.log
/Cargo.lock
output.log
output.log
/.idea/*
!/.idea/runConfigurations


@ -4,10 +4,12 @@ members = [
"satrs",
"satrs-mib",
"satrs-example",
"satrs-minisim",
"satrs-shared",
]
exclude = [
"satrs-example-stm32f3-disco",
"embedded-examples/stm32f3-disco-rtic",
"embedded-examples/stm32h7-rtic",
]


@ -1,4 +1,4 @@
<p align="center"> <img src="misc/satrs-logo.png" width="40%"> </p>
<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
[![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
[![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)
@ -8,12 +8,17 @@
sat-rs
=========
This is the repository of the sat-rs framework. Its primary goal is to provide re-usable components
This is the repository of the sat-rs library. Its primary goal is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements of these systems. You can find an overview of the project and the
link to the [more high-level sat-rs book](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
at the [IRS software projects website](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
This is early-stage software. Important features are missing. New releases
with breaking changes are published regularly, with all changes documented inside the respective
changelog files. You should only use this library if you are willing to work in this
environment.
A lot of the architecture and general design considerations are based on the
[FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
@ -34,9 +39,14 @@ This project currently contains following crates:
on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
Components to build a mission information base from the on-board software directly.
* [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco):
Example of a simple example on-board software using sat-rs components on a bare-metal system
with constrained resources.
* [`satrs-stm32f3-disco-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/satrs-stm32f3-disco-rtic):
A simple example using low-level sat-rs components on a bare-metal system
with constrained resources. This example uses the [RTIC](https://github.com/rtic-rs/rtic)
framework on the STM32F3-Discovery device.
* [`satrs-stm32h7-nucleo-rtic`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/embedded-examples/satrs-stm32h7-nucleo-rtic):
A simple example using sat-rs components on a bare-metal system
with constrained resources. This example uses the [RTIC](https://github.com/rtic-rs/rtic)
framework on the STM32H743ZIT device.
Each project has its own `CHANGELOG.md`.
@ -49,6 +59,17 @@ Each project has its own `CHANGELOG.md`.
[`satrs`](https://egit.irs.uni-stuttgart.de/rust/satrs/src/branch/main/satrs)
crate.
# Flight Heritage
There is an active and continuous effort to get early flight heritage for the sat-rs library.
Currently this library has the following flight heritage:
- Submission as an [OPS-SAT experiment](https://www.esa.int/Enabling_Support/Operations/OPS-SAT)
which has also
[flown on the satellite](https://blogs.esa.int/rocketscience/2024/05/21/ops-sat-reentry-tomorrow-final-experiments-continue/).
The application is strongly based on the sat-rs example application. You can find the repository
of the experiment [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs).
# Coverage
Coverage was generated using [`grcov`](https://github.com/mozilla/grcov). If you have not done so
@ -59,5 +80,5 @@ rustup component add llvm-tools-preview
cargo install grcov --locked
```
After that, you can simply run `coverage.py` to test the `satrs-core` crate with coverage. You can
After that, you can simply run `coverage.py` to test the `satrs` crate with coverage. You can
optionally supply the `--open` flag to open the coverage report in your web browser.


@ -33,6 +33,7 @@ pipeline {
         stage('Test') {
             steps {
                 sh 'cargo nextest r --all-features'
+                sh 'cargo test --doc --all-features'
             }
         }
         stage('Check with all features') {


@ -18,15 +18,19 @@ def generate_cov_report(open_report: bool, format: str, package: str):
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
grcov_cmd = (
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"-o {out_path}"
)
print(f"Running: {grcov_cmd}")
os.system(grcov_cmd)
if format == "lcov":
os.system(
lcov_cmd = (
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
print(f"Running: {lcov_cmd}")
os.system(lcov_cmd)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
@ -43,7 +47,7 @@ def main():
     parser.add_argument(
         "-p",
         "--package",
-        choices=["satrs"],
+        choices=["satrs", "satrs-minisim", "satrs-example"],
         default="satrs",
         help="Choose project to generate coverage for",
     )


@ -0,0 +1,37 @@
[target.'cfg(all(target_arch = "arm", target_os = "none"))']
# uncomment ONE of these three option to make `cargo run` start a GDB session
# which option to pick depends on your system
# You can also replace openocd.gdb by jlink.gdb when using a J-Link.
# runner = "arm-none-eabi-gdb -q -x openocd.gdb"
# runner = "gdb-multiarch -q -x openocd.gdb"
# runner = "gdb -q -x openocd.gdb"
runner = "probe-rs run --chip STM32F303VCTx"
rustflags = [
"-C", "linker=flip-link",
# LLD (shipped with the Rust toolchain) is used as the default linker
"-C", "link-arg=-Tlink.x",
"-C", "link-arg=-Tdefmt.x",
# This is needed if your flash or ram addresses are not aligned to 0x10000 in memory.x
# See https://github.com/rust-embedded/cortex-m-quickstart/pull/95
"-C", "link-arg=--nmagic",
# if you run into problems with LLD switch to the GNU linker by commenting out
# this line
# "-C", "linker=arm-none-eabi-ld",
# if you need to link to pre-compiled C libraries provided by a C toolchain
# use GCC as the linker by commenting out both lines above and then
# uncommenting the three lines below
# "-C", "linker=arm-none-eabi-gcc",
# "-C", "link-arg=-Wl,-Tlink.x",
# "-C", "link-arg=-nostartfiles",
]
[build]
# comment out the following line if you intend to run unit tests on host machine
target = "thumbv7em-none-eabihf" # Cortex-M4F and Cortex-M7F (with FPU)
[env]
DEFMT_LOG = "info"


@ -1,3 +1,4 @@
/target
/itm.txt
/.cargo/config*
/.vscode


@ -13,18 +13,18 @@ dependencies = [
[[package]]
name = "atomic-polyfill"
version = "0.1.11"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28"
checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4"
dependencies = [
"critical-section",
]
[[package]]
name = "autocfg"
version = "1.1.0"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bare-metal"
@ -55,20 +55,21 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bxcan"
version = "0.6.2"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b13b4b2ea9ab2ba924063ebb86ad895cb79f4a79bf90f27949eb20c335b30f9"
checksum = "40ac3d0c0a542d0ab5521211f873f62706a7136df415676f676d347e5a41dd80"
dependencies = [
"bitflags",
"nb 1.0.0",
"embedded-hal 0.2.7",
"nb 1.1.0",
"vcell",
]
[[package]]
name = "byteorder"
version = "1.4.3"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cast"
@ -87,31 +88,42 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.23"
version = "0.4.38"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
dependencies = [
"num-integer",
"num-traits",
]
[[package]]
name = "cortex-m"
version = "0.7.6"
name = "cobs"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70858629a458fdfd39f9675c4dc309411f2a3f83bede76988d81bf1a0ecee9e0"
checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
[[package]]
name = "cobs"
version = "0.2.3"
source = "git+https://github.com/robamu/cobs.rs.git?branch=all_features#c70a7f30fd00a7cbdb7666dec12b437977385d40"
[[package]]
name = "cortex-m"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9"
dependencies = [
"bare-metal 0.2.5",
"bitfield",
"embedded-hal",
"critical-section",
"embedded-hal 0.2.7",
"volatile-register",
]
[[package]]
name = "cortex-m-rt"
version = "0.7.2"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6d3328b8b5534f0c90acd66b68950f2763b37e0173cac4d8b4937c4a80761f9"
checksum = "2722f5b7d6ea8583cffa4d247044e280ccbb9fe501bed56552e2ba48b02d5f3d"
dependencies = [
"cortex-m-rt-macros",
]
@ -124,48 +136,44 @@ checksum = "f0f6f3e36f203cfedbc78b357fb28730aa2c6dc1ab060ee5c2405e843988d3c7"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 1.0.109",
]
[[package]]
name = "cortex-m-rtic"
version = "1.1.3"
name = "cortex-m-semihosting"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6b82f1c39acd6c3a35c2013b6110c20f5bc534522791fabadeed49ccada2dce"
checksum = "c23234600452033cc77e4b761e740e02d2c4168e11dbf36ab14a0f58973592b0"
dependencies = [
"bare-metal 1.0.0",
"cortex-m",
"cortex-m-rtic-macros",
"heapless",
"rtic-core",
"rtic-monotonic",
"version_check",
]
[[package]]
name = "cortex-m-rtic-macros"
version = "1.1.5"
name = "crc"
version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8e9645ef54bec1cf70ac33e9bf9566e6507ab5b41ae6baf3735662194e8607"
checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"rtic-syntax",
"syn",
"crc-catalog",
]
[[package]]
name = "crc-catalog"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
[[package]]
name = "critical-section"
version = "1.1.1"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52"
checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]]
name = "darling"
version = "0.14.2"
version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
checksum = "83b2eb4d90d12bdda5ed17de686c2acb4c57914f8f921b8da7e112b5a36f3fe1"
dependencies = [
"darling_core",
"darling_macro",
@ -173,26 +181,113 @@ dependencies = [
[[package]]
name = "darling_core"
version = "0.14.2"
version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
checksum = "622687fe0bac72a04e5599029151f5796111b90f1baaa9b544d807a5e31cd120"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"syn",
"syn 2.0.65",
]
[[package]]
name = "darling_macro"
version = "0.14.2"
version = "0.20.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
checksum = "733cabb43482b1a1b53eee8583c2b9e8684d592215ea83efd305dd31bc2f0178"
dependencies = [
"darling_core",
"quote",
"syn",
"syn 2.0.65",
]
[[package]]
name = "defmt"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a99dd22262668b887121d4672af5a64b238f026099f1a2a1b322066c9ecfe9e0"
dependencies = [
"bitflags",
"defmt-macros",
]
[[package]]
name = "defmt-brtt"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2f0ac3635d0c89d12b8101fcb44a7625f5f030a1c0491124b74467eb5a58a78"
dependencies = [
"critical-section",
"defmt",
]
[[package]]
name = "defmt-macros"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9f309eff1f79b3ebdf252954d90ae440599c26c2c553fe87a2d17195f2dcb"
dependencies = [
"defmt-parser",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.65",
]
[[package]]
name = "defmt-parser"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f"
dependencies = [
"thiserror",
]
[[package]]
name = "defmt-test"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290966e8c38f94b11884877242de876280d0eab934900e9642d58868e77c5df1"
dependencies = [
"cortex-m-rt",
"cortex-m-semihosting",
"defmt",
"defmt-test-macros",
]
[[package]]
name = "defmt-test-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.65",
]
[[package]]
name = "delegate"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee5df75c70b95bd3aacc8e2fd098797692fb1d54121019c4de481e42f04c8a1"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "derive-new"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.65",
]
[[package]]
@ -214,6 +309,12 @@ dependencies = [
"void",
]
[[package]]
name = "embedded-hal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361a90feb7004eca4019fb28352a9465666b24f840f5c3cddf0ff13920590b89"
[[package]]
name = "embedded-time"
version = "0.12.1"
@ -225,25 +326,31 @@ dependencies = [
[[package]]
name = "enumset"
version = "1.0.12"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753"
checksum = "226c0da7462c13fb57e5cc9e0dc8f0635e7d27f276a3a7fd30054647f669007d"
dependencies = [
"enumset_derive",
]
[[package]]
name = "enumset_derive"
version = "0.6.1"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0"
checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn",
"syn 2.0.65",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "fnv"
version = "1.0.7"
@ -252,18 +359,42 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fugit"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab17bb279def6720d058cb6c052249938e7f99260ab534879281a95367a87e5"
checksum = "17186ad64927d5ac8f02c1e77ccefa08ccd9eaa314d5a4772278aa204a22f7e7"
dependencies = [
"gcd",
]
[[package]]
name = "gcd"
version = "2.2.0"
name = "futures-core"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4b1b088ad0a967aa29540456b82fc8903f854775d33f71e9709c4efb3dfbfd2"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "gcd"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a"
[[package]]
name = "generic-array"
@ -276,9 +407,9 @@ dependencies = [
[[package]]
name = "generic-array"
version = "0.14.6"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
@ -286,29 +417,26 @@ dependencies = [
[[package]]
name = "hash32"
version = "0.2.1"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67"
checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
dependencies = [
"byteorder",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
[[package]]
name = "heapless"
version = "0.7.16"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743"
checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
dependencies = [
"atomic-polyfill",
"hash32",
"rustc_version 0.4.0",
"spin",
"stable_deref_trait",
]
@ -320,41 +448,14 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "1.9.2"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"autocfg",
"equivalent",
"hashbrown",
]
[[package]]
name = "itm_logger"
version = "0.1.3-pre.0"
dependencies = [
"cortex-m",
"log",
]
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "lsm303dlhc"
version = "0.2.0"
@ -362,7 +463,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5d1a5c290951321d1b0d4a40edd828537de9889134a0e67c5146542ae57706"
dependencies = [
"cast",
"embedded-hal",
"embedded-hal 0.2.7",
"generic-array 0.11.2",
]
@ -372,7 +473,7 @@ version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc4010833aea396656c2f91ee704d51a6f1329ec2ab56ffd00bfd56f7481ea94"
dependencies = [
"generic-array 0.14.6",
"generic-array 0.14.7",
]
[[package]]
@ -381,14 +482,14 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f"
dependencies = [
"nb 1.0.0",
"nb 1.1.0",
]
[[package]]
name = "nb"
version = "1.0.0"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "546c37ac5d9e56f55e73b677106873d9d9f5190605e41a856503623648488cae"
checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d"
[[package]]
name = "num"
@ -414,19 +515,18 @@ dependencies = [
[[package]]
name = "num-integer"
version = "0.1.45"
version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-iter"
version = "0.1.43"
version = "0.1.45"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf"
dependencies = [
"autocfg",
"num-integer",
@ -446,27 +546,60 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.15"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "panic-itm"
version = "0.4.2"
name = "num_enum"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d577d97d1b31268087b6dddf2470e6794ef5eee87d9dca7fcd0481695391a4c"
checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.65",
]
[[package]]
name = "panic-probe"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4047d9235d1423d66cc97da7d07eddb54d4f154d6c13805c6d0793956f4f25b0"
dependencies = [
"cortex-m",
"defmt",
]
[[package]]
name = "paste"
version = "1.0.11"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pin-project-lite"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "proc-macro-error"
@ -477,7 +610,7 @@ dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn",
"syn 1.0.109",
"version_check",
]
@ -494,31 +627,54 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.49"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5"
checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.23"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rtcc"
version = "0.3.0"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3623619ce77c09a7d87cf7c61c5c887b9c7dee8805f66af6c4aa5824be4d9930"
checksum = "95973c3a0274adc4f3c5b70d2b5b85618d6de9559a6737d3293ecae9a2fc0839"
dependencies = [
"chrono",
]
[[package]]
name = "rtic"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c443db16326376bdd64377da268f6616d5f804aba8ce799bac7d1f7f244e9d51"
dependencies = [
"atomic-polyfill",
"bare-metal 1.0.0",
"cortex-m",
"critical-section",
"rtic-core",
"rtic-macros",
]
[[package]]
name = "rtic-common"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0786b50b81ef9d2a944a000f60405bb28bf30cd45da2d182f3fe636b2321f35c"
dependencies = [
"critical-section",
]
[[package]]
name = "rtic-core"
version = "1.0.0"
@ -526,21 +682,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9369355b04d06a3780ec0f51ea2d225624db777acbc60abd8ca4832da5c1a42"
[[package]]
name = "rtic-monotonic"
version = "1.0.0"
name = "rtic-macros"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb8b0b822d1a366470b9cea83a1d4e788392db763539dc4ba022bcc787fece82"
[[package]]
name = "rtic-syntax"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ad3ae243dd8d0a1b064615f664d4fa7e63929939074c564cbe5efdc4c503065"
checksum = "54053598ea24b1b74937724e366558412a1777eb2680b91ef646db540982789a"
dependencies = [
"indexmap",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
"syn 2.0.65",
]
[[package]]
name = "rtic-monotonics"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058c2397dbd5bb4c5650a0e368c3920953e458805ff5097a0511b8147b3619d7"
dependencies = [
"atomic-polyfill",
"cfg-if",
"cortex-m",
"embedded-hal 1.0.0",
"fugit",
"rtic-time",
]
[[package]]
name = "rtic-time"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b232e7aebc045cfea81cdd164bc2727a10aca9a4568d406d0a5661cdfd0f19"
dependencies = [
"critical-section",
"futures-util",
"rtic-common",
]
[[package]]
@ -558,32 +734,59 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver 1.0.16",
"semver 1.0.23",
]
[[package]]
name = "sat-rs-example-stm32f-disco"
name = "satrs"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "866fcae3b683ccc37b5ad77982483a0ee01d5dc408dea5aad2117ad404b60fe1"
dependencies = [
"cobs 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"crc",
"defmt",
"delegate",
"derive-new",
"num-traits",
"num_enum",
"paste",
"satrs-shared",
"smallvec",
"spacepackets",
]
[[package]]
name = "satrs-shared"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6042477018c2d43fffccaaa5099bc299a58485139b4d31c5b276889311e474f1"
dependencies = [
"spacepackets",
]
[[package]]
name = "satrs-stm32f3-disco-rtic"
version = "0.1.0"
dependencies = [
"cobs 0.2.3 (git+https://github.com/robamu/cobs.rs.git?branch=all_features)",
"cortex-m",
"cortex-m-rt",
"cortex-m-rtic",
"embedded-hal",
"cortex-m-semihosting",
"defmt",
"defmt-brtt",
"defmt-test",
"embedded-hal 0.2.7",
"enumset",
"heapless",
"itm_logger",
"panic-itm",
"panic-probe",
"rtic",
"rtic-monotonics",
"satrs",
"stm32f3-discovery",
"stm32f3xx-hal",
"systick-monotonic",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
[[package]]
name = "semver"
version = "0.9.0"
@ -595,9 +798,9 @@ dependencies = [
[[package]]
name = "semver"
version = "1.0.16"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
[[package]]
name = "semver-parser"
@ -607,17 +810,28 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "slice-group-by"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7"
[[package]]
name = "spin"
version = "0.9.4"
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "spacepackets"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e85574d113a06312010c0ba51aadccd4ba2806231ebe9a49fc6473d0534d8696"
dependencies = [
"lock_api",
"crc",
"defmt",
"delegate",
"num-traits",
"num_enum",
"zerocopy",
]
[[package]]
@ -639,9 +853,9 @@ dependencies = [
[[package]]
name = "stm32f3"
version = "0.14.0"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "265cda62ac13307414de4aca58dbbbd8038ddba85cffbb335823aa216f2e3200"
checksum = "b28b37228ef3fa47956af38c6abd756e912f244c1657f14e66d42fc8d74ea96f"
dependencies = [
"bare-metal 1.0.0",
"cortex-m",
@ -651,7 +865,8 @@ dependencies = [
[[package]]
name = "stm32f3-discovery"
version = "0.8.0-pre.0"
version = "0.8.0-alpha.0"
source = "git+https://github.com/robamu/stm32f3-discovery?branch=complete-dma-update-hal#5ccacae07ceff02d7d3649df67a6a0ba2a144752"
dependencies = [
"accelerometer",
"cortex-m",
@ -663,20 +878,20 @@ dependencies = [
[[package]]
name = "stm32f3xx-hal"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e422c5c044e8f3a068b1e14b83c071449e27c9d4bc0e24f972b552d79f2be03"
version = "0.11.0-alpha.0"
source = "git+https://github.com/robamu/stm32f3xx-hal?branch=complete-dma-update#04fc76b7912649c84b57bd0ab803ea3ccf2aadae"
dependencies = [
"bare-metal 1.0.0",
"bxcan",
"cfg-if",
"cortex-m",
"cortex-m-rt",
"critical-section",
"embedded-dma",
"embedded-hal",
"embedded-hal 0.2.7",
"embedded-time",
"enumset",
"nb 1.0.0",
"nb 1.1.0",
"num-traits",
"paste",
"rtcc",
"slice-group-by",
@ -691,14 +906,14 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90a4adc8cbd1726249b161898e48e0f3f1ce74d34dc784cbbc98fba4ed283fbf"
dependencies = [
"embedded-hal",
"embedded-hal 0.2.7",
]
[[package]]
name = "syn"
version = "1.0.107"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
@ -706,27 +921,47 @@ dependencies = [
]
[[package]]
name = "systick-monotonic"
version = "1.0.1"
name = "syn"
version = "2.0.65"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67fb822d5c615a0ae3a4795ee5b1d06381c7faf488d861c0a4fa8e6a88d5ff84"
checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106"
dependencies = [
"cortex-m",
"fugit",
"rtic-monotonic",
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thiserror"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.65",
]
[[package]]
name = "typenum"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "unicode-ident"
version = "1.0.6"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "usb-device"
@ -754,9 +989,30 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "volatile-register"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ee8f19f9d74293faf70901bc20ad067dc1ad390d2cbf1e3f75f721ffee908b6"
checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc"
dependencies = [
"vcell",
]
[[package]]
name = "zerocopy"
version = "0.7.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.65",
]

View File

@ -0,0 +1,84 @@
[package]
name = "satrs-stm32f3-disco-rtic"
version = "0.1.0"
edition = "2021"
default-run = "satrs-stm32f3-disco-rtic"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7"
defmt = "0.3"
defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
panic-probe = { version = "0.3", features = ["print-defmt"] }
embedded-hal = "0.2.7"
cortex-m-semihosting = "0.5.0"
enumset = "1"
heapless = "0.8"
[dependencies.rtic]
version = "2"
features = ["thumbv7-backend"]
[dependencies.rtic-monotonics]
version = "1"
features = ["cortex-m-systick"]
[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
branch = "all_features"
default-features = false
[dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.11.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"]
branch = "complete-dma-update"
# Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0"
branch = "complete-dma-update-hal"
# Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery"
[dependencies.satrs]
# path = "satrs"
version = "0.2"
default-features = false
features = ["defmt"]
[dev-dependencies]
defmt-test = "0.3"
# cargo test
[profile.test]
codegen-units = 1
debug = 2
debug-assertions = true # <-
incremental = false
opt-level = "s" # <-
overflow-checks = true # <-
# cargo build/run --release
[profile.release]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-
# cargo test --release
[profile.bench]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-

View File

@ -0,0 +1,114 @@
sat-rs example for the STM32F3-Discovery board
=======
This example application shows how the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
can be used on an embedded target.
It also shows how a relatively simple OBSW could be built when no standard runtime is available.
It uses [RTIC](https://rtic.rs/2/book/en/) as the concurrency framework and the
[defmt](https://defmt.ferrous-systems.com/) framework for logging.
The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also
used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the
[Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction
to embedded Rust.
## Pre-Requisites
Make sure the following tools are installed:
1. [`probe-rs`](https://probe.rs/): Application used to flash and debug the MCU.
2. Optional and recommended: [VS Code](https://code.visualstudio.com/) with
[probe-rs plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger)
for debugging.
## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
If you have not installed it yet, you can do so with
```sh
rustup target add thumbv7em-none-eabihf
```
A default `.cargo` config file is provided for this project, but needs to be copied to have
the correct name. This is so that the config file can be updated or edited for custom needs
without being tracked by git.
```sh
cp def_config.toml config.toml
```
The configuration file will also set the target so it does not always have to be specified with
the `--target` argument.
## Building
After that, assuming that you have a `.cargo/config.toml` setting the correct build target,
you can simply build the application with
```sh
cargo build
```
## Flashing from the command line
You can flash the application from the command line using `probe-rs`:
```sh
probe-rs run --chip STM32F303VCTx
```
## Debugging with VS Code
The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug
the board is a Mini-USB cable. The code in this repository was debugged using [`probe-rs`](https://probe.rs/docs/tools/debuggerA)
and the VS Code [`probe-rs` plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger).
Make sure to install this plugin first.
Sample configuration files for VS Code are provided inside the `vscode` folder.
Use `cp vscode .vscode -r` to copy them into your project. After that, you can simply use `Run` and `Debug`
to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS code or adding it to a workspace.
## Commanding with Python
When the SW is running on the Discovery board, you can command the MCU via a serial interface,
using COBS encoded PUS packets.
It is recommended to use a virtual environment to do this. To set up one in the command line,
you can use `python3 -m venv venv` on Unix systems or `py -m venv venv` on Windows systems.
After doing this, you can check the [venv tutorial](https://docs.python.org/3/tutorial/venv.html)
on how to activate the environment and then use the following command to install the required
dependency:
```sh
pip install -r requirements.txt
```
The packets are exchanged using a dedicated serial interface. You can use any generic USB-to-UART
converter device with the TX pin connected to the PA3 pin and the RX pin connected to the PA2 pin.
A default configuration file for the python application is provided and can be used by running
```sh
cp def_tmtc_conf.json tmtc_conf.json
```
After that, you can for example send a ping to the MCU using the following command
```sh
./main.py -p /ping
```
You can configure the LED blink frequency using
```sh
./main.py -p /change_blink_freq
```
All these commands package a PUS telecommand which is sent to the MCU using COBS as the packet
framing format.
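
For reference, each telecommand travels over the wire as a COBS-encoded PUS packet delimited by
`0x00` sentinel bytes, mirroring how the firmware frames its own telemetry. The following minimal
host-side sketch is an illustration only: it assumes the same `cobs` crate the firmware uses, and
the `frame_tc` helper and the `raw_tc` bytes are placeholders, not part of the Python client.
```rust
// Minimal sketch of the serial framing: 0x00 start sentinel, COBS-encoded
// PUS packet, 0x00 end sentinel. `raw_tc` stands in for bytes produced by
// any PUS packet library.
fn frame_tc(raw_tc: &[u8], frame_buf: &mut [u8]) -> usize {
    frame_buf[0] = 0; // start-of-frame sentinel
    let encoded_len = cobs::encode(raw_tc, &mut frame_buf[1..]);
    frame_buf[encoded_len + 1] = 0; // end-of-frame sentinel
    encoded_len + 2 // total number of bytes to write to the serial port
}

fn main() {
    // Placeholder bytes standing in for a serialized PUS TC[17,1] ping.
    let raw_tc = [0x18, 0x02, 0xc0, 0x00, 0x00, 0x03, 0x2f, 0x11, 0x01];
    let mut frame = [0u8; 64];
    let frame_len = frame_tc(&raw_tc, &mut frame);
    println!("framed {} bytes: {:x?}", frame_len, &frame[..frame_len]);
}
```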

File diff suppressed because it is too large

View File

@ -24,7 +24,9 @@ break main
# # send captured ITM to the file itm.fifo
# # (the microcontroller SWO pin must be connected to the programmer SWO pin)
# # 8000000 must match the core clock frequency
monitor tpiu config internal itm.txt uart off 8000000
# # 2000000 is the frequency of the SWO pin. This was added for newer
# openocd versions like v0.12.0.
# monitor tpiu config internal itm.txt uart off 8000000 2000000
# # OR: make the microcontroller SWO pin output compatible with UART (8N1)
# # 8000000 must match the core clock frequency
@ -32,7 +34,7 @@ monitor tpiu config internal itm.txt uart off 8000000
# monitor tpiu config external uart off 8000000 2000000
# # enable ITM port 0
monitor itm port 0 on
# monitor itm port 0 on
load

View File

@ -1,4 +1,5 @@
/venv
/.tmtc-history.txt
/log
/.idea/*
!/.idea/runConfigurations

View File

@ -1,39 +1,40 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import enum
import struct
import logging
import sys
import time
from typing import Optional, cast
from typing import Any, Optional, cast
from prompt_toolkit.history import FileHistory, History
from spacepackets.ecss.tm import CdsShortTimestamp
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusVerificator
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusTm, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import CcsdsTmtcBackend, TcHandlerBase, ProcedureParamsWrapper
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.core.ccsds_backend import QueueWrapper
from tmtccmd.logging import add_colorlog_console_logger
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tm import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com_if import ComInterface
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
default_json_path,
SetupParams,
TmTcCfgHookBase,
TmtcDefinitionWrapper,
CoreServiceList,
OpCodeEntry,
HookBase,
params_to_procedure_conversion,
)
from tmtccmd.config.com_if import SerialCfgWrapper
from tmtccmd.config.com import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging import get_console_logger
from tmtccmd.logging.pus import (
RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper,
TimedLogWhen,
)
from tmtccmd.tc import (
from tmtccmd.tmtc import (
TcQueueEntryType,
ProcedureWrapper,
TcProcedureType,
@ -41,27 +42,26 @@ from tmtccmd.tc import (
SendCbParams,
DefaultPusQueueHelper,
)
from tmtccmd.tm.pus_5_event import Service5Tm
from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.pus.s5_fsfw_event import Service5Tm
from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
from tmtccmd.util.tmtc_printer import FsfwTmTcPrinter
LOGGER = get_console_logger()
_LOGGER = logging.getLogger()
EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(TmTcCfgHookBase):
class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path)
super().__init__(json_cfg_path)
def assign_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com_if import (
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (
create_com_interface_default,
create_com_interface_cfg_default,
)
assert self.cfg_path is not None
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
@ -76,35 +76,14 @@ class SatRsConfigHook(TmTcCfgHookBase):
cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg)
def get_tmtc_definitions(self) -> TmtcDefinitionWrapper:
from tmtccmd.config.globals import get_default_tmtc_defs
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return create_cmd_definition_tree()
defs = get_default_tmtc_defs()
srv_5 = OpCodeEntry()
srv_5.add("0", "Event Test")
defs.add_service(
name=CoreServiceList.SERVICE_5.value,
info="PUS Service 5 Event",
op_code_entry=srv_5,
)
srv_17 = OpCodeEntry()
srv_17.add("0", "Ping Test")
defs.add_service(
name=CoreServiceList.SERVICE_17_ALT,
info="PUS Service 17 Test",
op_code_entry=srv_17,
)
srv_3 = OpCodeEntry()
defs.add_service(
name=CoreServiceList.SERVICE_3,
info="PUS Service 3 Housekeeping",
op_code_entry=srv_3,
)
return defs
def perform_mode_operation(self, tmtc_backend: CcsdsTmtcBackend, mode: int):
LOGGER.info("Mode operation hook was called")
pass
def get_cmd_history(self) -> Optional[History]:
"""Optionlly return a history class for the past command paths which will be used
when prompting a command path from the user in CLI mode."""
return FileHistory(".tmtc-history.txt")
def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids
@ -112,74 +91,77 @@ class SatRsConfigHook(TmTcCfgHookBase):
return get_core_object_ids()
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
root_node.add_child(CmdTreeNode("change_blink_freq", "Change blink frequency"))
return root_node
class PusHandler(SpecificApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
printer: FsfwTmTcPrinter,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
self.printer = printer
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: any):
def handle_tm(self, packet: bytes, _user_args: Any):
try:
tm_packet = PusTelemetry.unpack(packet)
pus_tm = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
LOGGER.warning("Could not generate PUS TM object from raw data")
LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = tm_packet.service
dedicated_handler = False
service = pus_tm.service
tm_packet = None
if service == 1:
tm_packet = Service1Tm.unpack(data=packet, params=UnpackParams(1, 2))
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
LOGGER.info(
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
LOGGER.warning(
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
dedicated_handler = True
if service == 3:
LOGGER.info("No handling for HK packets implemented")
LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet)
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
dedicated_handler = True
_LOGGER.info("received JSON string: " + json_str.decode("utf-8"))
if service == 5:
tm_packet = Service5Tm.unpack(packet)
tm_packet = Service5Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if service == 17:
tm_packet = Service17Tm.unpack(packet)
dedicated_handler = True
tm_packet = Service17Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if tm_packet.subservice == 2:
self.printer.file_logger.info("Received Ping Reply TM[17,2]")
LOGGER.info("Received Ping Reply TM[17,2]")
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
self.printer.file_logger.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
LOGGER.info(
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
if tm_packet is None:
LOGGER.info(
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(packet)
self.raw_logger.log_tm(tm_packet)
if not dedicated_handler and tm_packet is not None:
self.printer.handle_long_tm_print(packet_if=tm_packet, info_if=tm_packet)
tm_packet = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
self.raw_logger.log_tm(pus_tm)
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
@ -198,8 +180,11 @@ class TcHandler(TcHandlerBase):
self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=None,
queue_wrapper=QueueWrapper.empty(),
tc_sched_timestamp_len=7,
seq_cnt_provider=seq_count_provider,
pus_verificator=verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
)
def send_cb(self, send_params: SendCbParams):
@ -212,61 +197,74 @@ class TcHandler(TcHandlerBase):
)
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack()
LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
_LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry()
LOGGER.info(log_entry.log_str)
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, helper: ProcedureWrapper):
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
LOGGER.info(
f"Queue handling finished for service {def_proc.service} and "
f"op code {def_proc.op_code}"
)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, helper: ProcedureWrapper, wrapper: FeedWrapper):
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
service = def_proc.service
op_code = def_proc.op_code
if (
service == CoreServiceList.SERVICE_17
or service == CoreServiceList.SERVICE_17_ALT
):
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
cmd_path = def_proc.cmd_path
if cmd_path == "/ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
q.add_pus_tc(PusTelecommand(service=17, subservice=1))
if cmd_path == "/change_blink_freq":
self.create_change_blink_freq_command(q)
def create_change_blink_freq_command(self, q: DefaultPusQueueHelper):
q.add_log_cmd("Changing blink frequency")
while True:
blink_freq = int(
input(
"Please specify new blink frequency in ms. Valid Range [2..10000]: "
)
)
if blink_freq < 2 or blink_freq > 10000:
print(
"Invalid blink frequency. Please specify a value between 2 and 10000."
)
continue
break
app_data = struct.pack("!I", blink_freq)
q.add_pus_tc(PusTelecommand(service=8, subservice=1, app_data=app_data))
def main():
add_colorlog_console_logger(_LOGGER)
tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args()
post_args_wrapper = parser_wrapper.parse(hook_obj)
params = SetupParams()
post_args_wrapper = parser_wrapper.parse(hook_obj, params)
proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(params, proc_wrapper)
post_args_wrapper.set_params_without_prompts(proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(params, proc_wrapper)
post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
# Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper()
printer = FsfwTmTcPrinter(tmtc_logger.logger)
file_logger = tmtc_logger.logger
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, LOGGER, printer.file_logger)
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(verification_wrapper, printer, raw_logger)
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
@ -288,7 +286,7 @@ def main():
if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
LOGGER.info("TMTC Client in IDLE mode")
_LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8)

View File

@ -1,2 +1,2 @@
tmtccmd == 4.0.0a0
tmtccmd == 8.0.1
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd

View File

@ -1,17 +1,15 @@
#![no_std]
#![no_main]
use satrs_stm32f3_disco_rtic as _;
extern crate panic_itm;
use cortex_m_rt::entry;
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::stm32f3xx_hal::delay::Delay;
use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*};
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch};
#[entry]
#[cortex_m_rt::entry]
fn main() -> ! {
defmt::println!("STM32F3 Discovery Blinky");
let dp = pac::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
let cp = cortex_m::Peripherals::take().unwrap();
@ -30,49 +28,49 @@ fn main()-> ! {
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper
&mut gpioe.otyper,
);
let delay_ms = 200u16;
loop {
leds.ld3.toggle().ok();
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
leds.ld3.toggle().ok();
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
//explicit on/off
leds.ld4.on().ok();
leds.ld4_nw.on().ok();
delay.delay_ms(delay_ms);
leds.ld4.off().ok();
leds.ld4_nw.off().ok();
delay.delay_ms(delay_ms);
leds.ld5.on().ok();
leds.ld5_ne.on().ok();
delay.delay_ms(delay_ms);
leds.ld5.off().ok();
leds.ld5_ne.off().ok();
delay.delay_ms(delay_ms);
leds.ld6.on().ok();
leds.ld6_w.on().ok();
delay.delay_ms(delay_ms);
leds.ld6.off().ok();
leds.ld6_w.off().ok();
delay.delay_ms(delay_ms);
leds.ld7.on().ok();
leds.ld7_e.on().ok();
delay.delay_ms(delay_ms);
leds.ld7.off().ok();
leds.ld7_e.off().ok();
delay.delay_ms(delay_ms);
leds.ld8.on().ok();
leds.ld8_sw.on().ok();
delay.delay_ms(delay_ms);
leds.ld8.off().ok();
leds.ld8_sw.off().ok();
delay.delay_ms(delay_ms);
leds.ld9.on().ok();
leds.ld9_se.on().ok();
delay.delay_ms(delay_ms);
leds.ld9.off().ok();
leds.ld9_se.off().ok();
delay.delay_ms(delay_ms);
leds.ld10.on().ok();
leds.ld10_s.on().ok();
delay.delay_ms(delay_ms);
leds.ld10.off().ok();
leds.ld10_s.off().ok();
delay.delay_ms(delay_ms);
}
}

View File

@ -0,0 +1,51 @@
#![no_main]
#![no_std]
use cortex_m_semihosting::debug;
use defmt_brtt as _; // global logger
use stm32f3xx_hal as _; // memory layout
use panic_probe as _;
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(debug::EXIT_SUCCESS);
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(debug::EXIT_FAILURE);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. the module can be in any file but there can only be at most
// one `#[tests]` module in this library crate
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}

View File

@ -0,0 +1,684 @@
#![no_std]
#![no_main]
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportCreator, VerificationToken,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::EcssEnumU16;
use satrs::spacepackets::CcsdsPacket;
use satrs::spacepackets::{ByteConversionError, SpHeader};
// global logger + panicking-behavior + memory layout
use satrs_stm32f3_disco_rtic as _;
use rtic::app;
use heapless::{mpmc::Q8, Vec};
#[allow(unused_imports)]
use rtic_monotonics::systick::fugit::{MillisDurationU32, TimerInstantU32};
use rtic_monotonics::systick::ExtU32;
use satrs::seq_count::SequenceCountProviderCore;
use satrs::spacepackets::{ecss::PusPacket, ecss::WritablePusPacket};
use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
const UART_BAUD: u32 = 115200;
const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u32 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
const MAX_TC_LEN: usize = 128;
const MAX_TM_LEN: usize = 128;
pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type InstantFugit = TimerInstantU32<1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
// This is the predictable maximum overhead of the COBS encoding scheme.
// It is simply the maximum packet length divided by 254, rounded up. For the
// 128 byte maximum packet lengths used here, this is a single byte.
const COBS_TC_OVERHEAD: usize = (MAX_TC_LEN + 254 - 1) / 254;
const COBS_TM_OVERHEAD: usize = (MAX_TM_LEN + 254 - 1) / 254;
const TC_BUF_LEN: usize = MAX_TC_LEN + COBS_TC_OVERHEAD;
const TM_BUF_LEN: usize = MAX_TM_LEN + COBS_TM_OVERHEAD;
// This is a static buffer which should ONLY (!) be used as the TX DMA
// transfer buffer.
static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// This is a static buffer which should ONLY (!) be used as the RX DMA
// transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
type TmPacket = Vec<u8, MAX_TM_LEN>;
type TcPacket = Vec<u8, MAX_TC_LEN>;
static TM_REQUESTS: Q8<TmPacket> = Q8::new();
use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef {
atomic: AtomicU16,
ordering: Ordering,
}
impl SeqCountProviderAtomicRef {
pub const fn new(ordering: Ordering) -> Self {
Self {
atomic: AtomicU16::new(0),
ordering,
}
}
}
impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
fn get(&self) -> u16 {
self.atomic.load(self.ordering)
}
fn increment(&self) {
self.atomic.fetch_add(1, self.ordering);
}
fn get_and_increment(&self) -> u16 {
self.atomic.fetch_add(1, self.ordering)
}
}
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed);
pub struct TxIdle {
tx: TxType,
dma_channel: dma1::C7,
}
#[derive(Debug, defmt::Format)]
pub enum TmSendError {
ByteConversion(ByteConversionError),
Queue,
}
impl From<ByteConversionError> for TmSendError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
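// Serializes a generated TM packet into a heapless vector and enqueues it for the
// serial TX handler task.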
fn send_tm(tm_creator: PusTmCreator) -> Result<(), TmSendError> {
if tm_creator.len_written() > MAX_TM_LEN {
return Err(ByteConversionError::ToSliceTooSmall {
expected: tm_creator.len_written(),
found: MAX_TM_LEN,
}
.into());
}
let mut tm_vec = TmPacket::new();
tm_vec
.resize(tm_creator.len_written(), 0)
.expect("vec resize failed");
tm_creator.write_to_bytes(tm_vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm_creator.service(),
tm_creator.subservice(),
tm_creator.len_written()
);
TM_REQUESTS
.enqueue(tm_vec)
.map_err(|_| TmSendError::Queue)?;
Ok(())
}
fn handle_tm_send_error(error: TmSendError) {
defmt::warn!("sending tm failed with error {}", error);
}
pub enum UartTxState {
// Wrapped in an option because we need an owned type later.
Idle(Option<TxIdle>),
// Same as above
Transmitting(Option<TxDmaTransferType>),
}
pub struct UartTxShared {
last_completed: Option<InstantFugit>,
state: UartTxState,
}
pub struct RequestWithToken {
token: VerificationToken<TcStateAccepted>,
request: Request,
}
#[derive(Debug, defmt::Format)]
pub enum Request {
Ping,
ChangeBlinkFrequency(u32),
}
#[derive(Debug, defmt::Format)]
pub enum RequestError {
InvalidApid = 1,
InvalidService = 2,
InvalidSubservice = 3,
NotEnoughAppData = 4,
}
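// Converts an incoming PUS TC into an internal request, sending the PUS acceptance
// verification telemetry in the process.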
pub fn convert_pus_tc_to_request(
tc: &PusTcReader,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) -> Result<RequestWithToken, RequestError> {
defmt::info!(
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
tc.len_packed()
);
let token = verif_reporter.add_tc(tc);
if tc.apid() != PUS_APID {
defmt::warn!("Received tc with unknown APID {}", tc.apid());
let result = send_tm(
verif_reporter
.acceptance_failure(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
FailParams::new(timestamp, &EcssEnumU16::new(0), &[]),
)
.unwrap(),
);
if let Err(e) = result {
handle_tm_send_error(e);
}
return Err(RequestError::InvalidApid);
}
let (tm_creator, accepted_token) = verif_reporter
.acceptance_success(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
timestamp,
)
.unwrap();
if let Err(e) = send_tm(tm_creator) {
handle_tm_send_error(e);
}
if tc.service() == 17 {
if tc.subservice() == 1 {
return Ok(RequestWithToken {
request: Request::Ping,
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else if tc.service() == 8 {
if tc.subservice() == 1 {
if tc.user_data().len() < 4 {
return Err(RequestError::NotEnoughAppData);
}
let new_freq_ms = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
return Ok(RequestWithToken {
request: Request::ChangeBlinkFrequency(new_freq_ms),
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else {
return Err(RequestError::InvalidService);
}
}
#[app(device = stm32f3xx_hal::pac, peripherals = true)]
mod app {
use super::*;
use core::slice::Iter;
use rtic_monotonics::systick::Systick;
use rtic_monotonics::Monotonic;
use satrs::pus::verification::{TcStateStarted, VerificationReportCreator};
use satrs::spacepackets::{ecss::tc::PusTcReader, time::cds::P_FIELD_BASE};
#[allow(unused_imports)]
use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*;
use stm32f3_discovery::switch_hal::OutputSwitch;
use stm32f3xx_hal::Switch;
#[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
#[shared]
struct Shared {
blink_freq: MillisDurationU32,
tx_shared: UartTxShared,
rx_transfer: Option<RxDmaTransferType>,
}
#[local]
struct Local {
verif_reporter: VerificationReportCreator,
leds: Leds,
last_dir: Direction,
curr_dir: Iter<'static, Direction>,
}
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
let mut rcc = cx.device.RCC.constrain();
// Initialize the systick interrupt & obtain the token to prove that we did
let systick_mono_token = rtic_monotonics::create_systick_token!();
Systick::start(cx.core.SYST, 8_000_000, systick_mono_token);
let mut flash = cx.device.FLASH.constrain();
let clocks = rcc
.cfgr
.use_hse(8.MHz())
.sysclk(8.MHz())
.pclk1(8.MHz())
.freeze(&mut flash.acr);
// Set up monotonic timer.
//let mono_timer = MonoTimer::new(cx.core.DWT, clocks, &mut cx.core.DCB);
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
let leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper,
);
let mut gpioa = cx.device.GPIOA.split(&mut rcc.ahb);
// USART2 pins
let mut pins = (
// TX pin: PA2
gpioa
.pa2
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
// RX pin: PA3
gpioa
.pa3
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
);
pins.1.internal_pull_up(&mut gpioa.pupdr, true);
let mut usart2 = Serial::new(
cx.device.USART2,
pins,
UART_BAUD.Bd(),
clocks,
&mut rcc.apb1,
);
usart2.configure_rx_interrupt(RxEvent::Idle, Switch::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Switch::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (mut tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event)
rx_serial.clear_event(RxEvent::Idle);
// For some reason, this event is also immediately triggered, so clear it as well.
tx_serial.clear_event(TxEvent::TransmissionComplete);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
defmt::info!("Spawning tasks");
blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap();
let verif_reporter = VerificationReportCreator::new(PUS_APID).unwrap();
(
Shared {
blink_freq: MillisDurationU32::from_ticks(DEFAULT_BLINK_FREQ_MS),
tx_shared: UartTxShared {
last_completed: None,
state: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
},
rx_transfer: Some(rx_transfer),
},
Local {
verif_reporter,
leds,
last_dir: Direction::North,
curr_dir: Direction::iter(),
},
)
}
#[task(local = [leds, curr_dir, last_dir], shared=[blink_freq])]
async fn blink(mut cx: blink::Context) {
let blink::LocalResources {
leds,
curr_dir,
last_dir,
..
} = cx.local;
let mut toggle_leds = |dir: &Direction| {
let last_led = leds.for_direction(*last_dir);
last_led.off().ok();
let led = leds.for_direction(*dir);
led.on().ok();
*last_dir = *dir;
};
loop {
match curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*curr_dir = Direction::iter();
toggle_leds(curr_dir.next().unwrap());
}
}
let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
Systick::delay(current_blink_freq).await;
}
}
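// TX handler task: dequeues pending TM packets, COBS-encodes them into the static
// DMA buffer and starts the UART TX DMA transfer.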
#[task(
shared = [tx_shared],
)]
async fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
loop {
let is_idle = cx.shared.tx_shared.lock(|tx_shared| {
if let UartTxState::Idle(_) = tx_shared.state {
return true;
}
false
});
if is_idle {
let last_completed = cx.shared.tx_shared.lock(|shared| shared.last_completed);
if let Some(last_completed) = last_completed {
let elapsed_ms = (Systick::now() - last_completed).to_millis();
if elapsed_ms < MIN_DELAY_BETWEEN_TX_PACKETS_MS {
Systick::delay((MIN_DELAY_BETWEEN_TX_PACKETS_MS - elapsed_ms).millis())
.await;
}
}
} else {
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
if let Some(vec) = TM_REQUESTS.dequeue() {
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(tx) => {
let encoded_len;
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
unsafe {
// 0 sentinel value as start marker
DMA_TX_BUF[0] = 0;
encoded_len =
cobs::encode(&vec[0..vec.len()], &mut DMA_TX_BUF[1..]);
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
// 0 end marker
DMA_TX_BUF[encoded_len + 1] = 0;
}
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
// SAFETY: The DMA is the exclusive writer to the DMA buffer now.
let transfer = tx_idle.tx.write_all(
unsafe { &DMA_TX_BUF[0..encoded_len + 2] },
tx_idle.dma_channel,
);
tx_shared.state = UartTxState::Transmitting(Some(transfer));
// The dequeued TM vector is dropped at the end of this block; its contents
// were already encoded into the static DMA buffer.
}
UartTxState::Transmitting(_) => (),
});
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
// Nothing to do, and we are idle.
Systick::delay(TX_HANDLER_FREQ_MS.millis()).await;
}
}
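// RX handler task: decodes a received COBS frame, parses the PUS TC and executes the
// resulting request, sending verification telemetry along the way.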
#[task(
local = [
verif_reporter,
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
timestamp: [u8; 7] = [0; 7],
],
shared = [blink_freq]
)]
async fn serial_rx_handler(
mut cx: serial_rx_handler::Context,
received_packet: Vec<u8, MAX_TC_LEN>,
) {
cx.local.timestamp[0] = P_FIELD_BASE;
defmt::info!("Received packet with {} bytes", received_packet.len());
let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice();
let mut start_idx = None;
for (idx, byte) in packet.iter().enumerate() {
if *byte != 0 {
start_idx = Some(idx);
break;
}
}
if start_idx.is_none() {
defmt::warn!("decoding error, can only process cobs encoded frames, data is all 0");
return;
}
let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => {
defmt::info!("Decoded packet length: {}", len);
let pus_tc = PusTcReader::new(decode_buf);
match pus_tc {
Ok((tc, _tc_len)) => {
match convert_pus_tc_to_request(
&tc,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
) {
Ok(request_with_token) => {
let started_token = handle_start_verification(
request_with_token.token,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
);
match request_with_token.request {
Request::Ping => {
handle_ping_request(cx.local.timestamp);
}
Request::ChangeBlinkFrequency(new_freq_ms) => {
defmt::info!("Received blink frequency change request with new frequncy {}", new_freq_ms);
cx.shared.blink_freq.lock(|blink_freq| {
*blink_freq =
MillisDurationU32::from_ticks(new_freq_ms);
});
}
}
handle_completion_verification(
started_token,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
);
}
Err(e) => {
// TODO: Error handling: Send verification failure based on request error.
defmt::warn!("request error {}", e);
}
}
}
Err(e) => {
defmt::warn!("Error unpacking PUS TC: {}", e);
}
}
}
Err(_) => {
defmt::warn!("decoding error, can only process cobs encoded frames")
}
}
}
fn handle_ping_request(timestamp: &[u8]) {
defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
let sp_header =
SpHeader::new_for_unseg_tc(PUS_APID, SEQ_COUNT_PROVIDER.get_and_increment(), 0);
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, timestamp);
let ping_reply = PusTmCreator::new(sp_header, sec_header, &[], true);
let mut tm_packet = TmPacket::new();
tm_packet
.resize(ping_reply.len_written(), 0)
.expect("vec resize failed");
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
if TM_REQUESTS.enqueue(tm_packet).is_err() {
defmt::warn!("TC queue full");
return;
}
}
fn handle_start_verification(
accepted_token: VerificationToken<TcStateAccepted>,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) -> VerificationToken<TcStateStarted> {
let (tm_creator, started_token) = verif_reporter
.start_success(
src_data_buf,
accepted_token,
SEQ_COUNT_PROVIDER.get(),
0,
&timestamp,
)
.unwrap();
let result = send_tm(tm_creator);
if let Err(e) = result {
handle_tm_send_error(e);
}
started_token
}
fn handle_completion_verification(
started_token: VerificationToken<TcStateStarted>,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) {
let result = send_tm(
verif_reporter
.completion_success(
src_data_buf,
started_token,
SEQ_COUNT_PROVIDER.get(),
0,
timestamp,
)
.unwrap(),
);
if let Err(e) = result {
handle_tm_send_error(e);
}
}
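// Interrupt handler for the RX DMA channel. It only fires when the RX DMA transfer
// buffer was filled completely.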
#[task(binds = DMA1_CH6, shared = [rx_transfer])]
fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
let mut tc_packet = TcPacket::new();
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_ref = rx_transfer.as_ref().unwrap();
if rx_ref.is_complete() {
let uart_rx_owned = rx_transfer.take().unwrap();
let (buf, c, rx) = uart_rx_owned.stop();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this; we use a stack-allocated
// vector here.
tc_packet.resize(buf.len(), 0).expect("vec resize failed");
tc_packet.copy_from_slice(buf);
// Start the next transfer as soon as possible.
*rx_transfer = Some(rx.read_exact(buf, c));
// Send the vector to a regular task.
serial_rx_handler::spawn(tc_packet).expect("spawning rx handler task failed");
// If this happens, there is a high chance that the maximum packet length was
// exceeded. Circular mode is not used here, so data might be missed.
defmt::warn!(
"rx transfer with maximum length {}, might miss data",
TC_BUF_LEN
);
}
});
}
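// USART2 interrupt handler. It returns the TX peripheral to the idle state after a
// transmission completes and forwards partially received RX data to the RX handler
// task when the line goes idle.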
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_shared])]
fn serial_isr(mut cx: serial_isr::Context) {
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(_) => (),
UartTxState::Transmitting(transfer) => {
let transfer_ref = transfer.as_ref().unwrap();
if transfer_ref.is_complete() {
let transfer = transfer.take().unwrap();
let (_, dma_channel, mut tx) = transfer.stop();
tx.clear_event(TxEvent::TransmissionComplete);
tx_shared.state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
// We cache the last completed time to ensure that there is a minimum delay between consecutive
// transferred packets.
tx_shared.last_completed = Some(Systick::now());
}
}
});
let mut tc_packet = TcPacket::new();
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_transfer_ref = rx_transfer.as_ref().unwrap();
// Received a partial packet.
if rx_transfer_ref.is_event_triggered(RxEvent::Idle) {
let rx_transfer_owned = rx_transfer.take().unwrap();
let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this; we use a stack-allocated
// vector here.
tc_packet
.resize(rx_len as usize, 0)
.expect("vec resize failed");
tc_packet[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
rx.clear_event(RxEvent::Idle);
serial_rx_handler::spawn(tc_packet).expect("spawning rx handler failed");
*rx_transfer = Some(rx.read_exact(buf, ch));
}
});
}
}

View File

@ -5,7 +5,7 @@
// List of extensions which should be recommended for users of this workspace.
"recommendations": [
"rust-lang.rust",
"marus25.cortex-debug",
"probe-rs.probe-rs-debugger"
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": []

View File

@ -0,0 +1,22 @@
{
"version": "0.2.0",
"configurations": [
{
"preLaunchTask": "${defaultBuildTask}",
"type": "probe-rs-debug",
"request": "launch",
"name": "probe-rs Debugging ",
"flashingConfig": {
"flashingEnabled": true
},
"chip": "STM32F303VCTx",
"coreConfigs": [
{
"programBinary": "${workspaceFolder}/target/thumbv7em-none-eabihf/debug/satrs-stm32f3-disco-rtic",
"rttEnabled": true,
"svdFile": "STM32F303.svd"
}
]
}
]
}

View File

@ -11,7 +11,8 @@ proc CDSWOConfigure { CDCPUFreqHz CDSWOFreqHz CDSWOOutput } {
# Alternative option: Pipe ITM output into itm.txt file
# tpiu config internal itm.txt uart off $CDCPUFreqHz
# Default option so SWO display of VS code works.
# Default option so SWO display of VS code works. Please note that this might not be required
# anymore starting at openocd v0.12.0
tpiu config internal $CDSWOOutput uart off $CDCPUFreqHz $CDSWOFreqHz
itm port 0 on
}

View File

@ -0,0 +1,29 @@
[target.'cfg(all(target_arch = "arm", target_os = "none"))']
runner = "probe-rs run --chip STM32H743ZITx"
# runner = ["probe-rs", "run", "--chip", "$CHIP", "--log-format", "{L} {s}"]
rustflags = [
"-C", "linker=flip-link",
"-C", "link-arg=-Tlink.x",
"-C", "link-arg=-Tdefmt.x",
# This is needed if your flash or ram addresses are not aligned to 0x10000 in memory.x
# See https://github.com/rust-embedded/cortex-m-quickstart/pull/95
"-C", "link-arg=--nmagic",
# Can be useful for debugging.
# "-Clink-args=-Map=app.map"
]
[build]
# (`thumbv6m-*` is compatible with all ARM Cortex-M chips but using the right
# target improves performance)
# target = "thumbv6m-none-eabi" # Cortex-M0 and Cortex-M0+
# target = "thumbv7m-none-eabi" # Cortex-M3
# target = "thumbv7em-none-eabi" # Cortex-M4 and Cortex-M7 (no FPU)
target = "thumbv7em-none-eabihf" # Cortex-M4F and Cortex-M7F (with FPU)
[alias]
rb = "run --bin"
rrb = "run --release --bin"
[env]
DEFMT_LOG = "info"

View File

@ -0,0 +1,4 @@
/target
/.cargo/config*
/.vscode
/app.map

View File

@ -0,0 +1,881 @@
# This file is automatically @generated by Cargo.
# It is not intended for manual editing.
version = 3
[[package]]
name = "atomic-polyfill"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4"
dependencies = [
"critical-section",
]
[[package]]
name = "autocfg"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
[[package]]
name = "bare-metal"
version = "0.2.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5deb64efa5bd81e31fcd1938615a6d98c82eafcbcd787162b6f63b91d6bac5b3"
dependencies = [
"rustc_version 0.2.3",
]
[[package]]
name = "bare-metal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8fe8f5a8a398345e52358e18ff07cc17a568fbca5c6f73873d3a62056309603"
[[package]]
name = "bitfield"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46afbd2983a5d5a7bd740ccb198caf5b82f45c40c09c0eed36052d91cb92e719"
[[package]]
name = "bitflags"
version = "1.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "byteorder"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cast"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cfg-if"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "cobs"
version = "0.2.3"
source = "git+https://github.com/robamu/cobs.rs.git?branch=all_features#c70a7f30fd00a7cbdb7666dec12b437977385d40"
[[package]]
name = "cortex-m"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9"
dependencies = [
"bare-metal 0.2.5",
"bitfield",
"critical-section",
"embedded-hal 0.2.7",
"volatile-register",
]
[[package]]
name = "cortex-m-rt"
version = "0.7.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2722f5b7d6ea8583cffa4d247044e280ccbb9fe501bed56552e2ba48b02d5f3d"
dependencies = [
"cortex-m-rt-macros",
]
[[package]]
name = "cortex-m-rt-macros"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0f6f3e36f203cfedbc78b357fb28730aa2c6dc1ab060ee5c2405e843988d3c7"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "cortex-m-semihosting"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c23234600452033cc77e4b761e740e02d2c4168e11dbf36ab14a0f58973592b0"
dependencies = [
"cortex-m",
]
[[package]]
name = "crc"
version = "3.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636"
dependencies = [
"crc-catalog",
]
[[package]]
name = "crc-catalog"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
[[package]]
name = "critical-section"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]]
name = "defmt"
version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a99dd22262668b887121d4672af5a64b238f026099f1a2a1b322066c9ecfe9e0"
dependencies = [
"bitflags",
"defmt-macros",
]
[[package]]
name = "defmt-brtt"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2f0ac3635d0c89d12b8101fcb44a7625f5f030a1c0491124b74467eb5a58a78"
dependencies = [
"critical-section",
"defmt",
]
[[package]]
name = "defmt-macros"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3a9f309eff1f79b3ebdf252954d90ae440599c26c2c553fe87a2d17195f2dcb"
dependencies = [
"defmt-parser",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "defmt-parser"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f"
dependencies = [
"thiserror",
]
[[package]]
name = "defmt-test"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290966e8c38f94b11884877242de876280d0eab934900e9642d58868e77c5df1"
dependencies = [
"cortex-m-rt",
"cortex-m-semihosting",
"defmt",
"defmt-test-macros",
]
[[package]]
name = "defmt-test-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "delegate"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee5df75c70b95bd3aacc8e2fd098797692fb1d54121019c4de481e42f04c8a1"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "derive-new"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d150dea618e920167e5973d70ae6ece4385b7164e0d799fe7c122dd0a5d912ad"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "embedded-alloc"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ddae17915accbac2cfbc64ea0ae6e3b330e6ea124ba108dada63646fd3c6f815"
dependencies = [
"critical-section",
"linked_list_allocator",
]
[[package]]
name = "embedded-dma"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "994f7e5b5cb23521c22304927195f236813053eb9c065dd2226a32ba64695446"
dependencies = [
"stable_deref_trait",
]
[[package]]
name = "embedded-hal"
version = "0.2.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "35949884794ad573cf46071e41c9b60efb0cb311e3ca01f7af807af1debc66ff"
dependencies = [
"nb 0.1.3",
"void",
]
[[package]]
name = "embedded-hal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361a90feb7004eca4019fb28352a9465666b24f840f5c3cddf0ff13920590b89"
dependencies = [
"defmt",
]
[[package]]
name = "embedded-hal-async"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c4c685bbef7fe13c3c6dd4da26841ed3980ef33e841cddfa15ce8a8fb3f1884"
dependencies = [
"defmt",
"embedded-hal 1.0.0",
]
[[package]]
name = "embedded-hal-bus"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57b4e6ede84339ebdb418cd986e6320a34b017cdf99b5cc3efceec6450b06886"
dependencies = [
"critical-section",
"defmt",
"embedded-hal 1.0.0",
"embedded-hal-async",
]
[[package]]
name = "embedded-storage"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a21dea9854beb860f3062d10228ce9b976da520a73474aed3171ec276bc0c032"
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "fugit"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17186ad64927d5ac8f02c1e77ccefa08ccd9eaa314d5a4772278aa204a22f7e7"
dependencies = [
"gcd",
]
[[package]]
name = "futures-core"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "gcd"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a"
[[package]]
name = "hash32"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67"
dependencies = [
"byteorder",
]
[[package]]
name = "hash32"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
dependencies = [
"byteorder",
]
[[package]]
name = "hashbrown"
version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
[[package]]
name = "heapless"
version = "0.7.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cdc6457c0eb62c71aac4bc17216026d8410337c4126773b9c5daba343f17964f"
dependencies = [
"atomic-polyfill",
"hash32 0.2.1",
"rustc_version 0.4.0",
"spin",
"stable_deref_trait",
]
[[package]]
name = "heapless"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
dependencies = [
"defmt",
"hash32 0.3.1",
"stable_deref_trait",
]
[[package]]
name = "indexmap"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"equivalent",
"hashbrown",
]
[[package]]
name = "linked_list_allocator"
version = "0.10.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"
[[package]]
name = "lock_api"
version = "0.4.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "managed"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ca88d725a0a943b096803bd34e73a4437208b6077654cc4ecb2947a5f91618d"
[[package]]
name = "nb"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f"
dependencies = [
"nb 1.1.0",
]
[[package]]
name = "nb"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d"
[[package]]
name = "num-traits"
version = "0.2.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841"
dependencies = [
"autocfg",
]
[[package]]
name = "num_enum"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "panic-probe"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4047d9235d1423d66cc97da7d07eddb54d4f154d6c13805c6d0793956f4f25b0"
dependencies = [
"cortex-m",
"defmt",
]
[[package]]
name = "paste"
version = "1.0.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a"
[[package]]
name = "pin-project-lite"
version = "0.2.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "portable-atomic"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0"
[[package]]
name = "proc-macro-error"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c"
dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn 1.0.109",
"version_check",
]
[[package]]
name = "proc-macro-error-attr"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869"
dependencies = [
"proc-macro2",
"quote",
"version_check",
]
[[package]]
name = "proc-macro2"
version = "1.0.82"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rtic"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c443db16326376bdd64377da268f6616d5f804aba8ce799bac7d1f7f244e9d51"
dependencies = [
"atomic-polyfill",
"bare-metal 1.0.0",
"cortex-m",
"critical-section",
"rtic-core",
"rtic-macros",
]
[[package]]
name = "rtic-common"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0786b50b81ef9d2a944a000f60405bb28bf30cd45da2d182f3fe636b2321f35c"
dependencies = [
"critical-section",
]
[[package]]
name = "rtic-core"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9369355b04d06a3780ec0f51ea2d225624db777acbc60abd8ca4832da5c1a42"
[[package]]
name = "rtic-macros"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "54053598ea24b1b74937724e366558412a1777eb2680b91ef646db540982789a"
dependencies = [
"indexmap",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "rtic-monotonics"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058c2397dbd5bb4c5650a0e368c3920953e458805ff5097a0511b8147b3619d7"
dependencies = [
"atomic-polyfill",
"cfg-if",
"cortex-m",
"embedded-hal 1.0.0",
"fugit",
"rtic-time",
]
[[package]]
name = "rtic-sync"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49b1200137ccb2bf272a1801fa6e27264535facd356cb2c1d5bc8e12aa211bad"
dependencies = [
"critical-section",
"defmt",
"embedded-hal 1.0.0",
"embedded-hal-async",
"embedded-hal-bus",
"heapless 0.8.0",
"portable-atomic",
"rtic-common",
]
[[package]]
name = "rtic-time"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b232e7aebc045cfea81cdd164bc2727a10aca9a4568d406d0a5661cdfd0f19"
dependencies = [
"critical-section",
"futures-util",
"rtic-common",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
dependencies = [
"semver 0.9.0",
]
[[package]]
name = "rustc_version"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver 1.0.23",
]
[[package]]
name = "satrs"
version = "0.2.1"
dependencies = [
"cobs",
"crc",
"defmt",
"delegate",
"derive-new",
"heapless 0.7.17",
"num-traits",
"num_enum",
"paste",
"satrs-shared",
"smallvec",
"spacepackets",
]
[[package]]
name = "satrs-shared"
version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6042477018c2d43fffccaaa5099bc299a58485139b4d31c5b276889311e474f1"
dependencies = [
"spacepackets",
]
[[package]]
name = "satrs-stm32h7-nucleo-rtic"
version = "0.1.0"
dependencies = [
"cortex-m",
"cortex-m-rt",
"cortex-m-semihosting",
"defmt",
"defmt-brtt",
"defmt-test",
"embedded-alloc",
"panic-probe",
"rtic",
"rtic-monotonics",
"rtic-sync",
"satrs",
"smoltcp",
"stm32h7xx-hal",
]
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
[[package]]
name = "semver"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403"
dependencies = [
"semver-parser",
]
[[package]]
name = "semver"
version = "1.0.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b"
[[package]]
name = "semver-parser"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "smoltcp"
version = "0.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a1a996951e50b5971a2c8c0fa05a381480d70a933064245c4a223ddc87ccc97"
dependencies = [
"bitflags",
"byteorder",
"cfg-if",
"defmt",
"heapless 0.8.0",
"managed",
]
[[package]]
name = "spacepackets"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e85574d113a06312010c0ba51aadccd4ba2806231ebe9a49fc6473d0534d8696"
dependencies = [
"crc",
"defmt",
"delegate",
"num-traits",
"num_enum",
"zerocopy",
]
[[package]]
name = "spin"
version = "0.9.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67"
dependencies = [
"lock_api",
]
[[package]]
name = "stable_deref_trait"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3"
[[package]]
name = "stm32h7"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "362f288cd8341e9209587b889c385f323e82fc237b60c272868965bb879bb9b1"
dependencies = [
"bare-metal 1.0.0",
"cortex-m",
"cortex-m-rt",
"vcell",
]
[[package]]
name = "stm32h7xx-hal"
version = "0.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3bd869329be25440b24e2b3583a1c016151b4a54bc36d96d82af7fcd9d010b98"
dependencies = [
"bare-metal 1.0.0",
"cast",
"cortex-m",
"embedded-dma",
"embedded-hal 0.2.7",
"embedded-storage",
"fugit",
"nb 1.1.0",
"paste",
"smoltcp",
"stm32h7",
"void",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ad3dee41f36859875573074334c200d1add8e4a87bb37113ebd31d926b7b11f"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thiserror"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.61"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "vcell"
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77439c1b53d2303b20d9459b1ade71a83c716e3f9c34f3228c00e6f185d6c002"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "void"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "volatile-register"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc"
dependencies = [
"vcell",
]
[[package]]
name = "zerocopy"
version = "0.7.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.34"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.64",
]


@ -0,0 +1,85 @@
[package]
authors = ["Robin Mueller <robin.mueller.m@gmail.com>"]
name = "satrs-stm32h7-nucleo-rtic"
edition = "2021"
version = "0.1.0"
default-run = "satrs-stm32h7-nucleo-rtic"
[lib]
harness = false
# needed for each integration test
[[test]]
name = "integration"
harness = false
[dependencies]
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7"
defmt = "0.3"
defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
panic-probe = { version = "0.3", features = ["print-defmt"] }
cortex-m-semihosting = "0.5.0"
stm32h7xx-hal = { version="0.16", features= ["stm32h743v", "ethernet"] }
embedded-alloc = "0.5"
rtic-sync = { version = "1", features = ["defmt-03"] }
[dependencies.smoltcp]
version = "0.11.0"
default-features = false
features = ["medium-ethernet", "proto-ipv4", "socket-raw", "socket-dhcpv4", "socket-udp", "defmt"]
[dependencies.rtic]
version = "2"
features = ["thumbv7-backend"]
[dependencies.rtic-monotonics]
version = "1"
features = ["cortex-m-systick"]
[dependencies.satrs]
path = "../../satrs"
version = "0.2"
default-features = false
features = ["defmt", "heapless"]
[dev-dependencies]
defmt-test = "0.3"
# cargo build/run
[profile.dev]
codegen-units = 1
debug = 2
debug-assertions = true # <-
incremental = false
opt-level = 's' # <-
overflow-checks = true # <-
# cargo test
[profile.test]
codegen-units = 1
debug = 2
debug-assertions = true # <-
incremental = false
opt-level = 3 # <-
overflow-checks = true # <-
# cargo build/run --release
[profile.release]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = 3 # <-
overflow-checks = false # <-
# cargo test --release
[profile.bench]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = 3 # <-
overflow-checks = false # <-


@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@ -0,0 +1,118 @@
sat-rs example for the STM32H7 Nucleo-H743ZI board
=======
This example application shows how the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
can be used on an embedded target.
It also shows how a relatively simple OBSW could be built when no standard runtime is available.
It uses [RTIC](https://rtic.rs/2/book/en/) as the concurrency framework and the
[defmt](https://defmt.ferrous-systems.com/) framework for logging.
The STM32H743ZIT device was picked because it is one of the more powerful Cortex-M based devices
offered by ST. It also has a comparatively large amount of RAM and allows commanding
via TCP/IP.
## Pre-Requisites
Make sure the following tools are installed:
1. [`probe-rs`](https://probe.rs/): Application used to flash and debug the MCU.
2. Optional and recommended: [VS Code](https://code.visualstudio.com/) with
[probe-rs plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger)
for debugging.
## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
If you have not installed it yet, you can do so with
```sh
rustup target add thumbv7em-none-eabihf
```
A default `.cargo` config file is provided for this project, but needs to be copied to have
the correct name. This is so that the config file can be updated or edited for custom needs
without being tracked by git.
```sh
cp def_config.toml config.toml
```
The configuration file will also set the target so it does not always have to be specified with
the `--target` argument.
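For reference, a config along the following lines is sufficient to select the cross-compilation
target and a `probe-rs` runner. This is only a minimal sketch; the `def_config.toml` shipped with
the project may contain additional or different settings.
```toml
[build]
# Cross-compile for the Cortex-M7F core of the STM32H743 by default.
target = "thumbv7em-none-eabihf"

[target.thumbv7em-none-eabihf]
# Flash and run the firmware with probe-rs, same as the manual command shown further below.
runner = "probe-rs run --chip STM32H743ZITx"
```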
## Building
After that, assuming that you have a `.cargo/config.toml` setting the correct build target,
you can simply build the application with
```sh
cargo build
```
## Flashing from the command line
You can flash the application from the command line using `probe-rs`:
```sh
probe-rs run --chip STM32H743ZITx
```
## Debugging with VS Code
The Nucleo-H743ZI comes with an on-board ST-Link, so all that is required to flash and debug
the board is a USB cable. The code in this repository was debugged using [`probe-rs`](https://probe.rs/docs/tools/debugger)
and the VS Code [`probe-rs` plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger).
Make sure to install this plugin first.
Sample configuration files for VS Code are provided inside the `vscode` folder.
Use `cp -r vscode .vscode` to use them for your project. You can then simply use `Run` and `Debug`
to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS Code or adding it to a workspace.
## Commanding with Python
When the SW is running on the Nucleo board, you can command the MCU with PUS packets using the
provided Python client. The default configuration uses a UDP communication interface on port 7301.
It is recommended to use a virtual environment to do this. To set up one in the command line,
you can use `python3 -m venv venv` on Unix systems or `py -m venv venv` on Windows systems.
After doing this, you can check the [venv tutorial](https://docs.python.org/3/tutorial/venv.html)
on how to activate the environment and then use the following command to install the required
dependency:
```sh
pip install -r requirements.txt
```
The packets are exchanged via the board's Ethernet interface. The board either acquires an IP
address via DHCP or falls back to the static address configured in the firmware, so make sure it
is reachable from the host computer.
A default configuration file for the python application is provided and can be used by running
```sh
cp def_tmtc_conf.json tmtc_conf.json
```
After that, you can for example send a ping to the MCU using the following command
```sh
./main.py -p /ping
```
You can configure the blinky frequency using
```sh
./main.py -p /change_blink_freq
```
All these commands package a PUS telecommand which is then sent to the MCU via the configured
communication interface.
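For illustration, the following sketch shows the core of what the client does for the `/ping`
command, assuming the default UDP configuration: it packs a PUS TC[17,1] and sends it as a single
UDP datagram. The APID 0x02 and port 7301 are taken from `main.py` and the firmware sources; the
target address is the static fallback IP configured in the firmware and has to be adapted if DHCP
assigns a different address.
```python
"""Minimal sketch: send a PUS ping telecommand to the MCU as one UDP datagram."""
import socket

from spacepackets.ecss import PusTelecommand

TARGET_ADDR = ("192.168.1.99", 7301)  # static fallback IP and UDP port used by the firmware

# TC[17,1] ping telecommand with the example APID used by the firmware.
ping_tc = PusTelecommand(service=17, subservice=1, apid=0x02)
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as udp_socket:
    udp_socket.sendto(ping_tc.pack(), TARGET_ADDR)
```
In practice, the provided `main.py` client should be preferred because it also handles sequence
counting, verification reporting and telemetry reception.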
## Resources
- [STM32H743ZI Ethernet link checker example](https://github.com/stm32-rs/stm32h7xx-hal/blob/master/examples/ethernet-nucleo-h743zi2.rs)
- [smoltcp DHCP client](https://github.com/smoltcp-rs/smoltcp/blob/main/examples/dhcp_client.rs)

File diff suppressed because it is too large


@ -0,0 +1,119 @@
/* Taken from https://github.com/stm32-rs/stm32h7xx-hal/pull/299, adapted slightly to work with */
/* flip-link */
MEMORY
{
/* This file is intended for parts in the STM32H743/743v/753/753v families (RM0433), */
/* with the exception of the STM32H742/742v parts which have a different RAM layout. */
/* - FLASH and RAM are mandatory memory sections. */
/* - The sum of all non-FLASH sections must add to 1060K total device RAM. */
/* - The FLASH section size must match your device, see table below. */
/* FLASH */
/* Flash is divided in two independent banks (except 750xB). */
/* Select the appropriate FLASH size for your device. */
/* - STM32H750xB 128K (only FLASH1) */
/* - STM32H750xB 1M (512K + 512K) */
/* - STM32H743xI/753xI 2M ( 1M + 1M) */
FLASH1 : ORIGIN = 0x08000000, LENGTH = 1M
FLASH2 : ORIGIN = 0x08100000, LENGTH = 1M
/* Data TCM */
/* - Two contiguous 64KB RAMs. */
/* - Used for interrupt handlers, stacks and general RAM. */
/* - Zero wait-states. */
/* - The DTCM is taken as the origin of the base ram. (See below.) */
/* This is also where the interrupt table and such will live, */
/* which is required for deterministic performance. */
/* Need a region called RAM */
/* DTCM : ORIGIN = 0x20000000, LENGTH = 128K */
RAM : ORIGIN = 0x20000000, LENGTH = 128K
/* Instruction TCM */
/* - Used for latency-critical interrupt handlers etc. */
/* - Zero wait-states. */
ITCM : ORIGIN = 0x00000000, LENGTH = 64K
/* AXI SRAM */
/* - AXISRAM is in D1 and accessible by all system masters except BDMA. */
/* - Suitable for application data not stored in DTCM. */
/* - Zero wait-states. */
AXISRAM : ORIGIN = 0x24000000, LENGTH = 512K
/* AHB SRAM */
/* - SRAM1-3 are in D2 and accessible by all system masters except BDMA. */
/* Suitable for use as DMA buffers. */
/* - SRAM4 is in D3 and additionally accessible by the BDMA. Used for BDMA */
/* buffers, for storing application data in lower-power modes. */
/* - Zero wait-states. */
SRAM1 : ORIGIN = 0x30000000, LENGTH = 128K
SRAM2 : ORIGIN = 0x30020000, LENGTH = 128K
SRAM3 : ORIGIN = 0x30040000, LENGTH = 32K
SRAM4 : ORIGIN = 0x38000000, LENGTH = 64K
/* Backup SRAM */
BSRAM : ORIGIN = 0x38800000, LENGTH = 4K
}
/*
/* Assign the memory regions defined above for use. */
/*
/* Provide the mandatory FLASH and RAM definitions for cortex-m-rt's linker script. */
/* These do not work with flip-link */
REGION_ALIAS(FLASH, FLASH1);
/* REGION_ALIAS(RAM, DTCM); */
/* The location of the stack can be overridden using the `_stack_start` symbol. */
/* - Set the stack location at the end of RAM, using all remaining space. */
_stack_start = ORIGIN(RAM) + LENGTH(RAM);
/* The location of the .text section can be overridden using the */
/* `_stext` symbol. By default it will place after .vector_table. */
/* _stext = ORIGIN(FLASH) + 0x40c; */
/* Define sections for placing symbols into the extra memory regions above. */
/* This makes them accessible from code. */
/* - ITCM, DTCM and AXISRAM connect to a 64-bit wide bus -> align to 8 bytes. */
/* - All other memories connect to a 32-bit wide bus -> align to 4 bytes. */
SECTIONS {
.flash2 (NOLOAD) : ALIGN(4) {
*(.flash2 .flash2.*);
. = ALIGN(4);
} > FLASH2
.itcm (NOLOAD) : ALIGN(8) {
*(.itcm .itcm.*);
. = ALIGN(8);
} > ITCM
.axisram (NOLOAD) : ALIGN(8) {
*(.axisram .axisram.*);
. = ALIGN(8);
} > AXISRAM
.sram1 (NOLOAD) : ALIGN(8) {
*(.sram1 .sram1.*);
. = ALIGN(4);
} > SRAM1
.sram2 (NOLOAD) : ALIGN(8) {
*(.sram2 .sram2.*);
. = ALIGN(4);
} > SRAM2
.sram3 (NOLOAD) : ALIGN(4) {
*(.sram3 .sram3.*);
. = ALIGN(4);
} > SRAM3
.sram4 (NOLOAD) : ALIGN(4) {
*(.sram4 .sram4.*);
. = ALIGN(4);
} > SRAM4
.bsram (NOLOAD) : ALIGN(4) {
*(.bsram .bsram.*);
. = ALIGN(4);
} > BSRAM
};


@ -0,0 +1,8 @@
/venv
/.tmtc-history.txt
/log
/.idea/*
!/.idea/runConfigurations
/seqcnt.txt
/tmtc_conf.json


@ -0,0 +1,4 @@
{
"com_if": "udp",
"tcpip_udp_port": 7301
}


@ -0,0 +1,305 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import struct
import logging
import sys
import time
from typing import Any, Optional, cast
from prompt_toolkit.history import FileHistory, History
from spacepackets.ecss.tm import CdsShortTimestamp
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusTm, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.core.ccsds_backend import QueueWrapper
from tmtccmd.logging import add_colorlog_console_logger
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
default_json_path,
SetupParams,
HookBase,
params_to_procedure_conversion,
)
from tmtccmd.config.com import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging.pus import (
RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper,
TimedLogWhen,
)
from tmtccmd.tmtc import (
TcQueueEntryType,
ProcedureWrapper,
TcProcedureType,
FeedWrapper,
SendCbParams,
DefaultPusQueueHelper,
)
from tmtccmd.pus.s5_fsfw_event import Service5Tm
from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
_LOGGER = logging.getLogger()
EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path)
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (
create_com_interface_default,
create_com_interface_cfg_default,
)
assert self.cfg_path is not None
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
space_packet_ids=None,
)
if cfg is None:
raise ValueError(
f"No valid configuration could be retrieved for the COM IF with key {com_if_key}"
)
if cfg.com_if_key == "serial_cobs":
cfg = cast(SerialCfgWrapper, cfg)
cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg)
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return create_cmd_definition_tree()
def get_cmd_history(self) -> Optional[History]:
"""Optionlly return a history class for the past command paths which will be used
when prompting a command path from the user in CLI mode."""
return FileHistory(".tmtc-history.txt")
def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids
return get_core_object_ids()
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
root_node.add_child(CmdTreeNode("change_blink_freq", "Change blink frequency"))
return root_node
class PusHandler(SpecificApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: Any):
try:
pus_tm = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = pus_tm.service
tm_packet = None
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
if service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
_LOGGER.info("received JSON string: " + json_str.decode("utf-8"))
if service == 5:
tm_packet = Service5Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if service == 17:
tm_packet = Service17Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if tm_packet.subservice == 2:
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
if tm_packet is None:
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
self.raw_logger.log_tm(pus_tm)
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", target_id))
byte_string.extend(struct.pack("!I", unique_id))
return byte_string
class TcHandler(TcHandlerBase):
def __init__(
self,
seq_count_provider: FileSeqCountProvider,
verif_wrapper: VerificationWrapper,
):
super(TcHandler, self).__init__()
self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=QueueWrapper.empty(),
tc_sched_timestamp_len=7,
seq_cnt_provider=seq_count_provider,
pus_verificator=verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
)
def send_cb(self, send_params: SendCbParams):
entry_helper = send_params.entry
if entry_helper.is_tc:
if entry_helper.entry_type == TcQueueEntryType.PUS_TC:
pus_tc_wrapper = entry_helper.to_pus_tc_entry()
pus_tc_wrapper.pus_tc.seq_count = (
self.seq_count_provider.get_and_increment()
)
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack()
_LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry()
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
cmd_path = def_proc.cmd_path
if cmd_path == "/ping":
q.add_log_cmd("Sending PUS ping telecommand")
q.add_pus_tc(PusTelecommand(service=17, subservice=1))
if cmd_path == "/change_blink_freq":
self.create_change_blink_freq_command(q)
def create_change_blink_freq_command(self, q: DefaultPusQueueHelper):
q.add_log_cmd("Changing blink frequency")
while True:
blink_freq = int(
input(
"Please specify new blink frequency in ms. Valid Range [2..10000]: "
)
)
if blink_freq < 2 or blink_freq > 10000:
print(
"Invalid blink frequency. Please specify a value between 2 and 10000."
)
continue
break
app_data = struct.pack("!I", blink_freq)
q.add_pus_tc(PusTelecommand(service=8, subservice=1, app_data=app_data))
def main():
add_colorlog_console_logger(_LOGGER)
tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args()
params = SetupParams()
post_args_wrapper = parser_wrapper.parse(hook_obj, params)
proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
# Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper()
file_logger = tmtc_logger.logger
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
seq_count_provider = PusFileSeqCountProvider()
tc_handler = TcHandler(seq_count_provider, verification_wrapper)
tmtccmd.setup(setup_args=setup_args)
init_proc = params_to_procedure_conversion(setup_args.proc_param_wrapper)
tmtc_backend = tmtccmd.create_default_tmtc_backend(
setup_wrapper=setup_args,
tm_handler=ccsds_handler,
tc_handler=tc_handler,
init_procedure=init_proc,
)
tmtccmd.start(tmtc_backend=tmtc_backend, hook_obj=hook_obj)
try:
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
_LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8)
elif state.request == BackendRequest.DELAY_CUSTOM:
if state.next_delay.total_seconds() <= 0.4:
time.sleep(state.next_delay.total_seconds())
else:
time.sleep(0.4)
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__":
main()


@ -0,0 +1,2 @@
tmtccmd == 8.0.1
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd


@ -0,0 +1,55 @@
//! Blinks an LED
//!
//! This assumes that LD2 (blue) is connected to pb7 and LD3 (red) is connected
//! to pb14. This assumption is true for the nucleo-h743zi board.
#![no_std]
#![no_main]
use satrs_stm32h7_nucleo_rtic as _;
use stm32h7xx_hal::{block, prelude::*, timer::Timer};
use cortex_m_rt::entry;
#[entry]
fn main() -> ! {
defmt::println!("starting stm32h7 blinky example");
// Get access to the device specific peripherals from the peripheral access crate
let dp = stm32h7xx_hal::stm32::Peripherals::take().unwrap();
// Take ownership over the RCC devices and convert them into the corresponding HAL structs
let rcc = dp.RCC.constrain();
let pwr = dp.PWR.constrain();
let pwrcfg = pwr.freeze();
// Freeze the configuration of all the clocks in the system and
// retrieve the Core Clock Distribution and Reset (CCDR) object
let rcc = rcc.use_hse(8.MHz()).bypass_hse();
let ccdr = rcc.freeze(pwrcfg, &dp.SYSCFG);
// Acquire the GPIOB peripheral
let gpiob = dp.GPIOB.split(ccdr.peripheral.GPIOB);
// Configure gpio B pin 0 as a push-pull output.
let mut ld1 = gpiob.pb0.into_push_pull_output();
// Configure gpio B pin 7 as a push-pull output.
let mut ld2 = gpiob.pb7.into_push_pull_output();
// Configure gpio B pin 14 as a push-pull output.
let mut ld3 = gpiob.pb14.into_push_pull_output();
// Configure the timer to trigger an update every second
let mut timer = Timer::tim1(dp.TIM1, ccdr.peripheral.TIM1, &ccdr.clocks);
timer.start(1.Hz());
// Wait for the timer to trigger an update and change the state of the LED
loop {
ld1.toggle();
ld2.toggle();
ld3.toggle();
block!(timer.wait()).unwrap();
}
}


@ -0,0 +1,11 @@
#![no_main]
#![no_std]
use satrs_stm32h7_nucleo_rtic as _; // global logger + panicking-behavior + memory layout
#[cortex_m_rt::entry]
fn main() -> ! {
defmt::println!("Hello, world!");
satrs_stm32h7_nucleo_rtic::exit()
}


@ -0,0 +1,52 @@
#![no_main]
#![no_std]
use cortex_m_semihosting::debug;
use defmt_brtt as _; // global logger
// TODO(5) adjust HAL import
use stm32h7xx_hal as _; // memory layout
use panic_probe as _;
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(debug::EXIT_SUCCESS);
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(debug::EXIT_FAILURE);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. the module can be in any file but there can only be at most
// one `#[tests]` module in this library crate
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}


@ -0,0 +1,528 @@
#![no_main]
#![no_std]
extern crate alloc;
use rtic::app;
use rtic_monotonics::systick::Systick;
use rtic_monotonics::Monotonic;
use satrs::pool::{PoolAddr, PoolProvider, StaticHeaplessMemoryPool};
use satrs::static_subpool;
// global logger + panicking-behavior + memory layout
use satrs_stm32h7_nucleo_rtic as _;
use smoltcp::socket::udp::UdpMetadata;
use smoltcp::socket::{dhcpv4, udp};
use core::mem::MaybeUninit;
use embedded_alloc::Heap;
use smoltcp::iface::{Config, Interface, SocketHandle, SocketSet, SocketStorage};
use smoltcp::wire::{HardwareAddress, IpAddress, IpCidr};
use stm32h7xx_hal::ethernet;
const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
const PORT: u16 = 7301;
const HEAP_SIZE: usize = 131_072;
const TC_SOURCE_CHANNEL_DEPTH: usize = 16;
pub type SharedPool = StaticHeaplessMemoryPool<3>;
pub type TcSourceChannel = rtic_sync::channel::Channel<PoolAddr, TC_SOURCE_CHANNEL_DEPTH>;
pub type TcSourceTx = rtic_sync::channel::Sender<'static, PoolAddr, TC_SOURCE_CHANNEL_DEPTH>;
pub type TcSourceRx = rtic_sync::channel::Receiver<'static, PoolAddr, TC_SOURCE_CHANNEL_DEPTH>;
#[global_allocator]
static HEAP: Heap = Heap::empty();
// We place the memory pool buffers inside the larger AXISRAM.
pub const SUBPOOL_SMALL_NUM_BLOCKS: u16 = 32;
pub const SUBPOOL_SMALL_BLOCK_SIZE: usize = 32;
pub const SUBPOOL_MEDIUM_NUM_BLOCKS: u16 = 16;
pub const SUBPOOL_MEDIUM_BLOCK_SIZE: usize = 128;
pub const SUBPOOL_LARGE_NUM_BLOCKS: u16 = 8;
pub const SUBPOOL_LARGE_BLOCK_SIZE: usize = 2048;
// This data will be held by Net through a mutable reference
pub struct NetStorageStatic<'a> {
socket_storage: [SocketStorage<'a>; 8],
}
// MaybeUninit allows us to write code that is correct even if STORE is not
// initialised by the runtime
static mut STORE: MaybeUninit<NetStorageStatic> = MaybeUninit::uninit();
static mut UDP_RX_META: [udp::PacketMetadata; 12] = [udp::PacketMetadata::EMPTY; 12];
static mut UDP_RX: [u8; 2048] = [0; 2048];
static mut UDP_TX_META: [udp::PacketMetadata; 12] = [udp::PacketMetadata::EMPTY; 12];
static mut UDP_TX: [u8; 2048] = [0; 2048];
/// Locally administered MAC address
const MAC_ADDRESS: [u8; 6] = [0x02, 0x00, 0x11, 0x22, 0x33, 0x44];
pub struct Net {
iface: Interface,
ethdev: ethernet::EthernetDMA<4, 4>,
dhcp_handle: SocketHandle,
}
impl Net {
pub fn new(
sockets: &mut SocketSet<'static>,
mut ethdev: ethernet::EthernetDMA<4, 4>,
ethernet_addr: HardwareAddress,
) -> Self {
let config = Config::new(ethernet_addr);
let mut iface = Interface::new(
config,
&mut ethdev,
smoltcp::time::Instant::from_millis((Systick::now() - Systick::ZERO).to_millis()),
);
// Create sockets
let dhcp_socket = dhcpv4::Socket::new();
iface.update_ip_addrs(|addrs| {
let _ = addrs.push(IpCidr::new(IpAddress::v4(192, 168, 1, 99), 0));
});
let dhcp_handle = sockets.add(dhcp_socket);
Net {
iface,
ethdev,
dhcp_handle,
}
}
/// Polls on the ethernet interface. You should refer to the smoltcp
/// documentation for poll() to understand how to call poll efficiently
pub fn poll<'a>(&mut self, sockets: &'a mut SocketSet) -> bool {
let uptime = Systick::now() - Systick::ZERO;
let timestamp = smoltcp::time::Instant::from_millis(uptime.to_millis());
self.iface.poll(timestamp, &mut self.ethdev, sockets)
}
pub fn poll_dhcp<'a>(&mut self, sockets: &'a mut SocketSet) -> Option<dhcpv4::Event<'a>> {
let opt_event = sockets.get_mut::<dhcpv4::Socket>(self.dhcp_handle).poll();
if let Some(event) = &opt_event {
match event {
dhcpv4::Event::Deconfigured => {
defmt::info!("DHCP lost configuration");
self.iface.update_ip_addrs(|addrs| addrs.clear());
self.iface.routes_mut().remove_default_ipv4_route();
}
dhcpv4::Event::Configured(config) => {
defmt::info!("DHCP configuration acquired");
defmt::info!("IP address: {}", config.address);
self.iface.update_ip_addrs(|addrs| {
addrs.clear();
addrs.push(IpCidr::Ipv4(config.address)).unwrap();
});
if let Some(router) = config.router {
defmt::debug!("Default gateway: {}", router);
self.iface
.routes_mut()
.add_default_ipv4_route(router)
.unwrap();
} else {
defmt::debug!("Default gateway: None");
self.iface.routes_mut().remove_default_ipv4_route();
}
}
}
}
opt_event
}
}
pub struct UdpNet {
udp_handle: SocketHandle,
last_client: Option<UdpMetadata>,
tc_source_tx: TcSourceTx,
}
impl UdpNet {
pub fn new<'sockets>(sockets: &mut SocketSet<'sockets>, tc_source_tx: TcSourceTx) -> Self {
// SAFETY: The RX and TX buffers are passed here and not used anywhere else.
let udp_rx_buffer =
smoltcp::socket::udp::PacketBuffer::new(unsafe { &mut UDP_RX_META[..] }, unsafe {
&mut UDP_RX[..]
});
let udp_tx_buffer =
smoltcp::socket::udp::PacketBuffer::new(unsafe { &mut UDP_TX_META[..] }, unsafe {
&mut UDP_TX[..]
});
let udp_socket = smoltcp::socket::udp::Socket::new(udp_rx_buffer, udp_tx_buffer);
let udp_handle = sockets.add(udp_socket);
Self {
udp_handle,
last_client: None,
tc_source_tx,
}
}
pub fn poll<'sockets>(
&mut self,
sockets: &'sockets mut SocketSet,
shared_pool: &mut SharedPool,
) {
let socket = sockets.get_mut::<udp::Socket>(self.udp_handle);
if !socket.is_open() {
if let Err(e) = socket.bind(PORT) {
defmt::warn!("binding UDP socket failed: {}", e);
}
}
loop {
match socket.recv() {
Ok((data, client)) => {
match shared_pool.add(data) {
Ok(store_addr) => {
if let Err(e) = self.tc_source_tx.try_send(store_addr) {
defmt::warn!("TC source channel is full: {}", e);
}
}
Err(e) => {
defmt::warn!("could not add UDP packet to shared pool: {}", e);
}
}
self.last_client = Some(client);
// TODO: Implement packet wiretapping.
}
Err(e) => match e {
udp::RecvError::Exhausted => {
break;
}
udp::RecvError::Truncated => {
defmt::warn!("UDP packet was truncacted");
}
},
};
}
}
}
#[app(device = stm32h7xx_hal::stm32, peripherals = true)]
mod app {
use core::ptr::addr_of_mut;
use super::*;
use rtic_monotonics::systick::fugit::MillisDurationU32;
use rtic_monotonics::systick::Systick;
use satrs::spacepackets::ecss::tc::PusTcReader;
use stm32h7xx_hal::ethernet::{EthernetMAC, PHY};
use stm32h7xx_hal::gpio::{Output, Pin};
use stm32h7xx_hal::prelude::*;
use stm32h7xx_hal::stm32::Interrupt;
struct BlinkyLeds {
led1: Pin<'B', 7, Output>,
led2: Pin<'B', 14, Output>,
}
#[local]
struct Local {
leds: BlinkyLeds,
link_led: Pin<'B', 0, Output>,
net: Net,
udp: UdpNet,
tc_source_rx: TcSourceRx,
phy: ethernet::phy::LAN8742A<EthernetMAC>,
}
#[shared]
struct Shared {
blink_freq: MillisDurationU32,
eth_link_up: bool,
sockets: SocketSet<'static>,
shared_pool: SharedPool,
}
#[init]
fn init(mut cx: init::Context) -> (Shared, Local) {
defmt::println!("Starting sat-rs demo application for the STM32H743ZIT");
let pwr = cx.device.PWR.constrain();
let pwrcfg = pwr.freeze();
let rcc = cx.device.RCC.constrain();
// Try to keep the clock configuration similar to one used in STM examples:
// https://github.com/STMicroelectronics/STM32CubeH7/blob/master/Projects/NUCLEO-H743ZI/Examples/GPIO/GPIO_EXTI/Src/main.c
let ccdr = rcc
.sys_ck(400.MHz())
.hclk(200.MHz())
.use_hse(8.MHz())
.bypass_hse()
.pclk1(100.MHz())
.pclk2(100.MHz())
.pclk3(100.MHz())
.pclk4(100.MHz())
.freeze(pwrcfg, &cx.device.SYSCFG);
// Initialize the systick interrupt & obtain the token to prove that we did
let systick_mono_token = rtic_monotonics::create_systick_token!();
Systick::start(
cx.core.SYST,
ccdr.clocks.sys_ck().to_Hz(),
systick_mono_token,
);
// These are used in the smoltcp examples of the stm32h7xx-hal; I am not fully sure what they
// are good for.
cx.core.SCB.enable_icache();
cx.core.DWT.enable_cycle_counter();
let gpioa = cx.device.GPIOA.split(ccdr.peripheral.GPIOA);
let gpiob = cx.device.GPIOB.split(ccdr.peripheral.GPIOB);
let gpioc = cx.device.GPIOC.split(ccdr.peripheral.GPIOC);
let gpiog = cx.device.GPIOG.split(ccdr.peripheral.GPIOG);
let link_led = gpiob.pb0.into_push_pull_output();
let mut led1 = gpiob.pb7.into_push_pull_output();
let mut led2 = gpiob.pb14.into_push_pull_output();
// Criss-cross pattern looks cooler.
led1.set_high();
led2.set_low();
let leds = BlinkyLeds { led1, led2 };
let rmii_ref_clk = gpioa.pa1.into_alternate::<11>();
let rmii_mdio = gpioa.pa2.into_alternate::<11>();
let rmii_mdc = gpioc.pc1.into_alternate::<11>();
let rmii_crs_dv = gpioa.pa7.into_alternate::<11>();
let rmii_rxd0 = gpioc.pc4.into_alternate::<11>();
let rmii_rxd1 = gpioc.pc5.into_alternate::<11>();
let rmii_tx_en = gpiog.pg11.into_alternate::<11>();
let rmii_txd0 = gpiog.pg13.into_alternate::<11>();
let rmii_txd1 = gpiob.pb13.into_alternate::<11>();
let mac_addr = smoltcp::wire::EthernetAddress::from_bytes(&MAC_ADDRESS);
/// Ethernet descriptor rings are a global singleton
#[link_section = ".sram3.eth"]
static mut DES_RING: MaybeUninit<ethernet::DesRing<4, 4>> = MaybeUninit::uninit();
let (eth_dma, eth_mac) = ethernet::new(
cx.device.ETHERNET_MAC,
cx.device.ETHERNET_MTL,
cx.device.ETHERNET_DMA,
(
rmii_ref_clk,
rmii_mdio,
rmii_mdc,
rmii_crs_dv,
rmii_rxd0,
rmii_rxd1,
rmii_tx_en,
rmii_txd0,
rmii_txd1,
),
// SAFETY: We do not move the returned DMA struct across thread boundaries, so this
// should be safe according to the docs.
unsafe { DES_RING.assume_init_mut() },
mac_addr,
ccdr.peripheral.ETH1MAC,
&ccdr.clocks,
);
// Initialise ethernet PHY...
let mut lan8742a = ethernet::phy::LAN8742A::new(eth_mac.set_phy_addr(0));
lan8742a.phy_reset();
lan8742a.phy_init();
unsafe {
ethernet::enable_interrupt();
cx.core.NVIC.set_priority(Interrupt::ETH, 196); // Mid prio
cortex_m::peripheral::NVIC::unmask(Interrupt::ETH);
}
// unsafe: mutable reference to static storage, we only do this once
let store = unsafe {
let store_ptr = STORE.as_mut_ptr();
// Initialise the socket_storage field. Using `write` instead of
// assignment via `=` to not call `drop` on the old, uninitialised
// value
addr_of_mut!((*store_ptr).socket_storage).write([SocketStorage::EMPTY; 8]);
// Now that all fields are initialised we can safely use
// assume_init_mut to return a mutable reference to STORE
STORE.assume_init_mut()
};
let (tc_source_tx, tc_source_rx) =
rtic_sync::make_channel!(PoolAddr, TC_SOURCE_CHANNEL_DEPTH);
let mut sockets = SocketSet::new(&mut store.socket_storage[..]);
let net = Net::new(&mut sockets, eth_dma, mac_addr.into());
let udp = UdpNet::new(&mut sockets, tc_source_tx);
let mut shared_pool: SharedPool = StaticHeaplessMemoryPool::new(true);
static_subpool!(
SUBPOOL_SMALL,
SUBPOOL_SMALL_SIZES,
SUBPOOL_SMALL_NUM_BLOCKS as usize,
SUBPOOL_SMALL_BLOCK_SIZE,
link_section = ".axisram"
);
static_subpool!(
SUBPOOL_MEDIUM,
SUBPOOL_MEDIUM_SIZES,
SUBPOOL_MEDIUM_NUM_BLOCKS as usize,
SUBPOOL_MEDIUM_BLOCK_SIZE,
link_section = ".axisram"
);
static_subpool!(
SUBPOOL_LARGE,
SUBPOOL_LARGE_SIZES,
SUBPOOL_LARGE_NUM_BLOCKS as usize,
SUBPOOL_LARGE_BLOCK_SIZE,
link_section = ".axisram"
);
shared_pool
.grow(
unsafe { SUBPOOL_SMALL.assume_init_mut() },
unsafe { SUBPOOL_SMALL_SIZES.assume_init_mut() },
SUBPOOL_SMALL_NUM_BLOCKS,
true,
)
.expect("growing heapless memory pool failed");
shared_pool
.grow(
unsafe { SUBPOOL_MEDIUM.assume_init_mut() },
unsafe { SUBPOOL_MEDIUM_SIZES.assume_init_mut() },
SUBPOOL_MEDIUM_NUM_BLOCKS,
true,
)
.expect("growing heapless memory pool failed");
shared_pool
.grow(
unsafe { SUBPOOL_LARGE.assume_init_mut() },
unsafe { SUBPOOL_LARGE_SIZES.assume_init_mut() },
SUBPOOL_LARGE_NUM_BLOCKS,
true,
)
.expect("growing heapless memory pool failed");
// Set up global allocator. Use AXISRAM for the heap.
#[link_section = ".axisram"]
static mut HEAP_MEM: [MaybeUninit<u8>; HEAP_SIZE] = [MaybeUninit::uninit(); HEAP_SIZE];
unsafe { HEAP.init(HEAP_MEM.as_ptr() as usize, HEAP_SIZE) }
eth_link_check::spawn().expect("eth link check failed");
blinky::spawn().expect("spawning blink task failed");
udp_task::spawn().expect("spawning UDP task failed");
tc_source_task::spawn().expect("spawning TC source task failed");
(
Shared {
blink_freq: MillisDurationU32::from_ticks(DEFAULT_BLINK_FREQ_MS),
eth_link_up: false,
sockets,
shared_pool,
},
Local {
link_led,
leds,
net,
udp,
tc_source_rx,
phy: lan8742a,
},
)
}
#[task(local = [leds], shared=[blink_freq])]
async fn blinky(mut cx: blinky::Context) {
let leds = cx.local.leds;
loop {
leds.led1.toggle();
leds.led2.toggle();
let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
Systick::delay(current_blink_freq).await;
}
}
/// This task checks for the network link.
#[task(local=[link_led, phy], shared=[eth_link_up])]
async fn eth_link_check(mut cx: eth_link_check::Context) {
let phy = cx.local.phy;
let link_led = cx.local.link_led;
loop {
let link_was_up = cx.shared.eth_link_up.lock(|link_up| *link_up);
if phy.poll_link() {
if !link_was_up {
link_led.set_high();
cx.shared.eth_link_up.lock(|link_up| *link_up = true);
defmt::info!("Ethernet link up");
}
} else if link_was_up {
link_led.set_low();
cx.shared.eth_link_up.lock(|link_up| *link_up = false);
defmt::info!("Ethernet link down");
}
Systick::delay(100.millis()).await;
}
}
#[task(binds=ETH, local=[net], shared=[sockets])]
fn eth_isr(mut cx: eth_isr::Context) {
// SAFETY: We do not write the register mentioned inside the docs anywhere else.
unsafe {
ethernet::interrupt_handler();
}
// Check and process ETH frames and DHCP. UDP is checked in a different task.
cx.shared.sockets.lock(|sockets| {
cx.local.net.poll(sockets);
cx.local.net.poll_dhcp(sockets);
});
}
/// This task routes UDP packets.
#[task(local=[udp], shared=[sockets, shared_pool])]
async fn udp_task(mut cx: udp_task::Context) {
loop {
cx.shared.sockets.lock(|sockets| {
cx.shared.shared_pool.lock(|pool| {
cx.local.udp.poll(sockets, pool);
})
});
Systick::delay(40.millis()).await;
}
}
/// This task handles all the incoming telecommands.
#[task(local=[read_buf: [u8; 1024] = [0; 1024], tc_source_rx], shared=[shared_pool])]
async fn tc_source_task(mut cx: tc_source_task::Context) {
loop {
let recv_result = cx.local.tc_source_rx.recv().await;
match recv_result {
Ok(pool_addr) => {
cx.shared.shared_pool.lock(|pool| {
match pool.read(&pool_addr, cx.local.read_buf.as_mut()) {
Ok(packet_len) => {
defmt::info!("received {} bytes in the TC source task", packet_len);
match PusTcReader::new(&cx.local.read_buf[0..packet_len]) {
Ok((packet, _tc_len)) => {
// TODO: Handle packet here or dispatch to dedicated PUS
// handler? Dispatching could simplify some things and make
// the software more scalable.
defmt::info!("received PUS packet: {}", packet);
}
Err(e) => {
defmt::info!("invalid TC format, not a PUS packet: {}", e);
}
}
if let Err(e) = pool.delete(pool_addr) {
defmt::warn!("deleting TC data failed: {}", e);
}
}
Err(e) => {
defmt::warn!("TC packet read failed: {}", e);
}
}
});
}
Err(e) => {
defmt::warn!("TC source reception error: {}", e);
}
};
}
}
}
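The `STORE` initialisation in the `init` function above uses a common pattern for large statics in embedded Rust: reserve the memory as `MaybeUninit`, initialise each field through `addr_of_mut!` so that no reference to uninitialised data is ever created, and only then hand out a reference. The following is a minimal, self-contained sketch of that pattern, with a hypothetical `Storage` type standing in for the actual socket storage:

```rust
use core::mem::MaybeUninit;
use core::ptr::addr_of_mut;

// Hypothetical storage type standing in for the application's socket storage.
struct Storage {
    buf: [u8; 1024],
    len: usize,
}

// Large static which should not be initialised on the stack.
static mut STORAGE: MaybeUninit<Storage> = MaybeUninit::uninit();

/// SAFETY: must be called exactly once, before any other access to STORAGE.
unsafe fn init_storage() -> &'static mut Storage {
    // Get a raw pointer to the payload without creating an intermediate
    // reference to uninitialised memory.
    let ptr: *mut Storage = addr_of_mut!(STORAGE).cast();
    // Initialise every field in place. `write` does not drop the old
    // (uninitialised) value, unlike a normal assignment.
    addr_of_mut!((*ptr).buf).write([0; 1024]);
    addr_of_mut!((*ptr).len).write(0);
    // All fields are initialised now, so handing out a reference is sound.
    &mut *ptr
}

fn main() {
    let storage = unsafe { init_storage() };
    storage.buf[0] = 0xAB;
    storage.len = 1;
    assert_eq!(storage.buf[0], 0xAB);
}
```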

View File

@ -0,0 +1,16 @@
#![no_std]
#![no_main]
use stm32h7_testapp as _; // memory layout + panic handler
// See https://crates.io/crates/defmt-test/0.3.0 for more documentation (e.g. about the 'state'
// feature)
#[defmt_test::tests]
mod tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}

View File

@ -0,0 +1,2 @@
/settings.json
/.cortex-debug.*

View File

@ -0,0 +1,12 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations.
// Extension identifier format: ${publisher}.${name}. Example: vscode.csharp
// List of extensions which should be recommended for users of this workspace.
"recommendations": [
"rust-lang.rust",
"probe-rs.probe-rs-debugger"
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": []
}

View File

@ -0,0 +1,22 @@
{
"version": "0.2.0",
"configurations": [
{
"preLaunchTask": "${defaultBuildTask}",
"type": "probe-rs-debug",
"request": "launch",
"name": "probe-rs Debugging ",
"flashingConfig": {
"flashingEnabled": true
},
"chip": "STM32H743ZITx",
"coreConfigs": [
{
"programBinary": "${workspaceFolder}/target/thumbv7em-none-eabihf/debug/satrs-stm32h7-nucleo-rtic",
"rttEnabled": true,
"svdFile": "STM32H743.svd"
}
]
}
]
}

View File

@ -0,0 +1,20 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
"version": "2.0.0",
"tasks": [
{
"label": "cargo build",
"type": "shell",
"command": "~/.cargo/bin/cargo", // note: full path to the cargo
"args": [
"build"
],
"group": {
"kind": "build",
"isDefault": true
}
},
]
}

View File

@ -166,7 +166,7 @@ Subsystem<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:
<y:Geometry height="30.0" width="125.0" x="1151.9280499999995" y="281.84403125000006"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="14" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="20.296875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="76.255859375" x="24.3720703125" xml:space="preserve" y="4.8515625">TM Funnel<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="14" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="20.296875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="58.837890625" x="33.0810546875" xml:space="preserve" y="4.8515625">TM Sink<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
@ -260,7 +260,7 @@ Mode Tree<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:
<y:Geometry height="57.265600000000006" width="631.1152" x="810.8847999999999" y="411.39428125"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="261.8125" x="166.89412267941418" xml:space="preserve" y="3.144146301369915">satrs-satellite
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="261.8125" x="166.89412267941418" xml:space="preserve" y="3.144146301369915">satrs-minisim
Simulator based on asynchronix<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="-0.028136269449041573" nodeRatioY="-0.08493150684931505" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
@ -272,7 +272,7 @@ Simulator based on asynchronix<y:LabelModel><y:SmartNodeLabelModel distance="4.0
<y:Geometry height="50.0" width="631.1152000000002" x="810.8847999999998" y="476.2958625000002"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="374.8359375" x="110.3824039294143" xml:space="preserve" y="0.12842465753431043">satrs-tmtc
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="374.8359375" x="110.3824039294143" xml:space="preserve" y="0.12842465753431043">pytmtc
Command-line interface based TMTC handling<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="-0.028136269449041573" nodeRatioY="-0.08493150684931505" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>

File diff suppressed because it is too large.

BIN misc/satrs-logo-v2.png: new binary file (49 KiB), not shown

View File

@ -15,7 +15,7 @@ action commanding could look like.
2. Target ID and Action String based. The target ID is the same as in the first proposal, but
the unique action is identified by a string.
The framework provides an `ActionRequest` abstraction to model both of these cases.
The library provides an `ActionRequest` abstraction to model both of these cases.
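As a rough illustration of the two commanding variants described above, an action request abstraction could be modelled along the following lines. This is a simplified, hypothetical sketch for this section and not the actual `ActionRequest` type provided by the library:

```rust
/// Hypothetical action identifier which is either a unique numeric ID or a string.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ActionIdentifier {
    /// Unique action ID, for example derived from a mission database.
    Id(u32),
    /// Human readable action name.
    Name(String),
}

/// Hypothetical action request addressed to a target component.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ActionRequestSketch {
    /// ID of the component which should execute the action.
    pub target_id: u32,
    pub action: ActionIdentifier,
}

fn main() {
    let by_id = ActionRequestSketch {
        target_id: 0x42,
        action: ActionIdentifier::Id(1),
    };
    let by_name = ActionRequestSketch {
        target_id: 0x42,
        action: ActionIdentifier::Name("switch-on-heater".to_string()),
    };
    println!("{by_id:?} {by_name:?}");
}
```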
## Commanding with ECSS PUS 8

View File

@ -17,10 +17,10 @@ it is still centered around small packets. `sat-rs` provides support for these E
standards and also attempts to fill the gap to the internet protocol by providing the following
components.
1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/host/udp_server/index.html).
1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/std/udp_server/index.html).
UDP is already packet based which makes it an excellent fit for exchanging space packets.
2. [TCP TMTC Server Components](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/index.html).
TCP is a stream based protocol, so the framework provides building blocks to parse telemetry
TCP is a stream based protocol, so the library provides building blocks to parse telemetry
from an arbitrary bytestream. Two concrete implementations are provided:
- [TCP spacepackets server](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/struct.TcpSpacepacketsServer.html)
to parse tightly packed CCSDS Spacepackets.
@ -39,8 +39,12 @@ task might be to store all arriving telemetry persistently. This is especially i
space systems which do not have permanent contact like low-earth-orbit (LEO) satellites.
The most important task of a TC source is to deliver the telecommands to the correct recipients.
For modern component oriented software using message passing, this usually includes staged
demultiplexing components to determine where a command needs to be sent.
For component oriented software using message passing, this usually includes staged demultiplexing
components to determine where a command needs to be sent.
Using a generic concept of a TC source and a TM sink as part of the software design improves
the flexibility of the TMTC infrastructure: newly added TM generators and TC receivers only have to
forward their generated or received packets to those handler objects.
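The following is a minimal, hypothetical sketch of this pattern using plain standard library channels rather than any specific sat-rs component: packet producers only hold a sending handle to the TC source, and a TC source task performs the staged demultiplexing by a target ID. The `Telecommand` type and the target IDs are made up for illustration.

```rust
use std::sync::mpsc;

/// Hypothetical telecommand with a routing identifier and raw packet data.
struct Telecommand {
    target_id: u32,
    packet: Vec<u8>,
}

fn main() {
    // Any number of TC receivers (e.g. a UDP and a TCP server) only need
    // the sender handle of this channel.
    let (tc_source_tx, tc_source_rx) = mpsc::channel::<Telecommand>();

    // Channels towards the actual recipients.
    let (acs_tx, acs_rx) = mpsc::channel::<Telecommand>();
    let (eps_tx, eps_rx) = mpsc::channel::<Telecommand>();

    tc_source_tx
        .send(Telecommand { target_id: 1, packet: vec![0x17, 0x01] })
        .unwrap();
    drop(tc_source_tx);

    // TC source task: stage which demultiplexes commands by target ID.
    for tc in tc_source_rx {
        match tc.target_id {
            1 => acs_tx.send(tc).unwrap(),
            2 => eps_tx.send(tc).unwrap(),
            other => eprintln!("no recipient for target ID {other}"),
        }
    }

    assert!(acs_rx.try_recv().is_ok());
    assert!(eps_rx.try_recv().is_err());
}
```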
# Low-level protocols and the bridge to the communication subsystem

View File

@ -1,13 +1,14 @@
# Framework Design
# Library Design
Satellites and space systems in general are complex systems with a wide range of requirements for
both the hardware and the software. Consequently, the general design of the framework is centered
both the hardware and the software. Consequently, the general design of the library is centered
around many light-weight components which try to impose as few restrictions as possible on how to
solve certain problems.
solve certain problems. This is also the reason why sat-rs is explicitly called a library
instead of a framework.
There are still a lot of common patterns and architectures across these systems where guidance
on how to solve a problem and a common structure would still be extremely useful to avoid pitfalls
which were already solved and to avoid boilerplate code. This framework tries to provide this
which were already solved and to avoid boilerplate code. This library tries to provide this
structure and guidance the following way:
1. Providing this book which explains the architecture and design patterns in respect to common
@ -18,7 +19,7 @@ structure and guidance the following way:
3. Providing a good test suite. This includes both unit tests and integration tests. The integration
tests can also serve as smaller usage examples than the large `satrs-example` application.
This framework has special support for standards used in the space industry. This especially
This library has special support for standards used in the space industry. This especially
includes standards provided by the Consultative Committee for Space Data Systems (CCSDS) and the
European Cooperation for Space Standardization (ECSS). It does not enforce using any of those standards,
but it is always recommended to use some sort of standard for interoperability.
@ -30,10 +31,10 @@ Flying Laptop Project by the University of Stuttgart with Airbus Defence and Spa
It has flight heritage through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
Therefore, a lot of the design concepts were ported more or less unchanged to the `sat-rs`
framework.
library.
FLP is a medium-sized small satellite with a higher budget and longer development time than EIVE,
which allowed building a highly reliable system, while EIVE is a smaller 6U+ cubesat which had a
shorter development cycle and was built using cheaper COTS components. This framework also tries
shorter development cycle and was built using cheaper COTS components. This library also tries
to accumulate the knowledge of developing the OBSW and operating the satellite for both these
different systems and provide a solution for a wider range of small satellite systems.

View File

@ -1,16 +1,24 @@
# Events
Events can be an extremely important mechanism used for remote systems to monitor unexpected
or expected anomalies and events occurring on these systems. They are oftentimes tied to
Events are an important mechanism used for remote systems to monitor unexpected
or expected anomalies and events occurring on these systems.
One common use case for events on remote systems is to offer a light-weight publish-subscribe
and IPC mechanism for software and hardware events which are also packaged as telemetry
(TM) or can trigger a system response. They can also be tied to
Fault Detection, Isolation and Recovery (FDIR) operations, which need to happen autonomously.
Events can also be used as a convenient Inter-Process Communication (IPC) mechanism, which is
also observable for the Ground segment. The PUS Service 5 standardizes how the ground interface
for events might look like, but does not specify how other software components might react
to those events. There is the PUS Service 19, which might be used for that purpose, but the
event components recommended by this framework do not really need this service.
The PUS Service 5 standardizes what the ground interface for events might look like, but does not
specify how other software components might react to those events. There is the PUS Service 19,
which might be used for that purpose, but the event components recommended by this framework do not
rely on the presence of this service.
The following image shows what the flow of events could look like in a system where components
can generate events, and where other system components might be interested in those events:
![Event flow](images/events/event_man_arch.png)
For the concrete implementation of your own event management and/or event routing system, you
can have a look at the event management documentation inside the
[API documentation](https://docs.rs/satrs/latest/satrs/event_man/index.html) where you can also
find references to all examples.
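Purely as an illustration of the general idea, and not of the satrs event manager API documented at the link above, the following hypothetical sketch shows components publishing events through a channel and an event manager task forwarding them to subscribers. The `Event` layout and the subscription mechanism are made up for this example:

```rust
use std::collections::HashMap;
use std::sync::mpsc;

/// Hypothetical event identifier consisting of a group ID and a unique ID.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Event {
    group_id: u16,
    unique_id: u16,
}

fn main() {
    // Components publish events through this channel.
    let (event_tx, event_rx) = mpsc::channel::<Event>();
    // Subscribers register their interest in specific events.
    let (fdir_tx, fdir_rx) = mpsc::channel::<Event>();
    let mut subscriptions: HashMap<Event, Vec<mpsc::Sender<Event>>> = HashMap::new();
    let heater_fault = Event { group_id: 3, unique_id: 1 };
    subscriptions.entry(heater_fault).or_default().push(fdir_tx);

    // Some component detects an anomaly and publishes an event.
    event_tx.send(heater_fault).unwrap();
    drop(event_tx);

    // Event manager task: forward each event to its subscribers. In a real
    // system, it would also convert the event into a PUS service 5 TM packet.
    for event in event_rx {
        if let Some(subscribers) = subscriptions.get(&event) {
            for sub in subscribers {
                sub.send(event).ok();
            }
        }
    }

    assert_eq!(fdir_rx.try_recv().unwrap(), heater_fault);
}
```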

View File

@ -1,6 +1,6 @@
# sat-rs Example Application
The `sat-rs` framework includes a monolithic example application which can be found inside
The `sat-rs` library includes a monolithic example application which can be found inside
the [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
subdirectory of the repository. The primary purpose of this example application is to show how
the various components of the sat-rs framework could be used as part of a larger on-board

View File

@ -1,7 +1,7 @@
The sat-rs book
======
This book is the primary information resource for the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
This book is the primary information resource for the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
in addition to the regular API documentation. It contains the following resources:
1. Architecture information and considerations which would exceed the scope of the regular API documentation.
@ -12,10 +12,15 @@ in addition to the regular API documentation. It contains the following resource
# Introduction
The primary goal of the sat-rs framework is to provide re-usable components
The primary goal of the sat-rs library is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements of these systems.
It should be noted that sat-rs is early-stage software. Important features are missing. New releases
with breaking changes are released regularly, with all changes documented inside respective
changelog files. You should only use this library if you are willing to work in this
environment.
A lot of the architecture and general design considerations are based on the
[FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
@ -27,3 +32,14 @@ The [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/m
provides various practical usage examples of the `sat-rs` framework. If you are more interested in
the practical application of `sat-rs` inside an application, it is recommended to have a look at
the example application.
# Flight Heritage
There is an active and continuous effort to get early flight heritage for the sat-rs library.
Currently this library has the following flight heritage:
- Submission as an [OPS-SAT experiment](https://www.esa.int/Enabling_Support/Operations/OPS-SAT)
which has also
[flown on the satellite](https://blogs.esa.int/rocketscience/2024/05/21/ops-sat-reentry-tomorrow-final-experiments-continue/).
The application is strongly based on the sat-rs example application. You can find the repository
of the experiment [here](https://egit.irs.uni-stuttgart.de/rust/ops-sat-rs).

View File

@ -1,11 +1,11 @@
# Modes
Modes are an extremely useful concept for complex system in general. They also allow simplified
system reasoning for both system operators and OBSW developers. They model the behaviour of a
component and also provide observability of a system. A few examples of how to model
different components of a space system with modes will be given.
Modes are an extremely useful concept to model complex systems. They allow simplified
system reasoning for both system operators and OBSW developers. They also provide a way to alter
the behaviour of a component and provide observability of a system. A few examples of how to
model different components within a space system with modes will be given.
## Modelling a physical device with modes
## Physical device component with modes
The following simple mode scheme with the three modes
@ -13,7 +13,8 @@ The following simple mode scheme with the following three mode
- `ON`
- `NORMAL`
can be applied to a large number of simpler devices of a remote system, for example sensors.
can be applied to a large number of simpler device controllers of a remote system, for example
sensors.
1. `OFF` means that a device is physically switched off, and the corresponding software component
does not poll the device regularly.
@ -31,7 +32,7 @@ for the majority of devices:
2. `NORMAL` or `ON` to `OFF`: Any important shutdown configuration or handling must be performed
before powering off the device.
## Modelling a controller with modes
## Controller components with modes
Controller components are not modelling physical devices, but a mode scheme is still the best
way to model most of these components.
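To make the OFF/ON/NORMAL scheme described above more concrete, a device handler component could track its mode with a simple enum and gate its periodic work on it. This is a generic, hypothetical sketch and not the mode API offered by sat-rs:

```rust
/// Hypothetical mode set for a simple device handler.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    /// Device is powered off and not polled.
    Off,
    /// Device is powered on, but no periodic polling is performed.
    On,
    /// Device is powered on and polled periodically.
    Normal,
}

struct DeviceHandler {
    mode: Mode,
}

impl DeviceHandler {
    fn set_mode(&mut self, new_mode: Mode) {
        match (self.mode, new_mode) {
            // OFF to ON or NORMAL: apply the device start-up configuration here.
            (Mode::Off, Mode::On | Mode::Normal) => { /* power on, configure */ }
            // NORMAL or ON to OFF: perform shutdown handling before powering off.
            (Mode::On | Mode::Normal, Mode::Off) => { /* graceful shutdown */ }
            _ => {}
        }
        self.mode = new_mode;
    }

    fn periodic_op(&mut self) {
        // Only poll the physical device in NORMAL mode.
        if self.mode == Mode::Normal {
            // poll device, generate housekeeping data, ...
        }
    }
}

fn main() {
    let mut handler = DeviceHandler { mode: Mode::Off };
    handler.set_mode(Mode::Normal);
    handler.periodic_op();
    handler.set_mode(Mode::Off);
}
```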

View File

@ -1,66 +0,0 @@
{
/*
* Requires the Rust Language Server (RLS) and Cortex-Debug extensions
* https://marketplace.visualstudio.com/items?itemName=rust-lang.rust
* https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug
*/
"version": "0.2.0",
"configurations": [
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Debug",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
},
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Release",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/release/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
}
]
}

View File

@ -1,3 +0,0 @@
{
"cortex-debug.gdbPath.linux": "gdb-multiarch"
}

View File

@ -1,59 +0,0 @@
[package]
name = "satrs-example-stm32f3-disco"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
cortex-m = "0.7"
cortex-m-rt = "0.7"
embedded-hal = "0.2.6"
cortex-m-rtic = "1.0"
enumset = "1.0"
heapless = "0.7"
systick-monotonic = "1.0"
[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
branch = "all_features"
default-features = false
[dependencies.panic-itm]
version = "0.4"
[dependencies.itm_logger]
git = "https://github.com/robamu/itm_logger.rs.git"
branch = "all_features"
version = "0.1.3-alpha.0"
[dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.10.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"]
branch = "all_features"
# Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0"
branch = "all_features"
# Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery"
[dependencies.satrs-core]
git = "https://egit.irs.uni-stuttgart.de/rust/satrs-core.git"
version = "0.1.0-alpha.0"
default-features = false
# this lets you use `cargo fix`!
# [[bin]]
# name = "stm32f3-blinky"
# test = false
# bench = false
[profile.release]
codegen-units = 1 # better optimizations
debug = true # symbols are nice and they don't increase the size on Flash
lto = true # better optimizations

View File

@ -1,75 +0,0 @@
sat-rs example for the STM32F3-Discovery board
=======
This example application shows how the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad)
can be used on an embedded target. It also shows how a relatively simple OBSW could be built when no
standard runtime is available. It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency
framework.
The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also
used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the
[Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction
to embedded Rust.
## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
If you have not installed it yet, you can do so with
```sh
rustup target add thumbv7em-none-eabihf
```
A default `.cargo` config file is provided for this project, but needs to be copied to have
the correct name. This is so that the config file can be updated or edited for custom needs
without being tracked by git.
```sh
cp def_config.toml config.toml
```
The configuration file will also set the target so it does not always have to be specified with
the `--target` argument.
## Building
After that, assuming that you have a `.cargo/config.toml` setting the correct build target,
you can simply build the application with
```sh
cargo build
```
## Flashing and Debugging from the command line
TODO
## Debugging with VS Code
The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug
the board is a Mini-USB cable. The code in this repository was debugged using `openocd`
and the VS Code [`Cortex-Debug` plugin](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug).
Some sample configuration files for VS Code were provided as well. You can simply use `Run` and `Debug`
to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS code or adding it to a workspace.
If you would like to use a custom GDB application, you can specify the gdb binary in the following
configuration variables in your `settings.json`:
- `"cortex-debug.gdbPath"`
- `"cortex-debug.gdbPath.linux"`
- `"cortex-debug.gdbPath.windows"`
- `"cortex-debug.gdbPath.osx"`
## Commanding with Python
When the SW is running on the Discovery board, you can command the MCU via a serial interface,
using COBS encoded CCSDS packets.
TODO:
- How and where to connect serial interface on the MCU
- How to set up Python venv (or at least strongly recommend it) and install deps
- How to copy `def_tmtc_conf.json` to `tmtc_conf.json` and adapt it for custom serial port

View File

@ -1,18 +0,0 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
// Put the linker script somewhere the linker can find it
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
File::create(out.join("memory.x"))
.unwrap()
.write_all(include_bytes!("memory.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out.display());
// Only re-run the build script when memory.x is changed,
// instead of when any part of the source code changes.
println!("cargo:rerun-if-changed=memory.x");
}

View File

@ -1,604 +0,0 @@
#![no_std]
#![no_main]
extern crate panic_itm;
use rtic::app;
use heapless::{
mpmc::Q16,
pool,
pool::singleton::{Box, Pool},
};
#[allow(unused_imports)]
use itm_logger::{debug, info, logger_init, warn};
use satrs_core::spacepackets::{ecss::PusPacket, tm::PusTm};
use satrs_core::{
pus::{EcssTmErrorWithSend, EcssTmSenderCore},
seq_count::SequenceCountProviderCore,
};
use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
use systick_monotonic::{fugit::Duration, Systick};
const UART_BAUD: u32 = 115200;
const BLINK_FREQ_MS: u64 = 1000;
const TX_HANDLER_FREQ_MS: u64 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u16 = 5;
const MAX_TC_LEN: usize = 200;
const MAX_TM_LEN: usize = 200;
pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type MsDuration = Duration<u64, 1, 1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
// This is the predictable maximum overhead of the COBS encoding scheme.
// It is simply the maximum packet length divided by 254 rounded up.
const COBS_TC_OVERHEAD: usize = (MAX_TC_LEN + 254 - 1) / 254;
const COBS_TM_OVERHEAD: usize = (MAX_TM_LEN + 254 - 1) / 254;
const TC_BUF_LEN: usize = MAX_TC_LEN + COBS_TC_OVERHEAD;
const TM_BUF_LEN: usize = MAX_TC_LEN + COBS_TM_OVERHEAD;
// This is a static buffer which should ONLY (!) be used as the TX DMA
// transfer buffer.
static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// This is a static buffer which should ONLY (!) be used as the RX DMA
// transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
static TX_REQUESTS: Q16<(Box<poolmod::TM>, usize)> = Q16::new();
const TC_POOL_SLOTS: usize = 12;
const TM_POOL_SLOTS: usize = 12;
use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef {
atomic: AtomicU16,
ordering: Ordering,
}
impl SeqCountProviderAtomicRef {
pub const fn new(ordering: Ordering) -> Self {
Self {
atomic: AtomicU16::new(0),
ordering,
}
}
}
impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
fn get(&self) -> u16 {
self.atomic.load(self.ordering)
}
fn increment(&self) {
self.atomic.fetch_add(1, self.ordering);
}
fn get_and_increment(&self) -> u16 {
self.atomic.fetch_add(1, self.ordering)
}
}
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed);
// Otherwise, warnings because of heapless pool macro.
#[allow(non_camel_case_types)]
mod poolmod {
use super::*;
// Must hold full TC length including COBS overhead.
pool!(TC: [u8; TC_BUF_LEN]);
// Only encoded at the end, so no need to account for COBS overhead.
pool!(TM: [u8; MAX_TM_LEN]);
}
pub struct TxIdle {
tx: TxType,
dma_channel: dma1::C7,
}
#[derive(Debug)]
pub enum TmStoreError {
StoreFull,
StoreSlotsTooSmall,
}
impl From<TmStoreError> for EcssTmErrorWithSend<TmStoreError> {
fn from(value: TmStoreError) -> Self {
Self::SendError(value)
}
}
pub struct TmSender {
mem_block: Option<Box<poolmod::TM>>,
ctx: &'static str,
}
impl TmSender {
pub fn new(mem_block: Box<poolmod::TM>, ctx: &'static str) -> Self {
Self {
mem_block: Some(mem_block),
ctx,
}
}
}
impl EcssTmSenderCore for TmSender {
type Error = TmStoreError;
fn send_tm(
&mut self,
tm: PusTm,
) -> Result<(), satrs_core::pus::EcssTmErrorWithSend<Self::Error>> {
let mem_block = self.mem_block.take();
if mem_block.is_none() {
panic!("send_tm should only be called once");
}
let mut mem_block = mem_block.unwrap();
if tm.len_packed() > MAX_TM_LEN {
return Err(EcssTmErrorWithSend::SendError(
TmStoreError::StoreSlotsTooSmall,
));
}
tm.write_to_bytes(mem_block.as_mut_slice())
.map_err(|e| EcssTmErrorWithSend::EcssTmError(e.into()))?;
info!(target: self.ctx, "Sending TM[{},{}] with size {}", tm.service(), tm.subservice(), tm.len_packed());
TX_REQUESTS
.enqueue((mem_block, tm.len_packed()))
.map_err(|_| TmStoreError::StoreFull)?;
Ok(())
}
}
pub enum UartTxState {
// Wrapped in an option because we need an owned type later.
Idle(Option<TxIdle>),
// Same as above
Transmitting(Option<TxDmaTransferType>),
}
#[app(device = stm32f3xx_hal::pac, peripherals = true, dispatchers = [TIM20_BRK, TIM20_UP, TIM20_TRG_COM])]
mod app {
use super::*;
use core::slice::Iter;
use cortex_m::iprintln;
use satrs_core::pus::verification::FailParams;
use satrs_core::pus::verification::VerificationReporterCore;
use satrs_core::spacepackets::{
ecss::EcssEnumU16,
tc::PusTc,
time::cds::P_FIELD_BASE,
tm::{PusTm, PusTmSecondaryHeader},
CcsdsPacket, SpHeader,
};
#[allow(unused_imports)]
use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*;
use stm32f3xx_hal::Toggle;
use stm32f3_discovery::switch_hal::OutputSwitch;
#[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
#[shared]
struct Shared {
tx_transfer: UartTxState,
rx_transfer: Option<RxDmaTransferType>,
}
#[local]
struct Local {
leds: Leds,
last_dir: Direction,
verif_reporter: VerificationReporterCore,
curr_dir: Iter<'static, Direction>,
}
#[monotonic(binds = SysTick, default = true)]
type MonoTimer = Systick<1000>;
#[init(local = [
tc_pool_mem: [u8; TC_BUF_LEN * TC_POOL_SLOTS] = [0; TC_BUF_LEN * TC_POOL_SLOTS],
tm_pool_mem: [u8; MAX_TM_LEN * TM_POOL_SLOTS] = [0; MAX_TM_LEN * TM_POOL_SLOTS]
])]
fn init(mut cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut rcc = cx.device.RCC.constrain();
let mono = Systick::new(cx.core.SYST, 8_000_000);
logger_init();
let mut flash = cx.device.FLASH.constrain();
let clocks = rcc
.cfgr
.use_hse(8.MHz())
.sysclk(8.MHz())
.pclk1(8.MHz())
.freeze(&mut flash.acr);
// setup ITM output
iprintln!(
&mut cx.core.ITM.stim[0],
"Starting sat-rs demo application for the STM32F3-Discovery"
);
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
// Assign memory to the pools.
poolmod::TC::grow(cx.local.tc_pool_mem);
poolmod::TM::grow(cx.local.tm_pool_mem);
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
let leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper,
);
let mut gpioa = cx.device.GPIOA.split(&mut rcc.ahb);
// USART2 pins
let mut pins = (
// TX pin: PA2
gpioa
.pa2
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
// RX pin: PA3
gpioa
.pa3
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
);
pins.1.internal_pull_up(&mut gpioa.pupdr, true);
let mut usart2 = Serial::new(
cx.device.USART2,
pins,
UART_BAUD.Bd(),
clocks,
&mut rcc.apb1,
);
usart2.configure_rx_interrupt(RxEvent::Idle, Toggle::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Toggle::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event)
rx_serial.clear_event(RxEvent::Idle);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
info!(target: "init", "Spawning tasks");
blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap();
(
Shared {
tx_transfer: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
rx_transfer: Some(rx_transfer),
},
Local {
leds,
last_dir: Direction::North,
curr_dir: Direction::iter(),
verif_reporter,
},
init::Monotonics(mono),
)
}
#[task(local = [leds, curr_dir, last_dir])]
fn blink(cx: blink::Context) {
let toggle_leds = |dir: &Direction| {
let leds = cx.local.leds;
let last_led = leds.for_direction(*cx.local.last_dir);
last_led.off().ok();
let led = leds.for_direction(*dir);
led.on().ok();
*cx.local.last_dir = *dir;
};
match cx.local.curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*cx.local.curr_dir = Direction::iter();
toggle_leds(cx.local.curr_dir.next().unwrap());
}
}
blink::spawn_after(MsDuration::from_ticks(BLINK_FREQ_MS)).unwrap();
}
#[task(
shared = [tx_transfer],
local = []
)]
fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
if let Some((buf, len)) = TX_REQUESTS.dequeue() {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(tx) => {
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
let mut_tx_dma_buf = unsafe { &mut DMA_TX_BUF };
// 0 sentinel value as start marker
mut_tx_dma_buf[0] = 0;
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
let encoded_len = cobs::encode(&buf[0..len], &mut mut_tx_dma_buf[1..]);
// 0 end marker
mut_tx_dma_buf[encoded_len + 1] = 0;
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
let transfer = tx_idle
.tx
.write_all(&mut_tx_dma_buf[0..encoded_len + 2], tx_idle.dma_channel);
*tx_state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => {
// This is a SW configuration error. Only the ISR which
// detects transfer completion should be able to spawn a new
// task, and that ISR should set the state to IDLE.
panic!("invalid internal tx state detected")
}
})
} else {
cx.shared.tx_transfer.lock(|tx_state| {
if let UartTxState::Idle(_) = tx_state {
serial_tx_handler::spawn_after(MsDuration::from_ticks(TX_HANDLER_FREQ_MS))
.unwrap();
}
});
}
}
#[task(
local = [
stamp_buf: [u8; 7] = [0; 7],
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
verif_reporter
],
)]
fn serial_rx_handler(
cx: serial_rx_handler::Context,
received_packet: Box<poolmod::TC>,
rx_len: usize,
) {
let tgt: &'static str = "serial_rx_handler";
cx.local.stamp_buf[0] = P_FIELD_BASE;
info!(target: tgt, "Received packet with {} bytes", rx_len);
let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice();
let mut start_idx = None;
for (idx, byte) in packet.iter().enumerate() {
if *byte != 0 {
start_idx = Some(idx);
break;
}
}
if start_idx.is_none() {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames, data is all 0"
);
return;
}
let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => {
info!(target: tgt, "Decoded packet length: {}", len);
let pus_tc = PusTc::from_bytes(decode_buf);
let verif_reporter = cx.local.verif_reporter;
match pus_tc {
Ok((tc, tc_len)) => handle_tc(
tc,
tc_len,
verif_reporter,
cx.local.src_data_buf,
cx.local.stamp_buf,
tgt,
),
Err(e) => {
warn!(target: tgt, "Error unpacking PUS TC: {}", e);
}
}
}
Err(_) => {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames"
)
}
}
}
fn handle_tc(
tc: PusTc,
tc_len: usize,
verif_reporter: &mut VerificationReporterCore,
src_data_buf: &mut [u8; MAX_TM_LEN],
stamp_buf: &[u8; 7],
tgt: &'static str,
) {
info!(
target: tgt,
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
tc_len
);
let token = verif_reporter.add_tc(&tc);
if tc.apid() != PUS_APID {
warn!(target: tgt, "Received tc with unknown APID {}", tc.apid());
let sendable = verif_reporter
.acceptance_failure(
src_data_buf,
token,
&SEQ_COUNT_PROVIDER,
FailParams::new(stamp_buf, &EcssEnumU16::new(0), None),
)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) =
verif_reporter.send_acceptance_failure(sendable, &SEQ_COUNT_PROVIDER, &mut sender)
{
warn!(target: tgt, "Sending acceptance failure failed: {:?}", e.0);
};
return;
}
let sendable = verif_reporter
.acceptance_success(src_data_buf, token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let accepted_token = match verif_reporter.send_acceptance_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token,
Err(e) => {
warn!(target: "serial_rx_handler", "Sending acceptance success failed: {:?}", e.0);
return;
}
};
if tc.service() == 17 {
if tc.subservice() == 1 {
let sendable = verif_reporter
.start_success(src_data_buf, accepted_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let started_token = match verif_reporter.send_start_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token,
Err(e) => {
warn!(target: tgt, "Sending acceptance success failed: {:?}", e.0);
return;
}
};
info!(
target: tgt,
"Received PUS ping telecommand, sending ping reply TM[17,2]"
);
let mut sp_header =
SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
let ping_reply = PusTm::new(&mut sp_header, sec_header, None, true);
let mut mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let reply_len = ping_reply.write_to_bytes(mem_block.as_mut_slice()).unwrap();
if TX_REQUESTS.enqueue((mem_block, reply_len)).is_err() {
warn!(target: tgt, "TC queue full");
return;
}
SEQ_COUNT_PROVIDER.increment();
let sendable = verif_reporter
.completion_success(src_data_buf, started_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) = verif_reporter.send_step_or_completion_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
warn!(target: tgt, "Sending completion success failed: {:?}", e.0);
}
} else {
// TODO: Invalid subservice
}
}
}
#[task(binds = DMA1_CH6, shared = [rx_transfer])]
fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_ref = rx_transfer.as_ref().unwrap();
if rx_ref.is_complete() {
let uart_rx_owned = rx_transfer.take().unwrap();
let (buf, c, rx) = uart_rx_owned.stop();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block.copy_from_slice(buf);
*rx_transfer = Some(rx.read_exact(buf, c));
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, TC_BUF_LEN)
.expect("spawning rx handler task failed");
// If this happens, there is a high chance that the maximum packet length was
// exceeded. Circular mode is not used here, so data might be missed.
warn!(
"rx transfer with maximum length {}, might miss data",
TC_BUF_LEN
);
}
});
}
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_transfer])]
fn serial_isr(mut cx: serial_isr::Context) {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(_) => (),
UartTxState::Transmitting(transfer) => {
let transfer_ref = transfer.as_ref().unwrap();
if transfer_ref.is_complete() {
let transfer = transfer.take().unwrap();
let (_, dma_channel, tx) = transfer.stop();
*tx_state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
serial_tx_handler::spawn_after(MsDuration::from_ticks(
MIN_DELAY_BETWEEN_TX_PACKETS_MS.into(),
))
.unwrap();
}
}
});
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_transfer_ref = rx_transfer.as_ref().unwrap();
// Received a partial packet.
if rx_transfer_ref.is_event_triggered(RxEvent::Idle) {
let rx_transfer_owned = rx_transfer.take().unwrap();
let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
rx.clear_event(RxEvent::Idle);
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, rx_len as usize)
.expect("spawning rx handler task failed");
*rx_transfer = Some(rx.read_exact(buf, ch));
}
});
}
}

View File

@ -17,16 +17,23 @@ zerocopy = "0.6"
csv = "1"
num_enum = "0.7"
thiserror = "1"
lazy_static = "1"
strum = { version = "0.26", features = ["derive"] }
derive-new = "0.5"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
[dependencies.satrs]
version = "0.2.0-rc.0"
# path = "../satrs"
path = "../satrs"
features = ["test_util"]
[dependencies.satrs-mib]
version = "0.1.1"
# path = "../satrs-mib"
path = "../satrs-mib"
[features]
dyn_tmtc = []
default = ["dyn_tmtc"]
[dev-dependencies]
env_logger = "0.11"

View File

@ -4,11 +4,13 @@ import dataclasses
import enum
import struct
from spacepackets.ecss.tc import PacketId, PacketType
EXAMPLE_PUS_APID = 0x02
EXAMPLE_PUS_PACKET_ID_TM = PacketId(PacketType.TM, True, EXAMPLE_PUS_APID)
TM_PACKET_IDS = [EXAMPLE_PUS_PACKET_ID_TM]
class Apid(enum.IntEnum):
SCHED = 1
GENERIC_PUS = 2
ACS = 3
CFDP = 4
TMTC = 5
class EventSeverity(enum.IntEnum):
@ -36,8 +38,8 @@ class EventU32:
)
class RequestTargetId(enum.IntEnum):
ACS = 1
class AcsId(enum.IntEnum):
MGM_0 = 0
class AcsHkIds(enum.IntEnum):

View File

@ -3,10 +3,11 @@
import logging
import sys
import time
from typing import Optional
from typing import Any, Optional
from prompt_toolkit.history import History
from prompt_toolkit.history import FileHistory
from spacepackets.ccsds import PacketId, PacketType
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
@ -16,7 +17,7 @@ from spacepackets.ccsds.time import CdsShortTimestamp
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
@ -46,7 +47,7 @@ from tmtccmd.util.obj_id import ObjectIdDictT
import pus_tc
from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32
from common import Apid, EventU32
_LOGGER = logging.getLogger()
@ -62,10 +63,13 @@ class SatRsConfigHook(HookBase):
)
assert self.cfg_path is not None
packet_id_list = []
for apid in Apid:
packet_id_list.append(PacketId(PacketType.TM, True, apid))
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
space_packet_ids=TM_PACKET_IDS,
space_packet_ids=packet_id_list,
)
assert cfg is not None
return create_com_interface_default(cfg)
@ -85,21 +89,23 @@ class SatRsConfigHook(HookBase):
return get_core_object_ids()
class PusHandler(SpecificApidHandlerBase):
class PusHandler(GenericApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
super().__init__(None)
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: any):
def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
try:
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
pus_tm = PusTelemetry.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
@ -107,7 +113,7 @@ class PusHandler(SpecificApidHandlerBase):
service = pus_tm.service
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
@ -124,7 +130,9 @@ class PusHandler(SpecificApidHandlerBase):
elif service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
pus_tm = PusTelemetry.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
@ -132,16 +140,18 @@ class PusHandler(SpecificApidHandlerBase):
_LOGGER.info(json_str)
elif service == 5:
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
src_data = tm_packet.source_data
event_u32 = EventU32.unpack(src_data)
_LOGGER.info(f"Received event packet. Event: {event_u32}")
_LOGGER.info(
f"Received event packet. Source APID: {Apid(tm_packet.apid)!r}, Event: {event_u32}"
)
if event_u32.group_id == 0 and event_u32.unique_id == 0:
_LOGGER.info("Received test event")
elif service == 17:
tm_packet = Service17Tm.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
if tm_packet.subservice == 2:
self.file_logger.info("Received Ping Reply TM[17,2]")
@ -158,7 +168,7 @@ class PusHandler(SpecificApidHandlerBase):
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(
packet, time_reader=CdsShortTimestamp.empty()
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
self.raw_logger.log_tm(pus_tm)
@ -177,7 +187,7 @@ class TcHandler(TcHandlerBase):
tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
seq_cnt_provider=seq_count_provider,
pus_verificator=self.verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
default_pus_apid=None,
)
def send_cb(self, send_params: SendCbParams):
@ -193,15 +203,15 @@ class TcHandler(TcHandlerBase):
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
assert def_proc.cmd_path is not None
pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
@ -221,7 +231,6 @@ def main():
post_args_wrapper.set_params_without_prompts(proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
@ -233,8 +242,9 @@ def main():
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
# TODO: We could add the CFDP handler for the CFDP APID at a later stage.
# ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
seq_count_provider = PusFileSeqCountProvider()
@ -252,6 +262,7 @@ def main():
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
tmtc_backend.close_com_if()
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
_LOGGER.info("TMTC Client in IDLE mode")
@ -266,6 +277,7 @@ def main():
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
tmtc_backend.close_com_if()
sys.exit(0)

View File

@ -0,0 +1,143 @@
import datetime
import struct
import logging
from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTelecommand
from tmtccmd.config import CmdTreeNode
from tmtccmd.pus.tc.s200_fsfw_mode import Mode
from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
from common import AcsId, Apid
_LOGGER = logging.getLogger(__name__)
def create_set_mode_cmd(
apid: int, unique_id: int, mode: int, submode: int
) -> PusTelecommand:
app_data = bytearray()
app_data.extend(struct.pack("!I", unique_id))
app_data.extend(struct.pack("!I", mode))
app_data.extend(struct.pack("!H", submode))
return PusTelecommand(
service=200,
subservice=ModeSubservice.TC_MODE_COMMAND,
apid=apid,
app_data=app_data,
)
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
hk_node.add_child(CmdTreeNode("one_shot_hk", "Request One Shot HK set"))
hk_node.add_child(
CmdTreeNode("enable", "Enable periodic housekeeping data generation")
)
hk_node.add_child(
CmdTreeNode("disable", "Disable periodic housekeeping data generation")
)
mode_node = CmdTreeNode("mode", "Mode Node", hide_children_for_print=True)
set_mode_node = CmdTreeNode(
"set_mode", "Set Node", hide_children_which_are_leaves=True
)
set_mode_node.add_child(CmdTreeNode("off", "Set OFF Mode"))
set_mode_node.add_child(CmdTreeNode("on", "Set ON Mode"))
set_mode_node.add_child(CmdTreeNode("normal", "Set NORMAL Mode"))
mode_node.add_child(set_mode_node)
mode_node.add_child(CmdTreeNode("read_mode", "Read Mode"))
test_node = CmdTreeNode("test", "Test Node")
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
root_node.add_child(test_node)
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
scheduler_node.add_child(
CmdTreeNode(
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
)
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(mode_node)
mgm_node.add_child(hk_node)
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
return root_node
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
# It should always be at least the root path "/", so we split off the empty portion left of it.
cmd_path_list = cmd_path.split("/")[1:]
if len(cmd_path_list) == 0:
_LOGGER.warning("empty command path")
return
if cmd_path_list[0] == "test":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
)
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
)
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
q.add_log_cmd("Sending PUS scheduled TC telecommand")
crt_time = CdsShortTimestamp.from_now()
time_stamp = crt_time + datetime.timedelta(seconds=10)
time_stamp = time_stamp.pack()
return q.add_pus_tc(
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=Apid.SCHED,
)
)
if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgms":
assert len(cmd_path_list) >= 3
if cmd_path_list[2] == "hk":
if cmd_path_list[3] == "one_shot_hk":
q.add_log_cmd("Sending HK one shot request")
# TODO: Fix
# q.add_pus_tc(
# create_request_one_hk_command(
# make_addressable_id(Apid.ACS, AcsId.MGM_SET)
# )
# )
if cmd_path_list[2] == "mode":
if cmd_path_list[3] == "set_mode":
handle_set_mode_cmd(
q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
)
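# Hypothetical sketch of how a command path decomposes before the dispatching above;
# the leading empty element produced by the leading slash is stripped:
#   "/acs/mgms/mode/set_mode/normal".split("/")[1:]
#   -> ["acs", "mgms", "mode", "set_mode", "normal"]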
def handle_set_mode_cmd(
q: DefaultPusQueueHelper, target_str: str, mode_str: str, apid: int, unique_id: int
):
if mode_str == "off":
q.add_log_cmd(f"Sending Mode OFF to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.OFF, 0))
elif mode_str == "on":
q.add_log_cmd(f"Sending Mode ON to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.ON, 0))
elif mode_str == "normal":
q.add_log_cmd(f"Sending Mode NORMAL to {target_str}")
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.NORMAL, 0))


@ -1,2 +1,2 @@
tmtccmd == 8.0.0rc1
tmtccmd == 8.0.0rc2
# -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd


@ -1,85 +0,0 @@
import datetime
import logging
from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTelecommand
from tmtccmd.config import CmdTreeNode
from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command
from common import (
EXAMPLE_PUS_APID,
make_addressable_id,
RequestTargetId,
AcsHkIds,
)
_LOGGER = logging.getLogger(__name__)
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
test_node = CmdTreeNode("test", "Test Node")
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
root_node.add_child(test_node)
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
scheduler_node.add_child(
CmdTreeNode(
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
)
)
root_node.add_child(scheduler_node)
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
mgm_node = CmdTreeNode("mgms", "MGM devices node")
mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK"))
acs_node.add_child(mgm_node)
root_node.add_child(acs_node)
return root_node
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
# It should always be at least the root path "/", so we split off the empty portion to the left of it.
cmd_path_list = cmd_path.split("/")[1:]
if len(cmd_path_list) == 0:
_LOGGER.warning("empty command path")
return
if cmd_path_list[0] == "test":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
elif cmd_path_list[1] == "trigger_event":
q.add_log_cmd("Triggering test event")
return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
if cmd_path_list[0] == "scheduler":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
q.add_log_cmd("Sending PUS scheduled TC telecommand")
crt_time = CdsShortTimestamp.from_now()
time_stamp = crt_time + datetime.timedelta(seconds=10)
time_stamp = time_stamp.pack()
return q.add_pus_tc(
create_time_tagged_cmd(
time_stamp,
PusTelecommand(service=17, subservice=1),
apid=EXAMPLE_PUS_APID,
)
)
if cmd_path_list[0] == "acs":
assert len(cmd_path_list) >= 2
if cmd_path_list[1] == "mgm":
assert len(cmd_path_list) >= 3
if cmd_path_list[2] == "one_shot_hk":
q.add_log_cmd("Sending HK one shot request")
q.add_pus_tc(
create_request_one_hk_command(
make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
)
)


@ -1,118 +0,0 @@
use std::sync::mpsc::{self, TryRecvError};
use log::{info, warn};
use satrs::pus::verification::{VerificationReporterWithSender, VerificationReportingProvider};
use satrs::pus::{EcssTmSender, PusTmWrapper};
use satrs::request::TargetAndApidId;
use satrs::spacepackets::ecss::hk::Subservice as HkSubservice;
use satrs::{
hk::HkRequest,
spacepackets::{
ecss::tm::{PusTmCreator, PusTmSecondaryHeader},
time::cds::{DaysLen16Bits, TimeProvider},
SequenceFlags, SpHeader,
},
};
use satrs_example::config::{RequestTargetId, PUS_APID};
use crate::{
hk::{AcsHkIds, HkUniqueId},
requests::{Request, RequestWithToken},
update_time,
};
pub struct AcsTask {
timestamp: [u8; 7],
time_provider: TimeProvider<DaysLen16Bits>,
verif_reporter: VerificationReporterWithSender,
tm_sender: Box<dyn EcssTmSender>,
request_rx: mpsc::Receiver<RequestWithToken>,
}
impl AcsTask {
pub fn new(
tm_sender: impl EcssTmSender,
request_rx: mpsc::Receiver<RequestWithToken>,
verif_reporter: VerificationReporterWithSender,
) -> Self {
Self {
timestamp: [0; 7],
time_provider: TimeProvider::new_with_u16_days(0, 0),
verif_reporter,
tm_sender: Box::new(tm_sender),
request_rx,
}
}
fn handle_hk_request(&mut self, target_id: u32, unique_id: u32) {
assert_eq!(target_id, RequestTargetId::AcsSubsystem as u32);
if unique_id == AcsHkIds::TestMgmSet as u32 {
let mut sp_header = SpHeader::tm(PUS_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(
3,
HkSubservice::TmHkPacket as u8,
&self.timestamp,
);
let mut buf: [u8; 8] = [0; 8];
let hk_id = HkUniqueId::new(target_id, unique_id);
hk_id.write_to_be_bytes(&mut buf).unwrap();
let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &buf, true);
self.tm_sender
.send_tm(PusTmWrapper::Direct(pus_tm))
.expect("Sending HK TM failed");
}
// TODO: Verification failure for invalid unique IDs.
}
pub fn try_reading_one_request(&mut self) -> bool {
match self.request_rx.try_recv() {
Ok(request) => {
info!(
"ACS thread: Received HK request {:?}",
request.targeted_request
);
let target_and_apid_id = TargetAndApidId::from(request.targeted_request.target_id);
match request.targeted_request.request {
Request::Hk(hk_req) => match hk_req {
HkRequest::OneShot(unique_id) => {
self.handle_hk_request(target_and_apid_id.target(), unique_id)
}
HkRequest::Enable(_) => {}
HkRequest::Disable(_) => {}
HkRequest::ModifyCollectionInterval(_, _) => {}
},
Request::Mode(_mode_req) => {
warn!("mode request handling not implemented yet")
}
Request::Action(_action_req) => {
warn!("action request handling not implemented yet")
}
}
let started_token = self
.verif_reporter
.start_success(request.token, &self.timestamp)
.expect("Sending start success failed");
self.verif_reporter
.completion_success(started_token, &self.timestamp)
.expect("Sending completion success failed");
true
}
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
warn!("ACS thread: Message Queue TX disconnected!");
false
}
},
}
}
pub fn periodic_operation(&mut self) {
update_time(&mut self.time_provider, &mut self.timestamp);
loop {
if !self.try_reading_one_request() {
break;
}
}
}
}


@ -0,0 +1,282 @@
use derive_new::new;
use satrs::hk::{HkRequest, HkRequestVariant};
use satrs::queue::{GenericSendError, GenericTargetedMessagingError};
use satrs::spacepackets::ecss::hk;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::SpHeader;
use satrs_example::{DeviceMode, TimeStampHelper};
use std::sync::mpsc::{self};
use std::sync::{Arc, Mutex};
use satrs::mode::{
ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler,
};
use satrs::pus::{EcssTmSender, PusTmVariant};
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs_example::config::components::PUS_MODE_SERVICE;
use crate::pus::hk::{HkReply, HkReplyVariant};
use crate::requests::CompositeRequest;
use serde::{Deserialize, Serialize};
const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
// This is the selected resolution for the STM LIS3MDL device for the 4 Gauss sensitivity setting.
const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;
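// Illustrative helper, not part of the example handler: convert a raw 16-bit sensor
// reading into microtesla using the two constants above. The function name is
// hypothetical; the handler below performs the same scaling inline.
#[allow(dead_code)]
fn raw_lsb_to_microtesla(raw: i16) -> f32 {
    raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS
}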
pub trait SpiInterface {
type Error;
fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>;
}
#[derive(Default)]
pub struct SpiDummyInterface {
pub dummy_val_0: i16,
pub dummy_val_1: i16,
pub dummy_val_2: i16,
}
impl SpiInterface for SpiDummyInterface {
type Error = ();
fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> {
rx[0..2].copy_from_slice(&self.dummy_val_0.to_be_bytes());
rx[2..4].copy_from_slice(&self.dummy_val_1.to_be_bytes());
rx[4..6].copy_from_slice(&self.dummy_val_2.to_be_bytes());
Ok(())
}
}
#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)]
pub struct MgmData {
pub valid: bool,
pub x: f32,
pub y: f32,
pub z: f32,
}
pub struct MpscModeLeafInterface {
pub request_rx: mpsc::Receiver<GenericMessage<ModeRequest>>,
pub reply_tx_to_pus: mpsc::Sender<GenericMessage<ModeReply>>,
pub reply_tx_to_parent: mpsc::Sender<GenericMessage<ModeReply>>,
}
/// Example MGM device handler closely modeled on the LIS3MDL MEMS magnetometer.
#[derive(new)]
#[allow(clippy::too_many_arguments)]
pub struct MgmHandlerLis3Mdl<ComInterface: SpiInterface, TmSender: EcssTmSender> {
id: UniqueApidTargetId,
dev_str: &'static str,
mode_interface: MpscModeLeafInterface,
composite_request_receiver: mpsc::Receiver<GenericMessage<CompositeRequest>>,
hk_reply_sender: mpsc::Sender<GenericMessage<HkReply>>,
tm_sender: TmSender,
com_interface: ComInterface,
shared_mgm_set: Arc<Mutex<MgmData>>,
#[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")]
mode_and_submode: ModeAndSubmode,
#[new(default)]
tx_buf: [u8; 12],
#[new(default)]
rx_buf: [u8; 12],
#[new(default)]
tm_buf: [u8; 16],
#[new(default)]
stamp_helper: TimeStampHelper,
}
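// Note (illustrative): `#[derive(new)]` from the `derive_new` crate generates a
// `MgmHandlerLis3Mdl::new(..)` constructor taking all fields without a `#[new(..)]`
// attribute in declaration order; `main.rs` constructs the handler exactly this way.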
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> MgmHandlerLis3Mdl<ComInterface, TmSender> {
pub fn periodic_operation(&mut self) {
self.stamp_helper.update_from_now();
// Handle requests.
self.handle_composite_requests();
self.handle_mode_requests();
if self.mode() == DeviceMode::Normal as u32 {
log::trace!("polling LIS3MDL sensor {}", self.dev_str);
// Communicate with the device.
let result = self.com_interface.transfer(&self.tx_buf, &mut self.rx_buf);
assert!(result.is_ok());
// Actual data begins at the second byte, similar to how many SPI devices behave.
let x_raw = i16::from_be_bytes(self.rx_buf[1..3].try_into().unwrap());
let y_raw = i16::from_be_bytes(self.rx_buf[3..5].try_into().unwrap());
let z_raw = i16::from_be_bytes(self.rx_buf[5..7].try_into().unwrap());
// Simple scaling to retrieve the float value in microtesla, assuming the 4 Gauss sensitivity setting.
let mut mgm_guard = self.shared_mgm_set.lock().unwrap();
mgm_guard.x = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
mgm_guard.y = y_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
mgm_guard.z = z_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
drop(mgm_guard);
}
}
pub fn handle_composite_requests(&mut self) {
loop {
match self.composite_request_receiver.try_recv() {
Ok(ref msg) => match &msg.message {
CompositeRequest::Hk(hk_request) => {
self.handle_hk_request(&msg.requestor_info, hk_request)
}
// TODO: This object does not have actions (yet). Still send back a completion
// failure reply.
CompositeRequest::Action(_action_req) => {}
},
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::warn!(
"{}: failed to receive composite request: {:?}",
self.dev_str,
e
);
} else {
break;
}
}
}
}
}
pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) {
match hk_request.variant {
HkRequestVariant::OneShot => {
self.hk_reply_sender
.send(GenericMessage::new(
*requestor_info,
HkReply::new(hk_request.unique_id, HkReplyVariant::Ack),
))
.expect("failed to send HK reply");
let sec_header = PusTmSecondaryHeader::new(
3,
hk::Subservice::TmHkPacket as u8,
0,
0,
self.stamp_helper.stamp(),
);
let mgm_snapshot = *self.shared_mgm_set.lock().unwrap();
// Use binary serialization here. We want the data to be tightly packed.
self.tm_buf[0] = mgm_snapshot.valid as u8;
self.tm_buf[1..5].copy_from_slice(&mgm_snapshot.x.to_be_bytes());
self.tm_buf[5..9].copy_from_slice(&mgm_snapshot.y.to_be_bytes());
self.tm_buf[9..13].copy_from_slice(&mgm_snapshot.z.to_be_bytes());
let hk_tm = PusTmCreator::new(
SpHeader::new_from_apid(self.id.apid),
sec_header,
&self.tm_buf[0..12],
true,
);
self.tm_sender
.send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
.expect("failed to send HK TM");
}
HkRequestVariant::EnablePeriodic => todo!(),
HkRequestVariant::DisablePeriodic => todo!(),
HkRequestVariant::ModifyCollectionInterval(_) => todo!(),
}
}
pub fn handle_mode_requests(&mut self) {
loop {
// TODO: Only allow one set mode request per cycle?
match self.mode_interface.request_rx.try_recv() {
Ok(msg) => {
let result = self.handle_mode_request(msg);
// TODO: Trigger event?
if result.is_err() {
log::warn!(
"{}: mode request failed with error {:?}",
self.dev_str,
result.err().unwrap()
);
}
}
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::warn!("{}: failed to receive mode request: {:?}", self.dev_str, e);
} else {
break;
}
}
}
}
}
}
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeProvider
for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
fn mode_and_submode(&self) -> ModeAndSubmode {
self.mode_and_submode
}
}
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeRequestHandler
for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
type Error = ModeError;
fn start_transition(
&mut self,
requestor: MessageMetadata,
mode_and_submode: ModeAndSubmode,
) -> Result<(), satrs::mode::ModeError> {
log::info!(
"{}: transitioning to mode {:?}",
self.dev_str,
mode_and_submode
);
self.mode_and_submode = mode_and_submode;
self.handle_mode_reached(Some(requestor))?;
Ok(())
}
fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
log::info!(
"{} announcing mode: {:?}",
self.dev_str,
self.mode_and_submode
);
}
fn handle_mode_reached(
&mut self,
requestor: Option<MessageMetadata>,
) -> Result<(), Self::Error> {
self.announce_mode(requestor, false);
if let Some(requestor) = requestor {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
} else {
self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
}
}
Ok(())
}
fn send_mode_reply(
&self,
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
}
self.mode_interface
.reply_tx_to_pus
.send(GenericMessage::new(requestor, reply))
.map_err(|_| GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected))?;
Ok(())
}
fn handle_mode_info(
&mut self,
_requestor_info: MessageMetadata,
_info: ModeAndSubmode,
) -> Result<(), Self::Error> {
Ok(())
}
}


@ -0,0 +1 @@
pub mod mgm;


@ -12,8 +12,7 @@ use std::time::Duration;
fn main() {
let mut buf = [0; 32];
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
let tc_req_id = RequestId::new(&pus_tc);
println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");


@ -1,44 +0,0 @@
use satrs::pus::ReceivesEcssPusTc;
use satrs::spacepackets::{CcsdsPacket, SpHeader};
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
use satrs_example::config::PUS_APID;
#[derive(Clone)]
pub struct CcsdsReceiver<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone,
E,
> {
pub tc_source: TcSource,
}
impl<
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
E: 'static,
> CcsdsPacketHandler for CcsdsReceiver<TcSource, E>
{
type Error = E;
fn valid_apids(&self) -> &'static [u16] {
&[PUS_APID]
}
fn handle_known_apid(
&mut self,
sp_header: &SpHeader,
tc_raw: &[u8],
) -> Result<(), Self::Error> {
if sp_header.apid() == PUS_APID {
return self.tc_source.pass_ccsds(sp_header, tc_raw);
}
Ok(())
}
fn handle_unknown_apid(
&mut self,
sp_header: &SpHeader,
_tc_raw: &[u8],
) -> Result<(), Self::Error> {
println!("Unknown APID 0x{:x?} detected", sp_header.apid());
Ok(())
}
}


@ -1,7 +1,12 @@
use satrs::res_code::ResultU16;
use lazy_static::lazy_static;
use satrs::{
res_code::ResultU16,
spacepackets::{PacketId, PacketType},
};
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
use std::net::Ipv4Addr;
use std::{collections::HashSet, net::Ipv4Addr};
use strum::IntoEnumIterator;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
@ -9,8 +14,6 @@ use satrs::{
pool::{StaticMemoryPool, StaticPoolConfig},
};
pub const PUS_APID: u16 = 0x02;
#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum CustomPusServiceId {
@ -29,13 +32,30 @@ pub const AOCS_APID: u16 = 1;
pub enum GroupId {
Tmtc = 0,
Hk = 1,
Mode = 2,
}
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(0, 0);
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(0, 0);
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(id as u16);
}
set
};
}
pub mod tmtc_err {
@ -53,6 +73,8 @@ pub mod tmtc_err {
pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 4);
#[resultcode]
pub const ROUTING_ERROR: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 5);
#[resultcode(info = "Request timeout for targeted PUS request. P1: Request ID. P2: Target ID")]
pub const REQUEST_TIMEOUT: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 6);
#[resultcode(
info = "Not enough data inside the TC application data field. Optionally includes: \
@ -92,32 +114,75 @@ pub mod hk_err {
];
}
#[allow(clippy::enum_variant_names)]
pub mod mode_err {
use super::*;
#[resultcode]
pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0);
}
pub mod components {
use satrs::request::UniqueApidTargetId;
use strum::EnumIter;
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
}
// Component IDs for components with the PUS APID.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TmSenderId {
PusVerification = 0,
PusTest = 1,
PusEvent = 2,
PusHk = 3,
PusAction = 4,
PusSched = 5,
AllEvents = 6,
AcsSubsystem = 7,
pub enum PusId {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TcReceiverId {
PusTest = 1,
PusEvent = 2,
PusHk = 3,
PusAction = 4,
PusSched = 5,
pub enum AcsId {
Mgm0 = 0,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TmtcId {
UdpServer = 0,
TcpServer = 1,
}
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
pub const PUS_HK_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Sched as u16, 0);
pub const MGM_HANDLER_0: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
pub const UDP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
pub const TCP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
}
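// Illustrative only: the constants above bundle a CCSDS APID and a unique target ID.
// Other modules use them for example as `PUS_EVENT_MANAGEMENT.id()` for the component
// ID, `.apid` for the CCSDS APID and `.raw()` for the raw numeric representation.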
pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
(
StaticMemoryPool::new(StaticPoolConfig::new(
StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![
(30, 32),
(15, 64),
@ -128,7 +193,7 @@ pub mod pool {
],
true,
)),
StaticMemoryPool::new(StaticPoolConfig::new(
StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![
(30, 32),
(15, 64),
@ -143,7 +208,7 @@ pub mod pool {
}
pub fn create_sched_tc_pool() -> StaticMemoryPool {
StaticMemoryPool::new(StaticPoolConfig::new(
StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![
(30, 32),
(15, 64),


@ -1,71 +1,90 @@
use std::sync::mpsc::{self, SendError};
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHookProvider;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::EcssTmSender;
use satrs::request::UniqueApidTargetId;
use satrs::{
event_man::{
EventManager, EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider,
SendEventProvider,
},
events::EventU32,
params::Params,
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
pus::{
event_man::{
DefaultPusMgmtBackendProvider, EventReporter, EventRequest, EventRequestWithToken,
PusEventDispatcher,
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
},
verification::{
TcStateStarted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
},
EcssTmSender,
},
spacepackets::time::cds::{self, TimeProvider},
spacepackets::time::cds::CdsTime,
};
use satrs_example::config::PUS_APID;
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
use crate::update_time;
pub type MpscEventManager = EventManager<SendError<(EventU32, Option<Params>)>>;
pub struct PusEventHandler {
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
pus_event_dispatcher: PusEventDispatcher<(), EventU32>,
pus_event_man_rx: mpsc::Receiver<(EventU32, Option<Params>)>,
tm_sender: Box<dyn EcssTmSender>,
time_provider: TimeProvider,
timestamp: [u8; 7],
verif_handler: VerificationReporterWithSender,
// This hook sets the APID of the generated PUS event telemetry to the APID of the component which sent the event.
#[derive(Default)]
pub struct EventApidSetter {
pub next_apid: u16,
}
impl PusEventHandler {
pub fn new(
verif_handler: VerificationReporterWithSender,
event_manager: &mut MpscEventManager,
impl EventTmHookProvider for EventApidSetter {
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
tm.set_apid(self.next_apid);
}
}
/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
/// packets. It also handles the verification completion of PUS event service requests.
pub struct PusEventHandler<TmSender: EcssTmSender> {
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
pus_event_tm_creator: DefaultPusEventU32TmCreator<EventApidSetter>,
pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
tm_sender: TmSender,
time_provider: CdsTime,
timestamp: [u8; 7],
small_data_buf: [u8; 64],
verif_handler: VerificationReporter,
}
impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
pub fn new(
tm_sender: TmSender,
verif_handler: VerificationReporter,
event_manager: &mut EventManagerWithBoundedMpsc,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
tm_sender: impl EcssTmSender,
) -> Self {
let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel();
let event_queue_cap = 30;
let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
// All events sent to the manager are routed to the PUS event manager, which generates PUS event
// telemetry for each event.
let event_reporter = EventReporter::new(PUS_APID, 128).unwrap();
let pus_tm_backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
let event_reporter = EventReporter::new_with_hook(
PUS_EVENT_MANAGEMENT.raw(),
0,
0,
128,
EventApidSetter::default(),
)
.unwrap();
let pus_event_dispatcher =
PusEventDispatcher::new(event_reporter, Box::new(pus_tm_backend));
let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
PUS_EVENT_MANAGEMENT.raw(),
pus_event_man_tx,
event_queue_cap,
);
event_manager.subscribe_all(pus_event_man_send_provider.id());
event_manager.subscribe_all(pus_event_man_send_provider.target_id());
event_manager.add_sender(pus_event_man_send_provider);
Self {
event_request_rx,
pus_event_dispatcher,
pus_event_tm_creator: pus_event_dispatcher,
pus_event_man_rx,
time_provider: cds::TimeProvider::new_with_u16_days(0, 0),
time_provider: CdsTime::new_with_u16_days(0, 0),
timestamp: [0; 7],
small_data_buf: [0; 64],
verif_handler,
tm_sender: Box::new(tm_sender),
tm_sender,
}
}
@ -76,115 +95,203 @@ impl PusEventHandler {
.try_into()
.expect("expected start verification token");
self.verif_handler
.completion_success(started_token, timestamp)
.completion_success(&self.tm_sender, started_token, timestamp)
.expect("Sending completion success failed");
};
loop {
// handle event requests
if let Ok(event_req) = self.event_request_rx.try_recv() {
match event_req.request {
match self.event_request_rx.try_recv() {
Ok(event_req) => match event_req.request {
EventRequest::Enable(event) => {
self.pus_event_dispatcher
self.pus_event_tm_creator
.enable_tm_for_event(&event)
.expect("Enabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
EventRequest::Disable(event) => {
self.pus_event_dispatcher
self.pus_event_tm_creator
.disable_tm_for_event(&event)
.expect("Disabling TM failed");
update_time(&mut self.time_provider, &mut self.timestamp);
report_completion(event_req, &self.timestamp);
}
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("all event request senders have disconnected");
break;
}
},
}
}
}
pub fn generate_pus_event_tm(&mut self) {
loop {
// Perform the generation of PUS event packets
if let Ok((event, _param)) = self.pus_event_man_rx.try_recv() {
match self.pus_event_man_rx.try_recv() {
Ok(event_msg) => {
// We use the TM modification hook to set the sender APID for each event.
self.pus_event_tm_creator.reporter.tm_hook.next_apid =
UniqueApidTargetId::from(event_msg.sender_id()).apid;
update_time(&mut self.time_provider, &mut self.timestamp);
self.pus_event_dispatcher
.generate_pus_event_tm_generic(
self.tm_sender.upcast_mut(),
let generation_result = self
.pus_event_tm_creator
.generate_pus_event_tm_generic_with_generic_params(
&self.tm_sender,
&self.timestamp,
event,
None,
event_msg.event(),
&mut self.small_data_buf,
event_msg.params(),
)
.expect("Sending TM as event failed");
if !generation_result.params_were_propagated {
log::warn!(
"Event TM parameters were not propagated: {:?}",
event_msg.params()
);
}
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("All event senders have disconnected");
break;
}
},
}
}
}
}
pub struct EventManagerWrapper {
event_manager: MpscEventManager,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
pub struct EventHandler<TmSender: EcssTmSender> {
pub pus_event_handler: PusEventHandler<TmSender>,
event_manager: EventManagerWithBoundedMpsc,
}
impl EventManagerWrapper {
pub fn new() -> Self {
// The sender handle is the primary sender handle for all components which want to create events.
// The event manager will receive the RX handle to receive all the events.
let (event_sender, event_man_rx) = mpsc::channel();
let event_recv = MpscEventReceiver::<EventU32>::new(event_man_rx);
Self {
event_manager: EventManagerWithMpscQueue::new(Box::new(event_recv)),
event_sender,
}
}
pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
self.event_sender.clone()
}
pub fn event_manager(&mut self) -> &mut MpscEventManager {
&mut self.event_manager
}
pub fn try_event_routing(&mut self) {
// Perform the event routing.
self.event_manager
.try_event_handling()
.expect("event handling failed");
}
}
pub struct EventHandler {
pub event_man_wrapper: EventManagerWrapper,
pub pus_event_handler: PusEventHandler,
}
impl EventHandler {
impl<TmSender: EcssTmSender> EventHandler<TmSender> {
pub fn new(
tm_sender: impl EcssTmSender,
verif_handler: VerificationReporterWithSender,
tm_sender: TmSender,
event_rx: mpsc::Receiver<EventMessageU32>,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let mut event_man_wrapper = EventManagerWrapper::new();
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler = PusEventHandler::new(
verif_handler,
event_man_wrapper.event_manager(),
event_request_rx,
tm_sender,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
&mut event_manager,
event_request_rx,
);
Self {
event_man_wrapper,
pus_event_handler,
}
}
pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
self.event_man_wrapper.clone_event_sender()
Self {
pus_event_handler,
event_manager,
}
}
#[allow(dead_code)]
pub fn event_manager(&mut self) -> &mut MpscEventManager {
self.event_man_wrapper.event_manager()
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
&mut self.event_manager
}
pub fn periodic_operation(&mut self) {
self.pus_event_handler.handle_event_requests();
self.event_man_wrapper.try_event_routing();
self.try_event_routing();
self.pus_event_handler.generate_pus_event_tm();
}
pub fn try_event_routing(&mut self) {
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
self.routing_error_handler(event_msg, error)
};
// Perform the event routing.
self.event_manager.try_event_handling(error_handler);
}
pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
log::warn!("event routing error for event {event_msg:?}: {error:?}");
}
}
#[cfg(test)]
mod tests {
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
pub event_manager: EventManagerWithBoundedMpsc,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub pus_event_handler: PusEventHandler<mpsc::Sender<PacketAsVec>>,
}
impl EventManagementTestbench {
pub fn new() -> Self {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
let pus_event_handler = PusEventHandler::<mpsc::Sender<PacketAsVec>>::new(
tm_sender,
verif_reporter,
&mut event_manager,
event_req_rx,
);
Self {
event_tx,
tm_receiver,
event_manager,
pus_event_handler,
}
}
}
#[test]
fn test_basic_event_generation() {
let mut testbench = EventManagementTestbench::new();
testbench
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
testbench.event_manager.try_event_handling(|_, _| {});
testbench.pus_event_handler.generate_pus_event_tm();
let tm_packet = testbench
.tm_receiver
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
assert_eq!(event_read_back, TEST_EVENT);
}
#[test]
fn test_basic_event_disabled() {
// TODO: Add test.
}
}


@ -1,27 +1,25 @@
use derive_new::new;
use satrs::hk::UniqueId;
use satrs::request::UniqueApidTargetId;
use satrs::spacepackets::ByteConversionError;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum AcsHkIds {
TestMgmSet = 1,
}
#[derive(Debug, new, Copy, Clone)]
pub struct HkUniqueId {
target_id: u32,
set_id: u32,
target_id: UniqueApidTargetId,
set_id: UniqueId,
}
impl HkUniqueId {
#[allow(dead_code)]
pub fn target_id(&self) -> u32 {
pub fn target_id(&self) -> UniqueApidTargetId {
self.target_id
}
#[allow(dead_code)]
pub fn set_id(&self) -> u32 {
pub fn set_id(&self) -> UniqueId {
self.set_id
}
#[allow(dead_code)]
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
if buf.len() < 8 {
return Err(ByteConversionError::ToSliceTooSmall {
@ -29,7 +27,7 @@ impl HkUniqueId {
expected: 8,
});
}
buf[0..4].copy_from_slice(&self.target_id.to_be_bytes());
buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes());
buf[4..8].copy_from_slice(&self.set_id.to_be_bytes());
Ok(8)


@ -0,0 +1,3 @@
//! This module contains all components related to the direct interface of the example.
pub mod tcp;
pub mod udp;


@ -0,0 +1,154 @@
use std::time::Duration;
use std::{
collections::{HashSet, VecDeque},
fmt::Debug,
marker::PhantomData,
sync::{Arc, Mutex},
};
use log::{info, warn};
use satrs::{
encoding::ccsds::{SpValidity, SpacePacketValidator},
hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
spacepackets::{CcsdsPacket, PacketId},
tmtc::{PacketSenderRaw, PacketSource},
};
#[derive(Default)]
pub struct ConnectionFinishedHandler {}
pub struct SimplePacketValidator {
pub valid_ids: HashSet<PacketId>,
}
impl SpacePacketValidator for SimplePacketValidator {
fn validate(
&self,
sp_header: &satrs::spacepackets::SpHeader,
_raw_buf: &[u8],
) -> satrs::encoding::ccsds::SpValidity {
if self.valid_ids.contains(&sp_header.packet_id()) {
return SpValidity::Valid;
}
log::warn!("ignoring space packet with header {:?}", sp_header);
// We could perform a CRC check, but let's keep this simple and assume that TCP ensures
// data integrity.
SpValidity::Skip
}
}
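// Hedged sketch of a stricter validator (not used by the example): additionally verify
// the packet CRC. This assumes the `crc` crate with the CRC-16/IBM-3740 (CCITT-FALSE)
// algorithm used by PUS packets; a valid packet including its trailing CRC field
// checksums to zero. The struct name is hypothetical.
//
// pub struct CrcCheckingValidator {
//     pub valid_ids: HashSet<PacketId>,
// }
//
// impl SpacePacketValidator for CrcCheckingValidator {
//     fn validate(&self, sp_header: &satrs::spacepackets::SpHeader, raw_buf: &[u8]) -> SpValidity {
//         let crc = crc::Crc::<u16>::new(&crc::CRC_16_IBM_3740);
//         if self.valid_ids.contains(&sp_header.packet_id()) && crc.checksum(raw_buf) == 0 {
//             return SpValidity::Valid;
//         }
//         SpValidity::Skip
//     }
// }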
impl HandledConnectionHandler for ConnectionFinishedHandler {
fn handled_connection(&mut self, info: satrs::hal::std::tcp_server::HandledConnectionInfo) {
info!(
"Served {} TMs and {} TCs for client {:?}",
info.num_sent_tms, info.num_received_tcs, info.addr
);
}
}
#[derive(Default, Clone)]
pub struct SyncTcpTmSource {
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
max_packets_stored: usize,
pub silent_packet_overwrite: bool,
}
impl SyncTcpTmSource {
pub fn new(max_packets_stored: usize) -> Self {
Self {
tm_queue: Arc::default(),
max_packets_stored,
silent_packet_overwrite: true,
}
}
pub fn add_tm(&mut self, tm: &[u8]) {
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
if tm_queue.len() > self.max_packets_stored {
if !self.silent_packet_overwrite {
warn!("TPC TM source is full, deleting oldest packet");
}
tm_queue.pop_front();
}
tm_queue.push_back(tm.to_vec());
}
}
impl PacketSource for SyncTcpTmSource {
type Error = ();
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
if !tm_queue.is_empty() {
let next_vec = tm_queue.front().unwrap();
if buffer.len() < next_vec.len() {
panic!(
"provided buffer too small, must be at least {} bytes",
next_vec.len()
);
}
let next_vec = tm_queue.pop_front().unwrap();
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
if next_vec.len() > 9 {
let service = next_vec[7];
let subservice = next_vec[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
return Ok(next_vec.len());
}
Ok(0)
}
}
pub type TcpServer<ReceivesTc, SendError> = TcpSpacepacketsServer<
SyncTcpTmSource,
ReceivesTc,
SimplePacketValidator,
ConnectionFinishedHandler,
(),
SendError,
>;
pub struct TcpTask<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>(
pub TcpServer<TcSender, SendError>,
PhantomData<SendError>,
);
impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
TcpTask<TcSender, SendError>
{
pub fn new(
cfg: ServerConfig,
tm_source: SyncTcpTmSource,
tc_sender: TcSender,
valid_ids: HashSet<PacketId>,
) -> Result<Self, std::io::Error> {
Ok(Self(
TcpSpacepacketsServer::new(
cfg,
tm_source,
tc_sender,
SimplePacketValidator { valid_ids },
ConnectionFinishedHandler::default(),
None,
)?,
PhantomData,
))
}
pub fn periodic_operation(&mut self) {
loop {
let result = self
.0
.handle_all_connections(Some(Duration::from_millis(400)));
match result {
Ok(_conn_result) => (),
Err(e) => {
warn!("TCP server error: {e:?}");
}
}
}
}
}


@ -1,13 +1,13 @@
use std::{
net::{SocketAddr, UdpSocket},
sync::mpsc::Receiver,
};
use core::fmt::Debug;
use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use log::{info, warn};
use satrs::pus::HandlingStatus;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderRaw};
use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr},
tmtc::CcsdsError,
pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
};
pub trait UdpTmHandler {
@ -15,20 +15,20 @@ pub trait UdpTmHandler {
}
pub struct StaticUdpTmHandler {
pub tm_rx: Receiver<StoreAddr>,
pub tm_rx: mpsc::Receiver<PacketInPool>,
pub tm_store: SharedStaticMemoryPool,
}
impl UdpTmHandler for StaticUdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) {
while let Ok(addr) = self.tm_rx.try_recv() {
while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() {
let store_lock = self.tm_store.write();
if store_lock.is_err() {
warn!("Locking TM store failed");
continue;
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr);
let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr);
let read_res = pg.read_as_vec();
if read_res.is_err() {
warn!("Error reading TM pool data");
@ -44,20 +44,20 @@ impl UdpTmHandler for StaticUdpTmHandler {
}
pub struct DynamicUdpTmHandler {
pub tm_rx: Receiver<Vec<u8>>,
pub tm_rx: mpsc::Receiver<PacketAsVec>,
}
impl UdpTmHandler for DynamicUdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) {
while let Ok(tm) = self.tm_rx.try_recv() {
if tm.len() > 9 {
let service = tm[7];
let subservice = tm[8];
if tm.packet.len() > 9 {
let service = tm.packet[7];
let subservice = tm.packet[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = socket.send_to(&tm, recv_addr);
let result = socket.send_to(&tm.packet, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
@ -65,49 +65,57 @@ impl UdpTmHandler for DynamicUdpTmHandler {
}
}
pub struct UdpTmtcServer<TmHandler: UdpTmHandler, SendError> {
pub udp_tc_server: UdpTcServer<CcsdsError<SendError>>,
pub struct UdpTmtcServer<
TcSender: PacketSenderRaw<Error = SendError>,
TmHandler: UdpTmHandler,
SendError,
> {
pub udp_tc_server: UdpTcServer<TcSender, SendError>,
pub tm_handler: TmHandler,
}
impl<TmHandler: UdpTmHandler, SendError: core::fmt::Debug + 'static>
UdpTmtcServer<TmHandler, SendError>
impl<
TcSender: PacketSenderRaw<Error = SendError>,
TmHandler: UdpTmHandler,
SendError: Debug + 'static,
> UdpTmtcServer<TcSender, TmHandler, SendError>
{
pub fn periodic_operation(&mut self) {
while self.poll_tc_server() {}
loop {
if self.poll_tc_server() == HandlingStatus::Empty {
break;
}
}
if let Some(recv_addr) = self.udp_tc_server.last_sender() {
self.tm_handler
.send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr);
}
}
fn poll_tc_server(&mut self) -> bool {
fn poll_tc_server(&mut self) -> HandlingStatus {
match self.udp_tc_server.try_recv_tc() {
Ok(_) => true,
Err(e) => match e {
ReceiveResult::ReceiverError(e) => match e {
CcsdsError::ByteConversionError(e) => {
warn!("packet error: {e:?}");
true
}
CcsdsError::CustomError(e) => {
warn!("mpsc custom error {e:?}");
true
}
},
ReceiveResult::IoError(e) => {
Ok(_) => HandlingStatus::HandledOne,
Err(e) => {
match e {
ReceiveResult::NothingReceived => (),
ReceiveResult::Io(e) => {
warn!("IO error {e}");
false
}
ReceiveResult::NothingReceived => false,
},
ReceiveResult::Send(send_error) => {
warn!("send error {send_error:?}");
}
}
HandlingStatus::Empty
}
}
}
}
#[cfg(test)]
mod tests {
use std::net::Ipv4Addr;
use std::{
cell::RefCell,
collections::VecDeque,
net::IpAddr,
sync::{Arc, Mutex},
@ -118,21 +126,26 @@ mod tests {
ecss::{tc::PusTcCreator, WritablePusPacket},
SpHeader,
},
tmtc::ReceivesTcCore,
tmtc::PacketSenderRaw,
ComponentId,
};
use satrs_example::config::{OBSW_SERVER_ADDR, PUS_APID};
use satrs_example::config::{components, OBSW_SERVER_ADDR};
use super::*;
#[derive(Default, Debug, Clone)]
pub struct TestReceiver {
tc_vec: Arc<Mutex<VecDeque<Vec<u8>>>>,
const UDP_SERVER_ID: ComponentId = 0x05;
#[derive(Default, Debug)]
pub struct TestSender {
tc_vec: RefCell<VecDeque<PacketAsVec>>,
}
impl ReceivesTcCore for TestReceiver {
type Error = CcsdsError<()>;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
self.tc_vec.lock().unwrap().push_back(tc_raw.to_vec());
impl PacketSenderRaw for TestSender {
type Error = ();
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut mut_queue = self.tc_vec.borrow_mut();
mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
Ok(())
}
}
@ -151,9 +164,10 @@ mod tests {
#[test]
fn test_basic() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default();
let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let test_receiver = TestSender::default();
// let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server =
UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
let mut udp_dyn_server = UdpTmtcServer {
@ -161,16 +175,18 @@ mod tests {
tm_handler,
};
udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty());
let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
assert!(queue.is_empty());
assert!(tm_handler_calls.lock().unwrap().is_empty());
}
#[test]
fn test_transactions() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
let test_receiver = TestReceiver::default();
let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
let sock_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0);
let test_receiver = TestSender::default();
// let tc_queue = test_receiver.tc_vec.clone();
let udp_tc_server =
UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
let server_addr = udp_tc_server.socket.local_addr().unwrap();
let tm_handler = TestTmHandler::default();
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
@ -178,20 +194,21 @@ mod tests {
udp_tc_server,
tm_handler,
};
let mut sph = SpHeader::tc_unseg(PUS_APID, 0, 0).unwrap();
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true)
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
.to_vec()
.unwrap();
let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");
let client_addr = client.local_addr().unwrap();
client.connect(server_addr).unwrap();
client.send(&ping_tc).unwrap();
println!("{}", server_addr);
client.send_to(&ping_tc, server_addr).unwrap();
udp_dyn_server.periodic_operation();
{
let mut tc_queue = tc_queue.lock().unwrap();
assert!(!tc_queue.is_empty());
let received_tc = tc_queue.pop_front().unwrap();
assert_eq!(received_tc, ping_tc);
let mut queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow_mut();
assert!(!queue.is_empty());
let packet_with_sender = queue.pop_front().unwrap();
assert_eq!(packet_with_sender.packet, ping_tc);
assert_eq!(packet_with_sender.sender_id, UDP_SERVER_ID);
}
{
@ -202,7 +219,9 @@ mod tests {
assert_eq!(received_addr, client_addr);
}
udp_dyn_server.periodic_operation();
assert!(tc_queue.lock().unwrap().is_empty());
let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
assert!(queue.is_empty());
drop(queue);
// Still tries to send to the same client.
{
let mut tm_handler_calls = tm_handler_calls.lock().unwrap();


@ -1 +1,39 @@
use satrs::spacepackets::time::{cds::CdsTime, TimeWriter};
pub mod config;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {
Off = 0,
On = 1,
Normal = 2,
}
pub struct TimeStampHelper {
stamper: CdsTime,
time_stamp: [u8; 7],
}
impl TimeStampHelper {
pub fn stamp(&self) -> &[u8] {
&self.time_stamp
}
pub fn update_from_now(&mut self) {
self.stamper
.update_from_now()
.expect("Updating timestamp failed");
self.stamper
.write_to_bytes(&mut self.time_stamp)
.expect("Writing timestamp failed");
}
}
impl Default for TimeStampHelper {
fn default() -> Self {
Self {
stamper: CdsTime::now_with_u16_days().expect("creating time stamper failed"),
time_stamp: Default::default(),
}
}
}
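// Minimal usage sketch (illustrative): refresh the helper once per cycle and borrow the
// CDS short timestamp bytes for TM creation.
//
// let mut stamp_helper = TimeStampHelper::default();
// stamp_helper.update_from_now();
// let stamp: &[u8] = stamp_helper.stamp();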


@ -1,227 +1,223 @@
mod acs;
mod ccsds;
mod events;
mod hk;
mod interface;
mod logger;
mod pus;
mod requests;
mod tcp;
mod tm_funnel;
mod tmtc;
mod udp;
use crate::events::EventHandler;
use crate::interface::udp::DynamicUdpTmHandler;
use crate::pus::stack::PusStack;
use crate::tm_funnel::{TmFunnelDynamic, TmFunnelStatic};
use crate::tmtc::tc_source::{TcSourceTaskDynamic, TcSourceTaskStatic};
use crate::tmtc::tm_sink::{TmSinkDynamic, TmSinkStatic};
use log::info;
use pus::test::create_test_service_dynamic;
use satrs::hal::std::tcp_server::ServerConfig;
use satrs::hal::std::udp_server::UdpTcServer;
use satrs::request::TargetAndApidId;
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::request::GenericMessage;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
use satrs_example::config::tasks::{
FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC,
};
use satrs_example::config::{RequestTargetId, TmSenderId, OBSW_SERVER_ADDR, PUS_APID, SERVER_PORT};
use tmtc::PusTcSourceProviderDynamic;
use udp::DynamicUdpTmHandler;
use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT};
use crate::acs::AcsTask;
use crate::ccsds::CcsdsReceiver;
use crate::acs::mgm::{MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface};
use crate::interface::tcp::{SyncTcpTmSource, TcpTask};
use crate::interface::udp::{StaticUdpTmHandler, UdpTmtcServer};
use crate::logger::setup_logger;
use crate::pus::action::{create_action_service_dynamic, create_action_service_static};
use crate::pus::event::{create_event_service_dynamic, create_event_service_static};
use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static};
use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static};
use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static};
use crate::pus::test::create_test_service_static;
use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::requests::{GenericRequestRouter, RequestWithToken};
use crate::tcp::{SyncTcpTmSource, TcpTask};
use crate::tmtc::{
PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic,
};
use crate::udp::{StaticUdpTmHandler, UdpTmtcServer};
use crate::pus::{PusTcDistributor, PusTcMpscRouter};
use crate::requests::{CompositeRequest, GenericRequestRouter};
use satrs::mode::ModeRequest;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
use satrs::pus::{EcssTmSender, MpscTmAsVecSender, MpscTmInSharedPoolSender};
use satrs::spacepackets::{time::cds::TimeProvider, time::TimeWriter};
use satrs::tmtc::CcsdsDistributor;
use satrs::ChannelId;
use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
use satrs_example::config::components::{MGM_HANDLER_0, TCP_SERVER, UDP_SERVER};
use std::net::{IpAddr, SocketAddr};
use std::sync::mpsc::{self, channel};
use std::sync::mpsc;
use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
fn create_verification_reporter(verif_sender: impl EcssTmSender) -> VerificationReporterWithSender {
let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter.
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender))
}
#[allow(dead_code)]
fn static_tmtc_pool_main() {
let (tm_pool, tc_pool) = create_static_pools();
let shared_tm_pool = SharedTmPool::new(tm_pool);
let shared_tc_pool = SharedTcPool {
pool: Arc::new(RwLock::new(tc_pool)),
};
let (tc_source_tx, tc_source_rx) = channel();
let (tm_funnel_tx, tm_funnel_rx) = channel();
let (tm_server_tx, tm_server_rx) = channel();
let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
let shared_tc_pool = Arc::new(RwLock::new(tc_pool));
let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
let shared_tc_pool_wrapper = SharedPacketPool::new(&shared_tc_pool);
let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
let (tm_sink_tx, tm_sink_rx) = mpsc::sync_channel(50);
let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
// Every software component which needs to generate verification telemetry receives a cloned
// verification reporter.
let verif_reporter = create_verification_reporter(MpscTmInSharedPoolSender::new(
TmSenderId::PusVerification as ChannelId,
"verif_sender",
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
));
let tm_sink_tx_sender =
PacketSenderWithSharedPool::new(tm_sink_tx.clone(), shared_tm_pool_wrapper.clone());
let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
mpsc::channel::<GenericMessage<CompositeRequest>>();
let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();
let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32);
let (acs_thread_tx, acs_thread_rx) = channel::<RequestWithToken>();
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
let mut request_map = GenericRequestRouter::default();
request_map.0.insert(acs_target_id.into(), acs_thread_tx);
request_map
.composite_router_map
.insert(MGM_HANDLER_0.id(), mgm_handler_composite_tx);
request_map
.mode_router_map
.insert(MGM_HANDLER_0.id(), mgm_handler_mode_tx);
// This helper structure is used by all telecommand providers which need to send telecommands
// to the TC source.
let tc_source = PusTcSourceProviderSharedPool {
shared_pool: shared_tc_pool.clone(),
tc_source: tc_source_tx,
};
let tc_source = PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());
// Create event handling components
// These sender handles are used to send event requests, for example to enable or disable
// certain events.
let (event_tx, event_rx) = mpsc::sync_channel(100);
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
// The event task is the core handler to perform the event routing and TM handling as specified
// in the sat-rs documentation.
let mut event_handler = EventHandler::new(
MpscTmInSharedPoolSender::new(
TmSenderId::AllEvents as ChannelId,
"ALL_EVENTS_TX",
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
),
verif_reporter.clone(),
event_request_rx,
);
let mut event_handler = EventHandler::new(tm_sink_tx.clone(), event_rx, event_request_rx);
let (pus_test_tx, pus_test_rx) = mpsc::channel();
let (pus_event_tx, pus_event_rx) = mpsc::channel();
let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
let (pus_test_tx, pus_test_rx) = channel();
let (pus_event_tx, pus_event_rx) = channel();
let (pus_sched_tx, pus_sched_rx) = channel();
let (pus_hk_tx, pus_hk_rx) = channel();
let (pus_action_tx, pus_action_rx) = channel();
let pus_router = PusTcMpscRouter {
test_service_receiver: pus_test_tx,
event_service_receiver: pus_event_tx,
sched_service_receiver: pus_sched_tx,
hk_service_receiver: pus_hk_tx,
action_service_receiver: pus_action_tx,
test_tc_sender: pus_test_tx,
event_tc_sender: pus_event_tx,
sched_tc_sender: pus_sched_tx,
hk_tc_sender: pus_hk_tx,
action_tc_sender: pus_action_tx,
mode_tc_sender: pus_mode_tx,
};
let pus_test_service = create_test_service_static(
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
verif_reporter.clone(),
shared_tc_pool.pool.clone(),
event_handler.clone_event_sender(),
tm_sink_tx_sender.clone(),
shared_tc_pool.clone(),
event_tx.clone(),
pus_test_rx,
);
let pus_scheduler_service = create_scheduler_service_static(
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
verif_reporter.clone(),
tm_sink_tx_sender.clone(),
tc_source.clone(),
pus_sched_rx,
create_sched_tc_pool(),
);
let pus_event_service = create_event_service_static(
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
verif_reporter.clone(),
shared_tc_pool.pool.clone(),
tm_sink_tx_sender.clone(),
shared_tc_pool.clone(),
pus_event_rx,
event_request_tx,
);
let pus_action_service = create_action_service_static(
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
verif_reporter.clone(),
shared_tc_pool.pool.clone(),
tm_sink_tx_sender.clone(),
shared_tc_pool.clone(),
pus_action_rx,
request_map.clone(),
pus_action_reply_rx,
);
let pus_hk_service = create_hk_service_static(
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
verif_reporter.clone(),
shared_tc_pool.pool.clone(),
tm_sink_tx_sender.clone(),
shared_tc_pool.clone(),
pus_hk_rx,
request_map.clone(),
pus_hk_reply_rx,
);
let pus_mode_service = create_mode_service_static(
tm_sink_tx_sender.clone(),
shared_tc_pool.clone(),
pus_mode_rx,
request_map,
pus_mode_reply_rx,
);
let mut pus_stack = PusStack::new(
pus_test_service,
pus_hk_service,
pus_event_service,
pus_action_service,
pus_scheduler_service,
pus_test_service,
pus_mode_service,
);
let ccsds_receiver = CcsdsReceiver { tc_source };
let mut tmtc_task = TcSourceTaskStatic::new(
shared_tc_pool.clone(),
shared_tc_pool_wrapper.clone(),
tc_source_rx,
PusReceiver::new(verif_reporter.clone(), pus_router),
PusTcDistributor::new(tm_sink_tx_sender, pus_router),
);
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source.clone())
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_handler: StaticUdpTmHandler {
tm_rx: tm_server_rx,
tm_store: shared_tm_pool.clone_backing_pool(),
tm_store: shared_tm_pool.clone(),
},
};
let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let tcp_server_cfg = ServerConfig::new(
TCP_SERVER.id(),
sock_addr,
Duration::from_millis(400),
4096,
8192,
);
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
tcp_server_cfg,
sync_tm_tcp_source.clone(),
tcp_ccsds_distributor,
tc_source.clone(),
PACKET_ID_VALIDATOR.clone(),
)
.expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmInSharedPoolSender::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
),
acs_thread_rx,
verif_reporter,
let mut tm_sink = TmSinkStatic::new(
shared_tm_pool_wrapper,
sync_tm_tcp_source,
tm_sink_rx,
tm_server_tx,
);
let mut tm_funnel = TmFunnelStatic::new(
shared_tm_pool,
sync_tm_tcp_source,
tm_funnel_rx,
tm_server_tx,
let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
mpsc::channel();
let dummy_spi_interface = SpiDummyInterface::default();
let shared_mgm_set = Arc::default();
let mode_leaf_interface = MpscModeLeafInterface {
request_rx: mgm_handler_mode_rx,
reply_tx_to_pus: pus_mode_reply_tx,
reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
};
let mut mgm_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
"MGM_0",
mode_leaf_interface,
mgm_handler_composite_rx,
pus_hk_reply_tx,
tm_sink_tx,
dummy_spi_interface,
shared_mgm_set,
);
info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string())
.name("SATRS tmtc-udp".to_string())
.spawn(move || {
info!("Running UDP server on port {SERVER_PORT}");
loop {
@@ -234,7 +230,7 @@ fn static_tmtc_pool_main() {
info!("Starting TCP task");
let jh_tcp = thread::Builder::new()
.name("TCP".to_string())
.name("sat-rs tcp".to_string())
.spawn(move || {
info!("Running TCP server on port {SERVER_PORT}");
loop {
@@ -245,15 +241,15 @@ fn static_tmtc_pool_main() {
info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new()
.name("TM Funnel".to_string())
.name("tm sink".to_string())
.spawn(move || loop {
tm_funnel.operation();
tm_sink.operation();
})
.unwrap();
info!("Starting event handling task");
let jh_event_handling = thread::Builder::new()
.name("Event".to_string())
.name("sat-rs events".to_string())
.spawn(move || loop {
event_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
@@ -262,16 +258,16 @@ fn static_tmtc_pool_main() {
info!("Starting AOCS thread");
let jh_aocs = thread::Builder::new()
.name("AOCS".to_string())
.name("sat-rs aocs".to_string())
.spawn(move || loop {
acs_task.periodic_operation();
mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
})
.unwrap();
info!("Starting PUS handler thread");
let jh_pus_handler = thread::Builder::new()
.name("PUS".to_string())
.name("sat-rs pus".to_string())
.spawn(move || loop {
pus_stack.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
@@ -298,104 +294,98 @@ fn static_tmtc_pool_main() {
#[allow(dead_code)]
fn dyn_tmtc_pool_main() {
let (tc_source_tx, tc_source_rx) = channel();
let (tm_funnel_tx, tm_funnel_rx) = channel();
let (tm_server_tx, tm_server_rx) = channel();
// Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter.
let verif_reporter = create_verification_reporter(MpscTmAsVecSender::new(
TmSenderId::PusVerification as ChannelId,
"verif_sender",
tm_funnel_tx.clone(),
));
let (tc_source_tx, tc_source_rx) = mpsc::channel();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (tm_server_tx, tm_server_rx) = mpsc::channel();
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
mpsc::channel::<GenericMessage<CompositeRequest>>();
let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();
let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32);
let (acs_thread_tx, acs_thread_rx) = channel::<RequestWithToken>();
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
let mut request_map = GenericRequestRouter::default();
request_map.0.insert(acs_target_id.into(), acs_thread_tx);
let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
request_map
.composite_router_map
.insert(MGM_HANDLER_0.raw(), mgm_handler_composite_tx);
request_map
.mode_router_map
.insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx);
// Create event handling components
// These sender handles are used to send event requests, for example to enable or disable
// certain events.
let (event_tx, event_rx) = mpsc::sync_channel(100);
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
// The event task is the core handler which performs the event routing and TM handling as
// specified in the sat-rs documentation.
let mut event_handler = EventHandler::new(
MpscTmAsVecSender::new(
TmSenderId::AllEvents as ChannelId,
"ALL_EVENTS_TX",
tm_funnel_tx.clone(),
),
verif_reporter.clone(),
event_request_rx,
);
let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_rx, event_request_rx);
let (pus_test_tx, pus_test_rx) = mpsc::channel();
let (pus_event_tx, pus_event_rx) = mpsc::channel();
let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
let (pus_test_tx, pus_test_rx) = channel();
let (pus_event_tx, pus_event_rx) = channel();
let (pus_sched_tx, pus_sched_rx) = channel();
let (pus_hk_tx, pus_hk_rx) = channel();
let (pus_action_tx, pus_action_rx) = channel();
let pus_router = PusTcMpscRouter {
test_service_receiver: pus_test_tx,
event_service_receiver: pus_event_tx,
sched_service_receiver: pus_sched_tx,
hk_service_receiver: pus_hk_tx,
action_service_receiver: pus_action_tx,
test_tc_sender: pus_test_tx,
event_tc_sender: pus_event_tx,
sched_tc_sender: pus_sched_tx,
hk_tc_sender: pus_hk_tx,
action_tc_sender: pus_action_tx,
mode_tc_sender: pus_mode_tx,
};
let pus_test_service = create_test_service_dynamic(
tm_funnel_tx.clone(),
verif_reporter.clone(),
event_handler.clone_event_sender(),
pus_test_rx,
);
let pus_test_service =
create_test_service_dynamic(tm_funnel_tx.clone(), event_tx.clone(), pus_test_rx);
let pus_scheduler_service = create_scheduler_service_dynamic(
tm_funnel_tx.clone(),
verif_reporter.clone(),
tc_source.0.clone(),
tc_source_tx.clone(),
pus_sched_rx,
create_sched_tc_pool(),
);
let pus_event_service = create_event_service_dynamic(
tm_funnel_tx.clone(),
verif_reporter.clone(),
pus_event_rx,
event_request_tx,
);
let pus_event_service =
create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
let pus_action_service = create_action_service_dynamic(
tm_funnel_tx.clone(),
verif_reporter.clone(),
pus_action_rx,
request_map.clone(),
pus_action_reply_rx,
);
let pus_hk_service = create_hk_service_dynamic(
tm_funnel_tx.clone(),
verif_reporter.clone(),
pus_hk_rx,
request_map.clone(),
pus_hk_reply_rx,
);
let pus_mode_service = create_mode_service_dynamic(
tm_funnel_tx.clone(),
pus_mode_rx,
request_map,
pus_mode_reply_rx,
);
let mut pus_stack = PusStack::new(
pus_test_service,
pus_hk_service,
pus_event_service,
pus_action_service,
pus_scheduler_service,
pus_test_service,
pus_mode_service,
);
let ccsds_receiver = CcsdsReceiver { tc_source };
let mut tmtc_task = TcSourceTaskDynamic::new(
tc_source_rx,
PusReceiver::new(verif_reporter.clone(), pus_router),
PusTcDistributor::new(tm_funnel_tx.clone(), pus_router),
);
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone())
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
@@ -404,30 +394,47 @@ fn dyn_tmtc_pool_main() {
},
};
let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let tcp_server_cfg = ServerConfig::new(
TCP_SERVER.id(),
sock_addr,
Duration::from_millis(400),
4096,
8192,
);
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
tcp_server_cfg,
sync_tm_tcp_source.clone(),
tcp_ccsds_distributor,
tc_source_tx.clone(),
PACKET_ID_VALIDATOR.clone(),
)
.expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmAsVecSender::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
tm_funnel_tx.clone(),
),
acs_thread_rx,
verif_reporter,
let mut tm_funnel = TmSinkDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
mpsc::channel();
let dummy_spi_interface = SpiDummyInterface::default();
let shared_mgm_set = Arc::default();
let mode_leaf_interface = MpscModeLeafInterface {
request_rx: mgm_handler_mode_rx,
reply_tx_to_pus: pus_mode_reply_tx,
reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
};
let mut mgm_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
"MGM_0",
mode_leaf_interface,
mgm_handler_composite_rx,
pus_hk_reply_tx,
tm_funnel_tx,
dummy_spi_interface,
shared_mgm_set,
);
let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string())
.name("sat-rs tmtc-udp".to_string())
.spawn(move || {
info!("Running UDP server on port {SERVER_PORT}");
loop {
@@ -440,7 +447,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TCP task");
let jh_tcp = thread::Builder::new()
.name("TCP".to_string())
.name("sat-rs tcp".to_string())
.spawn(move || {
info!("Running TCP server on port {SERVER_PORT}");
loop {
@@ -451,7 +458,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new()
.name("TM Funnel".to_string())
.name("sat-rs tm-sink".to_string())
.spawn(move || loop {
tm_funnel.operation();
})
@@ -459,7 +466,7 @@ fn dyn_tmtc_pool_main() {
info!("Starting event handling task");
let jh_event_handling = thread::Builder::new()
.name("Event".to_string())
.name("sat-rs events".to_string())
.spawn(move || loop {
event_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
@@ -468,16 +475,16 @@ fn dyn_tmtc_pool_main() {
info!("Starting AOCS thread");
let jh_aocs = thread::Builder::new()
.name("AOCS".to_string())
.name("sat-rs aocs".to_string())
.spawn(move || loop {
acs_task.periodic_operation();
mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
})
.unwrap();
info!("Starting PUS handler thread");
let jh_pus_handler = thread::Builder::new()
.name("PUS".to_string())
.name("sat-rs pus".to_string())
.spawn(move || loop {
pus_stack.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
@@ -511,7 +518,7 @@ fn main() {
dyn_tmtc_pool_main();
}
pub fn update_time(time_provider: &mut TimeProvider, timestamp: &mut [u8]) {
pub fn update_time(time_provider: &mut CdsTime, timestamp: &mut [u8]) {
time_provider
.update_from_now()
.expect("Could not get current time");


@@ -1,174 +1,747 @@
use log::{error, warn};
use satrs::action::ActionRequest;
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::action::{PusActionToRequestConverter, PusService8ActionHandler};
use log::warn;
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::action::{
ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
};
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
handle_completion_failure_with_generic_params, handle_step_failure_with_generic_params,
FailParamHelper, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper,
ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
MpscTmAsVecSender, PusPacketHandlingError, PusReplyHandler, PusServiceHelper,
PusTcToRequestConverter,
};
use satrs::request::TargetAndApidId;
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::{ChannelId, TargetId};
use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID};
use std::sync::mpsc::{self};
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_ACTION_SERVICE;
use satrs_example::config::tmtc_err;
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use super::GenericRoutingErrorHandler;
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
pub struct ActionReplyHandler {
fail_data_buf: [u8; 128],
}
impl Default for ActionReplyHandler {
fn default() -> Self {
Self {
fail_data_buf: [0; 128],
}
}
}
impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
warn!("received unexpected reply for service 8: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ActionReplyPus>,
active_request: &ActivePusActionRequestStd,
tm_sender: &(impl EcssTmSender + ?Sized),
verification_handler: &impl VerificationReportingProvider,
timestamp: &[u8],
) -> Result<bool, Self::Error> {
let verif_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => {
let error_propagated = handle_completion_failure_with_generic_params(
tm_sender,
verif_token,
verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::StepFailed {
error_code,
step,
params,
} => {
let error_propagated = handle_step_failure_with_generic_params(
tm_sender,
verif_token,
verification_handler,
FailParamHelper {
error_code,
params: params.as_ref(),
timestamp,
small_data_buf: &mut self.fail_data_buf,
},
&EcssEnumU16::new(*step),
)?;
if !error_propagated {
log::warn!(
"error params for completion failure were not propated: {:?}",
params.as_ref()
);
}
true
}
ActionReplyVariant::Completed => {
verification_handler.completion_success(tm_sender, verif_token, timestamp)?;
true
}
ActionReplyVariant::StepSuccess { step } => {
verification_handler.step_success(
tm_sender,
&verif_token,
timestamp,
EcssEnumU16::new(*step),
)?;
false
}
_ => false,
};
Ok(remove_entry)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusActionRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"action",
)
}
}
#[derive(Default)]
pub struct ExampleActionRequestConverter {}
pub struct ActionRequestConverter {}
impl PusActionToRequestConverter for ExampleActionRequestConverter {
type Error = PusPacketHandlingError;
impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for ActionRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, ActionRequest), Self::Error> {
time_stamp: &[u8],
) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
if user_data.len() < 8 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
return Err(PusPacketHandlingError::NotEnoughAppData {
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: user_data.len(),
});
}
let target_id = TargetAndApidId::from_pus_tc(tc).unwrap();
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap());
if subservice == 128 {
let req_variant = if user_data.len() == 8 {
ActionRequestVariant::NoData
} else {
ActionRequestVariant::VecData(user_data[8..].to_vec())
};
Ok((
target_id.raw(),
ActionRequest::UnsignedIdAndVecData {
ActivePusActionRequestStd::new(
action_id,
data: user_data[8..].to_vec(),
},
target_id_and_apid.into(),
token.into(),
Duration::from_secs(30),
),
ActionRequest::new(action_id, req_variant),
))
} else {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(PusPacketHandlingError::InvalidSubservice(subservice))
Err(GenericConversionError::InvalidSubservice(subservice))
}
}
}
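// Editorial sketch, not part of this change set: the converter above expects PUS 8
// subservice 128 telecommands whose application data starts with a 4-byte target ID followed
// by a 4-byte action ID, optionally followed by action-specific data. A hypothetical helper
// to build that layout on the sending side could look like this:
fn example_action_app_data(target_id: u32, action_id: u32, data: &[u8]) -> Vec<u8> {
    let mut app_data = Vec::with_capacity(8 + data.len());
    app_data.extend_from_slice(&target_id.to_be_bytes());
    app_data.extend_from_slice(&action_id.to_be_bytes());
    // Anything beyond the first 8 bytes ends up in ActionRequestVariant::VecData.
    app_data.extend_from_slice(data);
    app_data
}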
pub fn create_action_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_sender: PacketSenderWithSharedPool,
tc_pool: SharedStaticMemoryPool,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
) -> Pus8Wrapper<EcssTcInSharedStoreConverter> {
let action_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusAction as ChannelId,
"PUS_8_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let action_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusAction as ChannelId,
"PUS_8_TC_RECV",
pus_action_rx,
);
let pus_8_handler = PusService8ActionHandler::new(
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
Box::new(action_srv_receiver),
Box::new(action_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_ACTION_SERVICE.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
),
ExampleActionRequestConverter::default(),
ActionRequestConverter::default(),
// TODO: Implementation which does not use run-time allocation? Maybe something like
// a bounded wrapper which pre-allocates using [HashMap::with_capacity]..
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
action_router,
GenericRoutingErrorHandler::<8>::default(),
reply_receiver,
);
Pus8Wrapper { pus_8_handler }
ActionServiceWrapper {
service: action_request_handler,
}
}
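// Editorial sketch related to the TODO above, not part of this change set: one way to avoid
// run-time allocation is a bounded wrapper which pre-allocates its map and rejects new
// entries once full. The type below is hypothetical and is not wired into the satrs active
// request map traits.
struct BoundedActiveRequestMap<V> {
    map: std::collections::HashMap<u32, V>,
    capacity: usize,
}

impl<V> BoundedActiveRequestMap<V> {
    fn new(capacity: usize) -> Self {
        Self {
            // Pre-allocating avoids re-allocation during steady-state operation.
            map: std::collections::HashMap::with_capacity(capacity),
            capacity,
        }
    }

    fn insert(&mut self, request_id: u32, value: V) -> Result<(), V> {
        if self.map.len() >= self.capacity {
            // Signal saturation to the caller instead of growing the map.
            return Err(value);
        }
        self.map.insert(request_id, value);
        Ok(())
    }
}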
pub fn create_action_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
) -> Pus8Wrapper<EcssTcInVecConverter> {
let action_srv_tm_sender = MpscTmAsVecSender::new(
TmSenderId::PusAction as ChannelId,
"PUS_8_TM_SENDER",
tm_funnel_tx.clone(),
);
let action_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusAction as ChannelId,
"PUS_8_TC_RECV",
pus_action_rx,
);
let pus_8_handler = PusService8ActionHandler::new(
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
Box::new(action_srv_receiver),
Box::new(action_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_ACTION_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ExampleActionRequestConverter::default(),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
action_router,
GenericRoutingErrorHandler::<8>::default(),
reply_receiver,
);
Pus8Wrapper { pus_8_handler }
ActionServiceWrapper {
service: action_request_handler,
}
}
pub struct Pus8Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub(crate) pus_8_handler: PusService8ActionHandler<
pub struct ActionServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
pub(crate) service: PusTargetedRequestService<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporterWithSender,
ExampleActionRequestConverter,
GenericRequestRouter,
GenericRoutingErrorHandler<8>,
VerificationReporter,
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus8Wrapper<TcInMemConverter> {
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_8_handler.handle_one_tc() {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 8 partial packet handling success: {e:?}")
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for ActionServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = PusServiceId::Action as u8;
const SERVICE_STR: &'static str = "action";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 8 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 8 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => {
return true;
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification;
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::request::MessageMetadata;
use satrs::ComponentId;
use satrs::{
res_code::ResultU16,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
WritablePusPacket,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
SpHeader,
},
};
use crate::{
pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench},
requests::CompositeRequest,
};
use super::*;
impl
TargetedPusRequestTestbench<
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
ActionReplyPus,
>
{
pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
let _ = env_logger::builder().is_test(true).try_init();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let (action_req_tx, action_req_rx) = mpsc::channel();
let verif_reporter = TestVerificationReporter::new(owner_id);
let mut generic_req_router = GenericRequestRouter::default();
generic_req_router
.composite_router_map
.insert(target_id, action_req_tx);
Self {
service: PusTargetedRequestService::new(
PusServiceHelper::new(
owner_id,
pus_action_rx,
tm_funnel_tx.clone(),
verif_reporter,
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
generic_req_router,
action_reply_rx,
),
request_id: None,
pus_packet_tx: pus_action_tx,
tm_funnel_rx,
reply_tx: action_reply_tx,
request_rx: action_req_rx,
}
}
false
pub fn verify_packet_started(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_started_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_packet_completed(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_completion_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_tm_empty(&self) {
let packet = self.tm_funnel_rx.try_recv();
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
HandlingStatus::HandledOne => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
HandlingStatus::Empty => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
}
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::Empty);
}
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let accepted_token = self
.service
.service_helper
.verif_reporter()
.acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
.expect("TC acceptance failed");
self.service
.service_helper
.verif_reporter()
.check_next_was_added(accepted_token.request_id());
let id = self.service.service_helper.id();
self.service
.service_helper
.verif_reporter()
.check_next_is_acceptance_success(id, accepted_token.request_id());
self.pus_packet_tx
.send(EcssTcAndToken::new(
PacketAsVec::new(self.service.service_helper.id(), tc.to_vec().unwrap()),
accepted_token,
))
.unwrap();
}
}
#[test]
fn basic_request() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sp_header = SpHeader::new_from_apid(TEST_APID);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
testbench.verify_all_tcs_handled(&time_stamp);
testbench.verify_packet_started();
let possible_req = testbench.request_rx.try_recv();
assert!(possible_req.is_ok());
let req = possible_req.unwrap();
if let CompositeRequest::Action(action_req) = req.message {
assert_eq!(action_req.action_id, action_id);
assert_eq!(action_req.variant, ActionRequestVariant::NoData);
let action_reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
testbench
.reply_tx
.send(GenericMessage::new(req.requestor_info, action_reply))
.unwrap();
} else {
panic!("unexpected request type");
}
testbench.verify_next_reply_is_handled_properly(&time_stamp);
testbench.verify_all_replies_handled(&time_stamp);
testbench.verify_packet_completed();
testbench.verify_tm_empty();
}
#[test]
fn basic_request_routing_error() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
let result = testbench.service.poll_and_handle_next_tc(&time_stamp);
assert!(result.is_err());
// Verify the correct result and completion failure.
}
#[test]
fn converter_action_req_no_data() {
let mut testbench = PusConverterTestbench::new(
TEST_COMPONENT_ID_0.raw(),
ActionRequestConverter::default(),
);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Valid target ID, the conversion itself should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::NoData = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(
active_req.target_id(),
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
);
assert_eq!(
active_req.token().request_id(),
testbench.request_id().unwrap()
);
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn converter_action_req_with_data() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16];
// Valid target ID, the conversion itself should succeed.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 {
app_data[i + 8] = i as u8;
}
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::VecData(vec) = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(vec, app_data[8..].to_vec());
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn reply_handling_completion_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_full_completion_success(
TEST_COMPONENT_ID_0.id(),
req_id,
None,
);
}
#[test]
fn reply_handling_completion_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::CompletionFailed {
error_code,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.into(),
req_id,
None,
error_code.raw() as u64,
);
}
#[test]
fn reply_handling_step_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
// Entry should not be removed, completion not done yet.
assert!(!result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
}
#[test]
fn reply_handling_step_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = ActionReplyPus::new(
action_id,
ActionReplyVariant::StepFailed {
error_code,
step: 1,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench.verif_reporter.check_next_is_step_failure(
TEST_COMPONENT_ID_0.id(),
req_id,
error_code.raw().into(),
);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
&[],
);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
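// Editorial sketch, not part of this change set: ActionServiceWrapper above forwards the
// TargetedPusService methods to its inner service with the delegate crate. The same pattern
// in isolation, using hypothetical types, looks like this:
struct Inner;

impl Inner {
    fn poll(&mut self) -> bool {
        // Pretend there is always exactly one item to handle.
        true
    }
}

struct Wrapper {
    inner: Inner,
}

impl Wrapper {
    delegate::delegate! {
        to self.inner {
            // Expands to a forwarding method with an identical signature.
            fn poll(&mut self) -> bool;
        }
    }
}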


@@ -1,106 +1,115 @@
use std::sync::mpsc;
use log::{error, warn};
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use crate::pus::create_verification_reporter;
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusService5EventHandler;
use satrs::pus::verification::VerificationReporterWithSender;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
};
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::ChannelId;
use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
use super::{DirectPusService, HandlingStatus};
pub fn create_event_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_sender: PacketSenderWithSharedPool,
tc_pool: SharedStaticMemoryPool,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> Pus5Wrapper<EcssTcInSharedStoreConverter> {
let event_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusEvent as ChannelId,
"PUS_5_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let event_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusEvent as ChannelId,
"PUS_5_TC_RECV",
pus_event_rx,
);
let pus_5_handler = PusService5EventHandler::new(
) -> EventServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
let pus_5_handler = PusEventServiceHandler::new(
PusServiceHelper::new(
Box::new(event_srv_receiver),
Box::new(event_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_EVENT_MANAGEMENT.id(),
pus_event_rx,
tm_sender,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
),
event_request_tx,
);
Pus5Wrapper { pus_5_handler }
EventServiceWrapper {
handler: pus_5_handler,
}
}
pub fn create_event_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> Pus5Wrapper<EcssTcInVecConverter> {
let event_srv_tm_sender = MpscTmAsVecSender::new(
TmSenderId::PusEvent as ChannelId,
"PUS_5_TM_SENDER",
tm_funnel_tx,
);
let event_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusEvent as ChannelId,
"PUS_5_TC_RECV",
pus_event_rx,
);
let pus_5_handler = PusService5EventHandler::new(
) -> EventServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let pus_5_handler = PusEventServiceHandler::new(
PusServiceHelper::new(
Box::new(event_srv_receiver),
Box::new(event_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_EVENT_MANAGEMENT.id(),
pus_event_rx,
tm_funnel_tx,
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
EcssTcInVecConverter::default(),
),
event_request_tx,
);
Pus5Wrapper { pus_5_handler }
EventServiceWrapper {
handler: pus_5_handler,
}
}
pub struct Pus5Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_5_handler: PusService5EventHandler<TcInMemConverter, VerificationReporterWithSender>,
pub struct EventServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
pub handler:
PusEventServiceHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus5Wrapper<TcInMemConverter> {
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_5_handler.handle_one_tc() {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 5 partial packet handling success: {e:?}")
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
for EventServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = PusServiceId::Event as u8;
const SERVICE_STR: &'static str = "events";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self
.handler
.poll_and_handle_next_tc(error_handler, time_stamp);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on continuous errors.
return HandlingStatus::Empty;
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 5 invalid subservice {invalid}");
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 5 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => {
return true;
}
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
false
HandlingStatus::HandledOne
}
}
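// Editorial sketch, not part of this change set: callers are expected to keep polling a
// DirectPusService until it reports HandlingStatus::Empty, typically once per scheduling slot.
fn drain_direct_pus_service<Service: DirectPusService>(service: &mut Service, time_stamp: &[u8]) {
    loop {
        if service.poll_and_handle_next_tc(time_stamp) == HandlingStatus::Empty {
            break;
        }
    }
}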


@@ -1,47 +1,126 @@
use log::{error, warn};
use satrs::hk::{CollectionIntervalFactor, HkRequest};
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::hk::{PusHkToRequestConverter, PusService3HkHandler};
use derive_new::new;
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper,
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender,
EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::TargetAndApidId;
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket};
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::{ChannelId, TargetId};
use satrs_example::config::{hk_err, tmtc_err, TcReceiverId, TmSenderId, PUS_APID};
use std::sync::mpsc::{self};
use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_HK_SERVICE;
use satrs_example::config::{hk_err, tmtc_err};
use std::sync::mpsc;
use std::time::Duration;
use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
use crate::requests::GenericRequestRouter;
use super::GenericRoutingErrorHandler;
use super::{HandlingStatus, PusTargetedRequestService, TargetedPusService};
#[derive(Clone, PartialEq, Debug, new)]
pub struct HkReply {
pub unique_id: UniqueId,
pub variant: HkReplyVariant,
}
#[derive(Clone, PartialEq, Debug)]
pub enum HkReplyVariant {
Ack,
}
#[derive(Default)]
pub struct ExampleHkRequestConverter {}
pub struct HkReplyHandler {}
impl PusHkToRequestConverter for ExampleHkRequestConverter {
type Error = PusPacketHandlingError;
impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<HkReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for service 3: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<HkReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message.variant {
HkReplyVariant::Ack => {
verification_handler
.completion_success(tm_sender, started_token, time_stamp)
.expect("sending completion success verification failed");
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
pub struct HkRequestConverter {
timeout: Duration,
}
impl Default for HkRequestConverter {
fn default() -> Self {
Self {
timeout: Duration::from_secs(60),
}
}
}
impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, HkRequest), Self::Error> {
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
let user_data = tc.user_data();
if user_data.is_empty() {
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
@@ -50,7 +129,7 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
),
)
.expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::NotEnoughAppData {
return Err(GenericConversionError::NotEnoughAppData {
expected: 4,
found: 0,
});
@@ -64,46 +143,50 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
let user_data_len = user_data.len() as u32;
let user_data_len_raw = user_data_len.to_be_bytes();
verif_reporter
.start_failure(token, FailParams::new(time_stamp, err, &user_data_len_raw))
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, err, &user_data_len_raw),
)
.expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::NotEnoughAppData {
return Err(GenericConversionError::NotEnoughAppData {
expected: 8,
found: 4,
});
}
let subservice = tc.subservice();
let target_id = TargetAndApidId::from_pus_tc(tc).expect("invalid tc format");
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());
let standard_subservice = hk::Subservice::try_from(subservice);
if standard_subservice.is_err() {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
)
.expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::InvalidSubservice(subservice));
return Err(GenericConversionError::InvalidSubservice(subservice));
}
Ok((
target_id.into(),
match standard_subservice.unwrap() {
let request = match standard_subservice.unwrap() {
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
HkRequest::Enable(unique_id)
HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
}
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
HkRequest::Disable(unique_id)
HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
}
hk::Subservice::TcReportHkReportStructures => todo!(),
hk::Subservice::TmHkPacket => todo!(),
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
HkRequest::OneShot(unique_id)
HkRequest::new(unique_id, HkRequestVariant::OneShot)
}
hk::Subservice::TcModifyDiagCollectionInterval
| hk::Subservice::TcModifyHkCollectionInterval => {
if user_data.len() < 12 {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(
time_stamp,
@@ -111,21 +194,24 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
),
)
.expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::NotEnoughAppData {
return Err(GenericConversionError::NotEnoughAppData {
expected: 12,
found: user_data.len(),
});
}
HkRequest::ModifyCollectionInterval(
HkRequest::new(
unique_id,
HkRequestVariant::ModifyCollectionInterval(
CollectionIntervalFactor::from_be_bytes(
user_data[8..12].try_into().unwrap(),
),
),
)
}
_ => {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new(
time_stamp,
@@ -134,104 +220,325 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
),
)
.expect("Sending start failure TM failed");
return Err(PusPacketHandlingError::InvalidSubservice(subservice));
return Err(GenericConversionError::InvalidSubservice(subservice));
}
},
};
Ok((
ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout),
request,
))
}
}
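// Editorial sketch, not part of this change set: the converter above expects the PUS 3
// application data to start with a 4-byte target ID followed by a 4-byte housekeeping set ID,
// plus a 4-byte collection interval factor for the interval modification subservices. A
// hypothetical helper to build that layout could look like this:
fn example_hk_app_data(target_id: u32, unique_id: u32, interval_factor: Option<u32>) -> Vec<u8> {
    let mut app_data = Vec::with_capacity(12);
    app_data.extend_from_slice(&target_id.to_be_bytes());
    app_data.extend_from_slice(&unique_id.to_be_bytes());
    if let Some(factor) = interval_factor {
        // Only required for TcModifyHkCollectionInterval / TcModifyDiagCollectionInterval.
        app_data.extend_from_slice(&factor.to_be_bytes());
    }
    app_data
}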
pub fn create_hk_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_sender: PacketSenderWithSharedPool,
tc_pool: SharedStaticMemoryPool,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
) -> Pus3Wrapper<EcssTcInSharedStoreConverter> {
let hk_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusHk as ChannelId,
"PUS_3_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let hk_srv_receiver =
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
let pus_3_handler = PusService3HkHandler::new(
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
Box::new(hk_srv_receiver),
Box::new(hk_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_sender,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
),
ExampleHkRequestConverter::default(),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
GenericRoutingErrorHandler::default(),
reply_receiver,
);
Pus3Wrapper { pus_3_handler }
HkServiceWrapper {
service: pus_3_handler,
}
}
pub fn create_hk_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
) -> Pus3Wrapper<EcssTcInVecConverter> {
let hk_srv_tm_sender = MpscTmAsVecSender::new(
TmSenderId::PusHk as ChannelId,
"PUS_3_TM_SENDER",
tm_funnel_tx.clone(),
);
let hk_srv_receiver =
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
let pus_3_handler = PusService3HkHandler::new(
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
Box::new(hk_srv_receiver),
Box::new(hk_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_funnel_tx,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ExampleHkRequestConverter::default(),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
GenericRoutingErrorHandler::default(),
reply_receiver,
);
Pus3Wrapper { pus_3_handler }
HkServiceWrapper {
service: pus_3_handler,
}
}
pub struct Pus3Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub(crate) pus_3_handler: PusService3HkHandler<
pub struct HkServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
pub(crate) service: PusTargetedRequestService<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporterWithSender,
ExampleHkRequestConverter,
GenericRequestRouter,
GenericRoutingErrorHandler<3>,
VerificationReporter,
HkRequestConverter,
HkReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
HkRequest,
HkReply,
>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus3Wrapper<TcInMemConverter> {
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_3_handler.handle_one_tc() {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS 3 partial packet handling success: {e:?}")
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for HkServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = PusServiceId::Housekeeping as u8;
const SERVICE_STR: &'static str = "housekeeping";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS 3 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS 3 subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => {
return true;
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
request::GenericMessage,
spacepackets::{
ecss::{hk::Subservice, tc::PusTcCreator},
SpHeader,
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
};
use satrs_example::config::tmtc_err;
use crate::pus::{
hk::HkReplyVariant,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::{HkReply, HkReplyHandler, HkRequestConverter};
#[test]
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let hk_req = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcGenerateOneShotHk as u8,
&app_data,
true,
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::OneShot = req.variant {
} else {
panic!("unexpected HK request")
}
}
false
#[test]
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::EnablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::DisablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
let collection_interval_factor = 5_u32;
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
assert_eq!(interval_factor, collection_interval_factor);
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyHkCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyDiagCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_reply_handler() {
let mut reply_testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
let sender_id = 2_u64;
let apid_target_id = 3_u32;
let unique_id = 5_u32;
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
let reply = GenericMessage::new(
MessageMetadata::new(req_id.into(), sender_id),
HkReply::new(unique_id, HkReplyVariant::Ack),
);
let result = reply_testbench.handle_reply(&reply, &active_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
reply_testbench
.verif_reporter
.assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do much. We simply check that it does not panic or
// misbehave.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_1.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}


@@ -1,122 +1,149 @@
use crate::tmtc::MpscStoreAndSendError;
use crate::requests::GenericRequestRouter;
use log::warn;
use satrs::pool::PoolAddr;
use satrs::pus::verification::{
FailParams, StdVerifReporterWithSender, VerificationReportingProvider,
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
EcssTcAndToken, GenericRoutingError, PusPacketHandlerResult, PusRoutingErrorHandler, TcInMemory,
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusServiceId;
use satrs::spacepackets::time::cds::TimeProvider;
use satrs::spacepackets::time::TimeWriter;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketInPool};
use satrs::ComponentId;
use satrs_example::config::components::PUS_ROUTING_SERVICE;
use satrs_example::config::{tmtc_err, CustomPusServiceId};
use std::sync::mpsc::Sender;
use satrs_example::TimeStampHelper;
use std::fmt::Debug;
use std::sync::mpsc::{self, Sender};
pub mod action;
pub mod event;
pub mod hk;
pub mod mode;
pub mod scheduler;
pub mod stack;
pub mod test;
pub struct PusTcMpscRouter {
pub test_service_receiver: Sender<EcssTcAndToken>,
pub event_service_receiver: Sender<EcssTcAndToken>,
pub sched_service_receiver: Sender<EcssTcAndToken>,
pub hk_service_receiver: Sender<EcssTcAndToken>,
pub action_service_receiver: Sender<EcssTcAndToken>,
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry gets its own
// cloned verification reporter.
VerificationReporter::new(owner_id, &verif_cfg)
}
pub struct PusReceiver {
pub verif_reporter: StdVerifReporterWithSender,
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: Sender<EcssTcAndToken>,
pub event_tc_sender: Sender<EcssTcAndToken>,
pub sched_tc_sender: Sender<EcssTcAndToken>,
pub hk_tc_sender: Sender<EcssTcAndToken>,
pub action_tc_sender: Sender<EcssTcAndToken>,
pub mode_tc_sender: Sender<EcssTcAndToken>,
}
pub struct PusTcDistributor<TmSender: EcssTmSender> {
pub id: ComponentId,
pub tm_sender: TmSender,
pub verif_reporter: VerificationReporter,
pub pus_router: PusTcMpscRouter,
stamp_helper: TimeStampHelper,
}
struct TimeStampHelper {
stamper: TimeProvider,
time_stamp: [u8; 7],
}
impl TimeStampHelper {
pub fn new() -> Self {
impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
Self {
stamper: TimeProvider::new_with_u16_days(0, 0),
time_stamp: [0; 7],
}
}
pub fn stamp(&self) -> &[u8] {
&self.time_stamp
}
pub fn update_from_now(&mut self) {
self.stamper
.update_from_now()
.expect("Updating timestamp failed");
self.stamper
.write_to_bytes(&mut self.time_stamp)
.expect("Writing timestamp failed");
}
}
impl PusReceiver {
pub fn new(verif_reporter: StdVerifReporterWithSender, pus_router: PusTcMpscRouter) -> Self {
Self {
verif_reporter,
id: PUS_ROUTING_SERVICE.raw(),
tm_sender,
verif_reporter: create_verification_reporter(
PUS_ROUTING_SERVICE.id(),
PUS_ROUTING_SERVICE.apid,
),
pus_router,
stamp_helper: TimeStampHelper::new(),
}
stamp_helper: TimeStampHelper::default(),
}
}
impl PusReceiver {
pub fn handle_tc_packet(
pub fn handle_tc_packet_vec(
&mut self,
tc_in_memory: TcInMemory,
service: u8,
pus_tc: &PusTcReader,
) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> {
let init_token = self.verif_reporter.add_tc(pus_tc);
packet_as_vec: PacketAsVec,
) -> Result<HandlingStatus, GenericSendError> {
self.handle_tc_generic(packet_as_vec.sender_id, None, &packet_as_vec.packet)
}
pub fn handle_tc_packet_in_store(
&mut self,
packet_in_pool: PacketInPool,
pus_tc_copy: &[u8],
) -> Result<HandlingStatus, GenericSendError> {
self.handle_tc_generic(
packet_in_pool.sender_id,
Some(packet_in_pool.store_addr),
pus_tc_copy,
)
}
pub fn handle_tc_generic(
&mut self,
sender_id: ComponentId,
addr_opt: Option<PoolAddr>,
raw_tc: &[u8],
) -> Result<HandlingStatus, GenericSendError> {
let pus_tc_result = PusTcReader::new(raw_tc);
if pus_tc_result.is_err() {
log::warn!(
"error creating PUS TC from raw data received from {}: {}",
sender_id,
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", raw_tc);
// TODO: Shouldn't this be an error?
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
.acceptance_success(init_token, self.stamp_helper.stamp())
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
.expect("Acceptance success failure");
let service = PusServiceId::try_from(service);
let service = PusServiceId::try_from(pus_tc.service());
let tc_in_memory: TcInMemory = if let Some(store_addr) = addr_opt {
PacketInPool::new(sender_id, store_addr).into()
} else {
PacketAsVec::new(sender_id, Vec::from(raw_tc)).into()
};
match service {
Ok(standard_service) => match standard_service {
PusServiceId::Test => {
self.pus_router.test_service_receiver.send(EcssTcAndToken {
PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
})?,
PusServiceId::Housekeeping => {
self.pus_router.hk_service_receiver.send(EcssTcAndToken {
self.pus_router.hk_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
PusServiceId::Event => {
self.pus_router
.event_service_receiver
.send(EcssTcAndToken {
PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
})?,
PusServiceId::Scheduling => {
self.pus_router
.sched_service_receiver
.send(EcssTcAndToken {
self.pus_router.sched_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
_ => {
let result = self.verif_reporter.start_failure(
&self.tm_sender,
accepted_token,
FailParams::new(
self.stamp_helper.stamp(),
@@ -132,15 +159,20 @@ impl PusReceiver {
Err(e) => {
if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
match custom_service {
CustomPusServiceId::Mode => {
// TODO: Fix mode service.
//self.handle_mode_service(pus_tc, accepted_token)
}
CustomPusServiceId::Mode => self
.pus_router
.mode_tc_sender
.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})
.map_err(|_| GenericSendError::RxDisconnected)?,
CustomPusServiceId::Health => {}
}
} else {
self.verif_reporter
.start_failure(
&self.tm_sender,
accepted_token,
FailParams::new(
self.stamp_helper.stamp(),
@@ -152,59 +184,600 @@ impl PusReceiver {
}
}
}
Ok(PusPacketHandlerResult::RequestHandled)
Ok(HandlingStatus::HandledOne)
}
}
pub trait TargetedPusService {
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let result = self.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
log::error!(
"PUS service {}({})packet handling error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on error cases.
return HandlingStatus::Empty;
}
result.unwrap()
}
fn poll_and_handle_next_reply_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.poll_and_handle_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!(
"PUS servce {}({}): Handling reply failed with error {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
HandlingStatus::Empty
})
}
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
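// Illustrative sketch, not taken from the sat-rs sources: one possible way to drain a service
// which implements the [TargetedPusService] trait above, relying only on the provided default
// handlers. The function name and loop structure are assumptions for illustration; the example
// application drives its services through the PusStack shown further below.
pub fn drain_targeted_service_sketch<Service: TargetedPusService>(
    service: &mut Service,
    time_stamp: &[u8],
) {
    // Keep polling until both the TC queue and the reply queue report no more work.
    loop {
        let tc_status = service.poll_and_handle_next_tc_default_handler(time_stamp);
        let reply_status = service.poll_and_handle_next_reply_default_handler(time_stamp);
        if tc_status == HandlingStatus::Empty && reply_status == HandlingStatus::Empty {
            break;
        }
    }
    // Timeout checking is usually performed once per handling period, after the queues are empty.
    service.check_for_request_timeouts();
}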
/// Generic trait for services which handle packets directly. It is kept minimal for now because
/// it is difficult to allow flexible user code for these services.
pub trait DirectPusService {
const SERVICE_ID: u8;
const SERVICE_STR: &'static str;
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
}
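// Illustrative sketch, not taken from the sat-rs sources: the smallest possible implementor of
// the [DirectPusService] trait above. The type name and the service ID are purely hypothetical
// and only show the shape of the trait contract.
pub struct NoOpDirectService;

impl DirectPusService for NoOpDirectService {
    // Hypothetical custom service ID, chosen only for illustration.
    const SERVICE_ID: u8 = 130;
    const SERVICE_STR: &'static str = "no-op";

    fn poll_and_handle_next_tc(&mut self, _timestamp: &[u8]) -> HandlingStatus {
        // A real service would poll its TC receiver here. This stub always reports an empty
        // queue so a calling scheduler can move on immediately.
        HandlingStatus::Empty
    }
}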
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
/// The generic steps for this process are the following
///
/// 1. Poll for TC packets
/// 2. Convert the raw packets to a [PusTcReader].
/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter].
/// 4. Route the requests using the [GenericRequestRouter].
/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction.
/// 6. Check for replies which complete the forwarded request. The handler takes care of
/// the verification process.
/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should
/// be the highest expected timeout for the given target.
///
/// The handler exposes the following API:
///
/// 1. [Self::poll_and_handle_next_tc] which tries to poll and handle one TC packet, covering
/// steps 1-5.
/// 2. [Self::poll_and_handle_next_reply] which tries to poll and handle one reply, covering step 6.
/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
pub struct PusTargetedRequestService<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
pub reply_handler: ReplyHandler,
pub reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
>
PusTargetedRequestService<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestInfo,
RequestType,
ReplyType,
>
where
GenericRequestRouter: PusRequestRouter<RequestType, Error = GenericRoutingError>,
{
pub fn new(
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
active_request_map: ActiveRequestMap,
reply_hook: ReplyHandler,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
) -> Self {
Self {
service_helper,
request_converter,
active_request_map,
reply_handler: reply_hook,
request_router,
reply_receiver,
phantom: std::marker::PhantomData,
}
}
pub fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(HandlingStatus::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
.tc_in_mem_converter_mut()
.cache(&ecss_tc_and_token.tc_in_memory)?;
let tc = self.service_helper.tc_in_mem_converter().convert()?;
let (mut request_info, request) = match self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
self.service_helper.tm_sender(),
&self.service_helper.common.verif_reporter,
time_stamp,
) {
Ok((info, req)) => (info, req),
Err(e) => {
self.handle_conversion_to_request_error(&e, ecss_tc_and_token.token, time_stamp);
return Err(e.into());
}
};
let accepted_token: VerificationToken<TcStateAccepted> = request_info
.token()
.try_into()
.expect("token not in expected accepted state");
let verif_request_id = verification::RequestId::new(&tc).raw();
match self.request_router.route(
MessageMetadata::new(verif_request_id, self.service_helper.id()),
request_info.target_id(),
request,
) {
Ok(()) => {
let started_token = self
.service_helper
.verif_reporter()
.start_success(
&self.service_helper.common.tm_sender,
accepted_token,
time_stamp,
)
.expect("Start success failure");
request_info.set_token(started_token.into());
self.active_request_map
.insert(&verif_request_id, request_info);
}
Err(e) => {
self.request_router.handle_error_generic(
&request_info,
&tc,
e.clone(),
self.service_helper.tm_sender(),
self.service_helper.verif_reporter(),
time_stamp,
);
return Err(e.into());
}
}
Ok(HandlingStatus::HandledOne)
}
fn handle_conversion_to_request_error(
&mut self,
error: &GenericConversionError,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
) {
match error {
GenericConversionError::WrongService(service) => {
let service_slice: [u8; 1] = [*service];
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice),
)
.expect("Sending completion failure failed");
}
GenericConversionError::InvalidSubservice(subservice) => {
let subservice_slice: [u8; 1] = [*subservice];
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(
time_stamp,
&tmtc_err::INVALID_PUS_SUBSERVICE,
&subservice_slice,
),
)
.expect("Sending completion failure failed");
}
GenericConversionError::NotEnoughAppData { expected, found } => {
let mut context_info = (*found as u32).to_be_bytes().to_vec();
context_info.extend_from_slice(&(*expected as u32).to_be_bytes());
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info),
)
.expect("Sending completion failure failed");
}
// Do nothing. This error is service specific and can not be handled generically here.
GenericConversionError::InvalidAppData(_) => (),
}
}
pub fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError> {
match self.reply_receiver.try_recv() {
Ok(reply) => {
self.handle_reply(&reply, time_stamp)?;
Ok(HandlingStatus::HandledOne)
}
Err(e) => match e {
mpsc::TryRecvError::Empty => Ok(HandlingStatus::Empty),
mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive(
GenericReceiveError::TxDisconnected(None),
)),
},
}
}
pub fn handle_reply(
&mut self,
reply: &GenericMessage<ReplyType>,
time_stamp: &[u8],
) -> Result<(), EcssTmtcError> {
let active_req_opt = self.active_request_map.get(reply.request_id());
if active_req_opt.is_none() {
self.reply_handler
.handle_unrequested_reply(reply, &self.service_helper.common.tm_sender)?;
return Ok(());
}
let active_request = active_req_opt.unwrap();
let result = self.reply_handler.handle_reply(
reply,
active_request,
&self.service_helper.common.tm_sender,
&self.service_helper.common.verif_reporter,
time_stamp,
);
if result.is_err() || (result.is_ok() && *result.as_ref().unwrap()) {
self.active_request_map.remove(reply.request_id());
}
result.map(|_| ())
}
pub fn check_for_request_timeouts(&mut self) {
let mut requests_to_delete = Vec::new();
self.active_request_map
.for_each(|request_id, request_info| {
if request_info.has_timed_out() {
requests_to_delete.push(*request_id);
}
});
if !requests_to_delete.is_empty() {
for request_id in requests_to_delete {
self.active_request_map.remove(request_id);
}
}
}
}
/// Generic timeout handling: Handle the verification failure with a dedicated return code
/// and also log the error.
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSender + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
service_str: &'static str,
) -> Result<(), EcssTmtcError> {
log::warn!("timeout for active request {active_request:?} on {service_str} service");
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("token not in expected started state");
verification_handler.completion_failure(
sender,
started_token,
FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]),
)?;
Ok(())
}
#[cfg(test)]
pub(crate) mod tests {
use std::time::Duration;
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
use satrs::request::RequestId;
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
WritablePusPacket,
},
SpHeader,
},
};
use crate::requests::CompositeRequest;
use super::*;
// Testbench dedicated to the testing of [PusReplyHandler]s
pub struct ReplyHandlerTestbench<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
Reply,
> {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub reply_handler: ReplyHandler,
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
pub default_timeout: Duration,
tm_sender: MpscTmAsVecSender,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
}
impl<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
Reply,
> ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
{
pub fn new(owner_id: ComponentId, reply_handler: ReplyHandler) -> Self {
let test_verif_reporter = TestVerificationReporter::new(owner_id);
let (tm_sender, tm_receiver) = mpsc::channel();
Self {
id: TEST_COMPONENT_ID_0.raw(),
verif_reporter: test_verif_reporter,
reply_handler,
default_timeout: Duration::from_secs(30),
tm_sender,
tm_receiver,
phantom: std::marker::PhantomData,
}
}
pub fn add_tc(
&mut self,
apid: u16,
apid_target: u32,
time_stamp: &[u8],
) -> (verification::RequestId, ActivePusRequestStd) {
let sp_header = SpHeader::new_from_apid(apid);
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
sp_header,
sec_header_dummy,
&[],
true,
));
let accepted = self
.verif_reporter
.acceptance_success(&self.tm_sender, init, time_stamp)
.expect("acceptance failed");
let started = self
.verif_reporter
.start_success(&self.tm_sender, accepted, time_stamp)
.expect("start failed");
(
started.request_id(),
ActivePusRequestStd::new(
UniqueApidTargetId::new(apid, apid_target).raw(),
started,
self.default_timeout,
),
)
}
pub fn handle_reply(
&mut self,
reply: &GenericMessage<Reply>,
active_request: &ActiveRequestInfo,
time_stamp: &[u8],
) -> Result<bool, ReplyHandler::Error> {
self.reply_handler.handle_reply(
reply,
active_request,
&self.tm_sender,
&self.verif_reporter,
time_stamp,
)
}
pub fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<Reply>,
) -> Result<(), ReplyHandler::Error> {
self.reply_handler
.handle_unrequested_reply(reply, &self.tm_sender)
}
pub fn handle_request_timeout(
&mut self,
active_request_info: &ActiveRequestInfo,
time_stamp: &[u8],
) -> Result<(), ReplyHandler::Error> {
self.reply_handler.handle_request_timeout(
active_request_info,
&self.tm_sender,
&self.verif_reporter,
time_stamp,
)
}
}
#[derive(Default)]
pub struct GenericRoutingErrorHandler<const SERVICE_ID: u8> {}
pub struct DummySender {}
impl<const SERVICE_ID: u8> PusRoutingErrorHandler for GenericRoutingErrorHandler<SERVICE_ID> {
type Error = satrs::pus::GenericRoutingError;
/// Dummy sender component which does nothing on the [Self::send_tm] call.
///
/// Useful for unit tests.
impl EcssTmSender for DummySender {
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
Ok(())
}
}
fn handle_error(
&self,
target_id: satrs::TargetId,
token: satrs::pus::verification::VerificationToken<
satrs::pus::verification::TcStateAccepted,
>,
_tc: &PusTcReader,
error: Self::Error,
// Testbench dedicated to the testing of [PusTcToRequestConverter]s
pub struct PusConverterTestbench<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
Request,
> {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub converter: Converter,
dummy_sender: DummySender,
current_request_id: Option<verification::RequestId>,
current_packet: Option<Vec<u8>>,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Request)>,
}
impl<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
Request,
> PusConverterTestbench<Converter, ActiveRequestInfo, Request>
{
pub fn new(owner_id: ComponentId, converter: Converter) -> Self {
let test_verif_reporter = TestVerificationReporter::new(owner_id);
Self {
id: owner_id,
verif_reporter: test_verif_reporter,
converter,
dummy_sender: DummySender::default(),
current_request_id: None,
current_packet: None,
phantom: std::marker::PhantomData,
}
}
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verif_reporter.add_tc(tc);
self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter
.acceptance_success(&self.dummy_sender, token, &[])
.expect("acceptance failed")
}
pub fn request_id(&self) -> Option<verification::RequestId> {
self.current_request_id
}
pub fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) {
warn!("Routing request for service {SERVICE_ID} failed: {error:?}");
match error {
GenericRoutingError::UnknownTargetId(id) => {
let mut fail_data: [u8; 8] = [0; 8];
fail_data.copy_from_slice(&id.to_be_bytes());
verif_reporter
.start_failure(
expected_apid: u16,
expected_apid_target: u32,
) -> Result<(ActiveRequestInfo, Request), Converter::Error> {
if self.current_packet.is_none() {
return Err(GenericConversionError::InvalidAppData(
"call add_tc first".to_string(),
));
}
let current_packet = self.current_packet.take().unwrap();
let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert(
token,
FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data),
)
.expect("Sending start failure failed");
}
GenericRoutingError::SendError(_) => {
let mut fail_data: [u8; 8] = [0; 8];
fail_data.copy_from_slice(&target_id.to_be_bytes());
verif_reporter
.start_failure(
token,
FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data),
)
.expect("Sending start failure failed");
}
GenericRoutingError::NotEnoughAppData { expected, found } => {
let mut context_info = (found as u32).to_be_bytes().to_vec();
context_info.extend_from_slice(&(expected as u32).to_be_bytes());
verif_reporter
.start_failure(
token,
FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info),
)
.expect("Sending start failure failed");
&tc_reader.0,
&self.dummy_sender,
&self.verif_reporter,
time_stamp,
)?;
assert_eq!(
active_info.token().request_id(),
self.request_id().expect("no request id is set")
);
assert_eq!(
active_info.target_id(),
UniqueApidTargetId::new(expected_apid, expected_apid_target).raw()
);
Ok((active_info, request))
}
}
pub struct TargetedPusRequestTestbench<
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
> {
pub service: PusTargetedRequestService<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
TestVerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestInfo,
RequestType,
ReplyType,
>,
pub request_id: Option<RequestId>,
pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
}
}


@@ -0,0 +1,416 @@
use derive_new::new;
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlingError,
PusServiceHelper,
};
use satrs::request::GenericMessage;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::{
mode::Subservice,
verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::PusTcReader,
tm::{PusTmCreator, PusTmSecondaryHeader},
PusPacket,
},
SpHeader,
},
ComponentId,
};
use satrs_example::config::components::PUS_MODE_SERVICE;
use satrs_example::config::{mode_err, tmtc_err, CustomPusServiceId};
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
#[derive(new)]
pub struct ModeReplyHandler {
owner_id: ComponentId,
}
impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
_tm_sender: &impl EcssTmSender,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for mode service 5: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message {
ModeReply::ModeReply(mode_reply) => {
let mut source_data: [u8; 12] = [0; 12];
mode_reply
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
ModeReply::CantReachMode(error_code) => {
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(time_stamp, &error_code, &[]),
)?;
}
ModeReply::WrongMode { expected, reached } => {
let mut error_info: [u8; 24] = [0; 24];
let mut written_len = expected
.write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN])
.expect("writing expected mode failed");
written_len += reached
.write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
.expect("writing reached mode failed");
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(
time_stamp,
&mode_err::WRONG_MODE,
&error_info[..written_len],
),
)?;
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSender,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
#[derive(Default)]
pub struct ModeRequestConverter {}
impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSender + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
let not_enough_app_data = |expected: usize| {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
Err(GenericConversionError::NotEnoughAppData {
expected,
found: user_data.len(),
})
};
if user_data.len() < core::mem::size_of::<u32>() {
return not_enough_app_data(4);
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let active_request =
ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30));
let subservice_typed = Subservice::try_from(subservice);
let invalid_subservice = || {
// Invalid subservice
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
};
if subservice_typed.is_err() {
return invalid_subservice();
}
let subservice_typed = subservice_typed.unwrap();
match subservice_typed {
Subservice::TcSetMode => {
if user_data.len() < core::mem::size_of::<u32>() + ModeAndSubmode::RAW_LEN {
return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN);
}
let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..])
.expect("mode and submode extraction failed");
Ok((active_request, ModeRequest::SetMode(mode_and_submode)))
}
Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)),
Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)),
Subservice::TcAnnounceModeRecursive => {
Ok((active_request, ModeRequest::AnnounceModeRecursive))
}
_ => invalid_subservice(),
}
}
}
pub fn create_mode_service_static(
tm_sender: PacketSenderWithSharedPool,
tc_pool: SharedStaticMemoryPool,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub fn create_mode_service_dynamic(
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub struct ModeServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
pub(crate) service: PusTargetedRequestService<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
ModeRequestConverter,
ModeReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
ModeRequest,
ModeReply,
>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for ModeServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = CustomPusServiceId::Mode as u8;
const SERVICE_STR: &'static str = "mode";
delegate::delegate! {
to self.service {
fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, PusPacketHandlingError>;
fn poll_and_handle_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError>;
fn check_for_request_timeouts(&mut self);
}
}
}
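// For readers unfamiliar with the delegate crate: the delegate! block above only generates
// plain forwarding methods. A hand-written equivalent (sketch, assuming the same generic
// bounds as the surrounding impl) would look roughly like this:
//
//     fn poll_and_handle_next_tc(
//         &mut self,
//         time_stamp: &[u8],
//     ) -> Result<HandlingStatus, PusPacketHandlingError> {
//         self.service.poll_and_handle_next_tc(time_stamp)
//     }
//
//     fn poll_and_handle_next_reply(
//         &mut self,
//         time_stamp: &[u8],
//     ) -> Result<HandlingStatus, EcssTmtcError> {
//         self.service.poll_and_handle_next_reply(time_stamp)
//     }
//
//     fn check_for_request_timeouts(&mut self) {
//         self.service.check_for_request_timeouts()
//     }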
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
request::GenericMessage,
spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
SpHeader,
},
};
use satrs_example::config::tmtc_err;
use crate::pus::{
mode::ModeReplyHandler,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::ModeRequestConverter;
#[test]
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::ReadMode);
}
#[test]
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
}
#[test]
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceMode);
}
#[test]
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceModeRecursive);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
// Right now this function does not do much. We simply check that it does not panic or
// misbehave.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}


@@ -1,68 +1,146 @@
use std::sync::mpsc;
use std::time::Duration;
use log::{error, info, warn};
use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
use crate::pus::create_verification_reporter;
use log::info;
use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusService11SchedHandler;
use satrs::pus::verification::VerificationReporterWithSender;
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusServiceHelper,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
};
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::ChannelId;
use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
use satrs::ComponentId;
use satrs_example::config::components::PUS_SCHED_SERVICE;
use crate::tmtc::PusTcSourceProviderSharedPool;
use super::{DirectPusService, HandlingStatus};
pub trait TcReleaser {
fn release(&mut self, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
}
impl TcReleaser for PusTcSourceProviderSharedPool {
fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
impl TcReleaser for PacketSenderWithSharedPool {
fn release(
&mut self,
sender_id: ComponentId,
enabled: bool,
_info: &TcInfo,
tc: &[u8],
) -> bool {
if enabled {
let shared_pool = self.shared_pool.get_mut();
// Transfer TC from scheduler TC pool to shared TC pool.
let released_tc_addr = self
.shared_pool
.pool
let released_tc_addr = shared_pool
.0
.write()
.expect("locking pool failed")
.add(tc)
.expect("adding TC to shared pool failed");
self.tc_source
.send(released_tc_addr)
self.sender
.send(PacketInPool::new(sender_id, released_tc_addr))
.expect("sending TC to TC source failed");
}
true
}
}
impl TcReleaser for mpsc::Sender<Vec<u8>> {
fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
impl TcReleaser for mpsc::Sender<PacketAsVec> {
fn release(
&mut self,
sender_id: ComponentId,
enabled: bool,
_info: &TcInfo,
tc: &[u8],
) -> bool {
if enabled {
// Send released TC to centralized TC source.
self.send(tc.to_vec())
self.send(PacketAsVec::new(sender_id, tc.to_vec()))
.expect("sending TC to TC source failed");
}
true
}
}
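// Illustrative sketch, not taken from the sat-rs sources: TcReleaser is a plain trait, so an
// application can plug in its own release logic. The counting releaser below is a hypothetical
// example, e.g. for tests; the boolean return value is forwarded to the PUS 11 scheduler handler
// (both real implementations above return true unconditionally).
pub struct CountingTcReleaser {
    pub released_tc_count: u32,
}

impl TcReleaser for CountingTcReleaser {
    fn release(
        &mut self,
        _sender_id: ComponentId,
        enabled: bool,
        _info: &TcInfo,
        _tc: &[u8],
    ) -> bool {
        if enabled {
            // Only count TCs whose release is actually enabled.
            self.released_tc_count += 1;
        }
        true
    }
}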
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_11_handler:
PusService11SchedHandler<TcInMemConverter, VerificationReporterWithSender, PusScheduler>,
pub struct SchedulingServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
{
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
PusScheduler,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
for SchedulingServiceWrapper<TmSender, TcInMemConverter>
{
const SERVICE_ID: u8 = PusServiceId::Scheduling as u8;
const SERVICE_STR: &'static str = "scheduling";
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
let error_handler = |partial_error: &PartialPusHandlingError| {
log::warn!(
"PUS {}({}) partial error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
partial_error
);
};
let result = self.pus_11_handler.poll_and_handle_next_tc(
error_handler,
time_stamp,
&mut self.sched_tc_pool,
);
if let Err(e) = result {
log::warn!(
"PUS {}({}) error: {:?}",
Self::SERVICE_ID,
Self::SERVICE_STR,
e
);
// To avoid permanent loops on continuous errors.
return HandlingStatus::Empty;
}
match result.unwrap() {
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
log::warn!(
"PUS {}({}) subservice {} not implemented",
Self::SERVICE_ID,
Self::SERVICE_STR,
subservice
);
}
}
HandlingStatus::HandledOne
}
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
SchedulingServiceWrapper<TmSender, TcInMemConverter>
{
pub fn release_tcs(&mut self) {
let id = self.pus_11_handler.service_helper.id();
let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
self.tc_releaser.release(enabled, info, tc)
self.tc_releaser.release(id, enabled, info, tc)
};
self.pus_11_handler
@@ -82,64 +160,27 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
info!("{released_tcs} TC(s) released from scheduler");
}
}
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS11 partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS11 invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS11: Subservice {subservice} not implemented");
}
PusPacketHandlerResult::Empty => {
return true;
}
},
Err(error) => {
error!("PUS packet handling error: {error:?}")
}
}
false
}
}
pub fn create_scheduler_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tc_releaser: PusTcSourceProviderSharedPool,
tm_sender: PacketSenderWithSharedPool,
tc_releaser: PacketSenderWithSharedPool,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInSharedStoreConverter> {
let sched_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusSched as ChannelId,
"PUS_11_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let sched_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusSched as ChannelId,
"PUS_11_TC_RECV",
pus_sched_rx,
);
) -> SchedulingServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new(
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
Box::new(sched_srv_receiver),
Box::new(sched_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048),
PUS_SCHED_SERVICE.id(),
pus_sched_rx,
tm_sender,
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_releaser.shared_packet_store().0.clone(), 2048),
),
scheduler,
);
Pus11Wrapper {
SchedulingServiceWrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],
@@ -148,35 +189,26 @@ pub fn create_scheduler_service_static(
}
pub fn create_scheduler_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
tc_source_sender: mpsc::Sender<Vec<u8>>,
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
tc_source_sender: mpsc::Sender<PacketAsVec>,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInVecConverter> {
let sched_srv_tm_sender = MpscTmAsVecSender::new(
TmSenderId::PusSched as ChannelId,
"PUS_11_TM_SENDER",
tm_funnel_tx,
);
let sched_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusSched as ChannelId,
"PUS_11_TC_RECV",
pus_sched_rx,
);
) -> SchedulingServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
//let sched_srv_receiver =
//MpscTcReceiver::new(PUS_SCHED_SERVICE.raw(), "PUS_11_TC_RECV", pus_sched_rx);
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new(
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
Box::new(sched_srv_receiver),
Box::new(sched_srv_tm_sender),
PUS_APID,
verif_reporter.clone(),
PUS_SCHED_SERVICE.id(),
pus_sched_rx,
tm_funnel_tx,
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
EcssTcInVecConverter::default(),
),
scheduler,
);
Pus11Wrapper {
SchedulingServiceWrapper {
pus_11_handler,
sched_tc_pool,
releaser_buf: [0; 4096],


@@ -1,52 +1,95 @@
use satrs::pus::EcssTcInMemConverter;
use crate::pus::mode::ModeServiceWrapper;
use derive_new::new;
use satrs::{
pus::{EcssTcInMemConverter, EcssTmSender},
spacepackets::time::{cds, TimeWriter},
};
use super::{
action::Pus8Wrapper, event::Pus5Wrapper, hk::Pus3Wrapper, scheduler::Pus11Wrapper,
test::Service17CustomWrapper,
action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, DirectPusService,
HandlingStatus, TargetedPusService,
};
pub struct PusStack<TcInMemConverter: EcssTcInMemConverter> {
event_srv: Pus5Wrapper<TcInMemConverter>,
hk_srv: Pus3Wrapper<TcInMemConverter>,
action_srv: Pus8Wrapper<TcInMemConverter>,
schedule_srv: Pus11Wrapper<TcInMemConverter>,
test_srv: Service17CustomWrapper<TcInMemConverter>,
}
impl<TcInMemConverter: EcssTcInMemConverter> PusStack<TcInMemConverter> {
pub fn new(
hk_srv: Pus3Wrapper<TcInMemConverter>,
event_srv: Pus5Wrapper<TcInMemConverter>,
action_srv: Pus8Wrapper<TcInMemConverter>,
schedule_srv: Pus11Wrapper<TcInMemConverter>,
test_srv: Service17CustomWrapper<TcInMemConverter>,
) -> Self {
Self {
event_srv,
action_srv,
schedule_srv,
test_srv,
hk_srv,
}
// TODO: For better extensibility, we could create two vectors: one for direct PUS services and
// one for targeted services.
#[derive(new)]
pub struct PusStack<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
hk_srv_wrapper: HkServiceWrapper<TmSender, TcInMemConverter>,
event_srv: EventServiceWrapper<TmSender, TcInMemConverter>,
action_srv_wrapper: ActionServiceWrapper<TmSender, TcInMemConverter>,
schedule_srv: SchedulingServiceWrapper<TmSender, TcInMemConverter>,
mode_srv: ModeServiceWrapper<TmSender, TcInMemConverter>,
}
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
PusStack<TmSender, TcInMemConverter>
{
pub fn periodic_operation(&mut self) {
// Release all telecommands which reached their release time before calling the service
// handlers.
self.schedule_srv.release_tcs();
let timestamp = cds::CdsTime::now_with_u16_days()
.expect("time stamp generation error")
.to_vec()
.unwrap();
let mut loop_count = 0_u32;
// Hot loop which will run continuously until all request and reply handling is done.
loop {
let mut all_queues_empty = true;
let mut is_srv_finished = |srv_handler_finished: bool| {
if !srv_handler_finished {
all_queues_empty = false;
let mut nothing_to_do = true;
Self::direct_service_checker(&mut self.test_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.schedule_srv, &timestamp, &mut nothing_to_do);
Self::direct_service_checker(&mut self.event_srv, &timestamp, &mut nothing_to_do);
Self::targeted_service_checker(
&mut self.action_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
Self::targeted_service_checker(
&mut self.hk_srv_wrapper,
&timestamp,
&mut nothing_to_do,
);
Self::targeted_service_checker(&mut self.mode_srv, &timestamp, &mut nothing_to_do);
if nothing_to_do {
// Timeout checking is only done once.
self.action_srv_wrapper.check_for_request_timeouts();
self.hk_srv_wrapper.check_for_request_timeouts();
self.mode_srv.check_for_request_timeouts();
break;
}
};
is_srv_finished(self.test_srv.handle_next_packet());
is_srv_finished(self.schedule_srv.handle_next_packet());
is_srv_finished(self.event_srv.handle_next_packet());
is_srv_finished(self.action_srv.handle_next_packet());
is_srv_finished(self.hk_srv.handle_next_packet());
if all_queues_empty {
// Safety mechanism to avoid infinite loops.
loop_count += 1;
if loop_count >= 500 {
log::warn!("reached PUS stack loop count 500, breaking");
break;
}
}
}
pub fn direct_service_checker<S: DirectPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let handling_status = service.poll_and_handle_next_tc(timestamp);
if handling_status == HandlingStatus::HandledOne {
*nothing_to_do = false;
}
}
pub fn targeted_service_checker<S: TargetedPusService>(
service: &mut S,
timestamp: &[u8],
nothing_to_do: &mut bool,
) {
let request_handling = service.poll_and_handle_next_tc_default_handler(timestamp);
let reply_handling = service.poll_and_handle_next_reply_default_handler(timestamp);
if request_handling == HandlingStatus::HandledOne
|| reply_handling == HandlingStatus::HandledOne
{
*nothing_to_do = false;
}
}
}
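// Illustrative sketch, not taken from the sat-rs sources: the PusStack is typically driven from
// a dedicated handler loop which calls periodic_operation repeatedly. The function name and the
// assumed 200 ms handling period are illustrative only; the real wiring of the individual
// service wrappers happens in the respective service modules.
pub fn run_pus_stack_sketch<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>(
    pus_stack: &mut PusStack<TmSender, TcInMemConverter>,
) {
    loop {
        // Release scheduled TCs and drain all service TC/reply queues once.
        pus_stack.periodic_operation();
        // Assumed handling period; a real application would derive this from its configuration.
        std::thread::sleep(std::time::Duration::from_millis(200));
    }
}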


@@ -1,150 +1,158 @@
use crate::pus::create_verification_reporter;
use log::info;
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::{
    DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter,
    EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusServiceHelper,
};
use satrs::pus::{EcssTcInSharedStoreConverter, PartialPusHandlingError};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use satrs_example::config::components::PUS_TEST_SERVICE;
use satrs_example::config::{tmtc_err, TEST_EVENT};
use std::sync::mpsc;

use super::{DirectPusService, HandlingStatus};

pub fn create_test_service_static(
    tm_sender: PacketSenderWithSharedPool,
    tc_pool: SharedStaticMemoryPool,
    event_sender: mpsc::SyncSender<EventMessageU32>,
    pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
    let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
        PUS_TEST_SERVICE.id(),
        pus_test_rx,
        tm_sender,
        create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
        EcssTcInSharedStoreConverter::new(tc_pool, 2048),
    ));
    TestCustomServiceWrapper {
        handler: pus17_handler,
        event_tx: event_sender,
    }
}

pub fn create_test_service_dynamic(
    tm_funnel_tx: mpsc::Sender<PacketAsVec>,
    event_sender: mpsc::SyncSender<EventMessageU32>,
    pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
    let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
        PUS_TEST_SERVICE.id(),
        pus_test_rx,
        tm_funnel_tx,
        create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
        EcssTcInVecConverter::default(),
    ));
    TestCustomServiceWrapper {
        handler: pus17_handler,
        event_tx: event_sender,
    }
}

pub struct TestCustomServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
{
    pub handler:
        PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
    pub event_tx: mpsc::SyncSender<EventMessageU32>,
}

impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
    for TestCustomServiceWrapper<TmSender, TcInMemConverter>
{
    const SERVICE_ID: u8 = PusServiceId::Test as u8;

    const SERVICE_STR: &'static str = "test";

    fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus {
        let error_handler = |partial_error: &PartialPusHandlingError| {
            log::warn!(
                "PUS {}({}) partial error: {:?}",
                Self::SERVICE_ID,
                Self::SERVICE_STR,
                partial_error
            );
        };
        let res = self
            .handler
            .poll_and_handle_next_tc(error_handler, timestamp);
        if let Err(e) = res {
            log::warn!(
                "PUS {}({}) error: {:?}",
                Self::SERVICE_ID,
                Self::SERVICE_STR,
                e
            );
            // To avoid permanent loops on continuous errors.
            return HandlingStatus::Empty;
        }
        match res.unwrap() {
            DirectPusPacketHandlerResult::Handled(handling_status) => {
                if handling_status == HandlingStatus::HandledOne {
                    info!("Received PUS ping command TC[17,1]");
                    info!("Sent ping reply PUS TM[17,2]");
                }
                return handling_status;
            }
            DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
                log::warn!(
                    "PUS {}({}) subservice {} not implemented",
                    Self::SERVICE_ID,
                    Self::SERVICE_STR,
                    subservice
                );
            }
            DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
                let (tc, _) = PusTcReader::new(
                    self.handler
                        .service_helper
                        .tc_in_mem_converter
                        .tc_slice_raw(),
                )
                .unwrap();
                if subservice == 128 {
                    info!("generating test event");
                    self.event_tx
                        .send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
                        .expect("Sending test event failed");
                    match self.handler.service_helper.verif_reporter().start_success(
                        self.handler.service_helper.tm_sender(),
                        token,
                        timestamp,
                    ) {
                        Ok(started_token) => {
                            if let Err(e) = self
                                .handler
                                .service_helper
                                .verif_reporter()
                                .completion_success(
                                    self.handler.service_helper.tm_sender(),
                                    started_token,
                                    timestamp,
                                )
                            {
                                error_handler(&PartialPusHandlingError::Verification(e));
                            }
                        }
                        Err(e) => {
                            error_handler(&PartialPusHandlingError::Verification(e));
                        }
                    }
                } else {
                    let fail_data = [tc.subservice()];
                    self.handler
                        .service_helper
                        .verif_reporter()
                        .start_failure(
                            self.handler.service_helper.tm_sender(),
                            token,
                            FailParams::new(
                                timestamp,
                                &tmtc_err::INVALID_PUS_SUBSERVICE,
                                &fail_data,
                            ),
                        )
                        .expect("Sending start failure verification failed");
                }
            }
        }
        HandlingStatus::HandledOne
    }
}
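
For orientation, a rough usage sketch follows showing how the reworked test service could be instantiated in its heap-based (dynamic) form and polled once through the new DirectPusService interface. The `poll_test_service_once` helper, the channel depth, and the zeroed timestamp buffer are illustrative assumptions, not part of this changeset; the module imports above are assumed to be in scope.

// Hypothetical wiring and a single poll of the PUS 17 test service.
fn poll_test_service_once() {
    // Channel depths and naming are arbitrary for this sketch.
    let (tm_funnel_tx, _tm_funnel_rx) = mpsc::channel::<PacketAsVec>();
    let (event_tx, _event_rx) = mpsc::sync_channel::<EventMessageU32>(10);
    let (_pus_test_tx, pus_test_rx) = mpsc::channel::<EcssTcAndToken>();

    let mut test_srv = create_test_service_dynamic(tm_funnel_tx, event_tx, pus_test_rx);

    // A real application generates a CDS timestamp here, as the PUS stack does
    // in its periodic operation; a zeroed 7 byte buffer stands in for it.
    let timestamp = [0u8; 7];
    let status = test_srv.poll_and_handle_next_tc(&timestamp);
    if status == HandlingStatus::Empty {
        log::info!("PUS 17 test service queue is empty");
    }
}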

Some files were not shown because too many files have changed in this diff.