Compare commits

...

13 Commits

Author SHA1 Message Date
Robin Mueller cba91eb21e update STM32F3 example code 2026-05-12 15:24:00 +02:00
muellerr b1253eaad4 Merge pull request 'Rework ACS' (#264) from rework-acs into main
Reviewed-on: #264
2026-03-18 11:19:24 +01:00
Robin Mueller ae4d26b8bd re-work ACS 2026-03-17 15:56:25 +01:00
muellerr cfcfabb5e3 Merge pull request 'fixes for switching' (#262) from fix-for-mgm-switching into main
Reviewed-on: #262
2026-03-12 13:48:04 +01:00
Robin Mueller 70f747ad86 fixes for switching 2026-03-12 13:45:19 +01:00
muellerr f44aac6ea2 Merge pull request 'probably need to re-work the mode model..' (#261) from continue-example-update into main
Reviewed-on: #261
2026-03-12 12:01:16 +01:00
Robin Mueller df517af85b probably need to re-work the mode model.. 2026-03-12 12:00:24 +01:00
muellerr ae9edf5888 Merge pull request 'minor clean up' (#260) from minor-cleanup into main
Reviewed-on: #260
2026-03-10 11:57:21 +01:00
Robin Mueller 512384026c minor clean up 2026-03-10 11:56:39 +01:00
muellerr 42c7a3b9ee Merge pull request 'Move to CCSDS + serde, rip out PUS' (#259) from move-to-ccsds-and-serde into main
Reviewed-on: #259
2026-03-10 11:56:16 +01:00
muellerr 1e15e3d501 move to CCSDS + serde for sat-rs example 2026-03-10 11:45:11 +01:00
muellerr d7e6732888 Merge pull request 'CCSDS scheduler' (#258) from ccsds-scheduler into main
Reviewed-on: #258
2025-11-27 16:09:52 +01:00
Robin Mueller c27569a526 new CCSDS packet scheduler 2025-11-27 16:02:39 +01:00
88 changed files with 4571 additions and 4275 deletions
+6 -1
View File
@@ -11,6 +11,9 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Install libudev-dev on Ubuntu
if: ${{ matrix.os == 'ubuntu-latest' }}
run: sudo apt update && sudo apt install -y libudev-dev
- run: cargo check
# Check example with static pool configuration
- run: cargo check -p satrs-example --no-default-features
@@ -23,6 +26,7 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
- name: Install nextest
uses: taiki-e/install-action@nextest
- run: sudo apt update && sudo apt install -y libudev-dev
- run: cargo nextest run --all-features
- run: cargo test --doc --all-features
@@ -57,7 +61,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc -p satrs --all-features
- run: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc -p satrs --all-features --no-deps
clippy:
name: Clippy
@@ -67,4 +71,5 @@ jobs:
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy
- run: sudo apt update && sudo apt install -y libudev-dev
- run: cargo clippy -- -D warnings
+3 -1
View File
@@ -4,7 +4,9 @@ members = [
"satrs",
"satrs-mib",
"satrs-example",
"satrs-minisim",
"satrs-example/models",
"satrs-example/client",
"satrs-example/minisim",
"satrs-shared",
"embedded-examples/embedded-client",
]
+142 -97
View File
@@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "aligned"
version = "0.4.2"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "377e4c0ba83e4431b10df45c1d4666f178ea9c552cac93e60c3a88bf32785923"
checksum = "ee4508988c62edf04abd8d92897fca0c2995d907ce1dfeaf369dac3716a40685"
dependencies = [
"as-slice",
]
@@ -90,7 +90,7 @@ dependencies = [
"arbitrary-int 1.3.0",
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -202,7 +202,7 @@ checksum = "e37549a379a9e0e6e576fd208ee60394ccb8be963889eebba3ffe0980364f472"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -255,7 +255,7 @@ dependencies = [
"ident_case",
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -266,7 +266,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81"
dependencies = [
"darling_core",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -298,7 +298,7 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -322,25 +322,24 @@ dependencies = [
[[package]]
name = "defmt-test"
version = "0.4.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "24076cc7203c365e7febfcec15d6667a9ef780bd2c5fd3b2a197400df78f299b"
checksum = "1d326e211b94939affafdf96f5c1baf8745b960037dcf763f813597d32b03d51"
dependencies = [
"cortex-m-rt",
"cortex-m-semihosting",
"defmt 1.0.1",
"defmt-test-macros",
"semihosting",
]
[[package]]
name = "defmt-test-macros"
version = "0.3.2"
version = "0.3.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe5520fd36862f281c026abeaab153ebbc001717c29a9b8e5ba9704d8f3a879d"
checksum = "2a7563a5468e1a1bd97f44cb75b658c2feec75af2b1389e70f4c0677b8402edd"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -351,7 +350,7 @@ checksum = "6178a82cf56c836a3ba61a7935cdb1c49bfaa6fa4327cd5bf554a503087de26b"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -365,14 +364,15 @@ dependencies = [
[[package]]
name = "embassy-embedded-hal"
version = "0.5.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "554e3e840696f54b4c9afcf28a0f24da431c927f4151040020416e7393d6d0d8"
checksum = "b0641612053b2f34fc250bb63f6630ae75de46e02ade7f457268447081d709ce"
dependencies = [
"defmt 1.0.1",
"embassy-futures",
"embassy-hal-internal",
"embassy-hal-internal 0.4.0",
"embassy-sync",
"embassy-time",
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
"embedded-hal-async",
@@ -381,6 +381,12 @@ dependencies = [
"nb 1.1.0",
]
[[package]]
name = "embassy-executor-timer-queue"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2fc328bf943af66b80b98755db9106bf7e7471b0cf47dc8559cd9a6be504cc9c"
[[package]]
name = "embassy-futures"
version = "0.1.2"
@@ -389,9 +395,18 @@ checksum = "dc2d050bdc5c21e0862a89256ed8029ae6c290a93aecefc73084b3002cdebb01"
[[package]]
name = "embassy-hal-internal"
version = "0.3.0"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95285007a91b619dc9f26ea8f55452aa6c60f7115a4edc05085cd2bd3127cd7a"
checksum = "7f10ce10a4dfdf6402d8e9bd63128986b96a736b1a0a6680547ed2ac55d55dba"
dependencies = [
"num-traits",
]
[[package]]
name = "embassy-hal-internal"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "568659fc53866d3d85c60fa33723fb751aa69e71507634fc2c19e7649432fb75"
dependencies = [
"cortex-m",
"critical-section",
@@ -410,9 +425,9 @@ dependencies = [
[[package]]
name = "embassy-stm32"
version = "0.4.0"
version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0d972eab325cc96afee98f80a91ca6b00249b6356dc0fdbff68b70c200df9fae"
checksum = "486c0622deb5a519fc4d2cb8e3ef324f7568fcdfff201ff8fcab46557d663ceb"
dependencies = [
"aligned",
"bit_field",
@@ -426,10 +441,12 @@ dependencies = [
"document-features",
"embassy-embedded-hal",
"embassy-futures",
"embassy-hal-internal",
"embassy-hal-internal 0.5.0",
"embassy-net-driver",
"embassy-sync",
"embassy-time",
"embassy-time-driver",
"embassy-time-queue-utils",
"embassy-usb-driver",
"embassy-usb-synopsys-otg",
"embedded-can",
@@ -437,50 +454,54 @@ dependencies = [
"embedded-hal 1.0.0",
"embedded-hal-async",
"embedded-hal-nb",
"embedded-io",
"embedded-io-async",
"embedded-io 0.7.1",
"embedded-io-async 0.7.0",
"embedded-storage",
"embedded-storage-async",
"futures-util",
"heapless 0.9.1",
"nb 1.1.0",
"proc-macro2",
"quote",
"rand_core 0.6.4",
"rand_core 0.9.3",
"regex",
"sdio-host",
"static_assertions",
"stm32-fmc",
"stm32-metapac",
"trait-set",
"vcell",
"volatile-register",
]
[[package]]
name = "embassy-sync"
version = "0.7.2"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "73974a3edbd0bd286759b3d483540f0ebef705919a5f56f4fc7709066f71689b"
checksum = "7bbd85cf5a5ae56bdf26f618364af642d1d0a4e245cdd75cd9aabda382f65a81"
dependencies = [
"cfg-if",
"critical-section",
"defmt 1.0.1",
"embedded-io-async",
"embedded-io-async 0.7.0",
"futures-core",
"futures-sink",
"heapless 0.8.0",
"heapless 0.9.1",
]
[[package]]
name = "embassy-time"
version = "0.5.0"
version = "0.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f4fa65b9284d974dad7a23bb72835c4ec85c0b540d86af7fc4098c88cff51d65"
checksum = "592b0c143ec626e821d4d90da51a2bd91d559d6c442b7c74a47d368c9e23d97a"
dependencies = [
"cfg-if",
"critical-section",
"defmt 1.0.1",
"document-features",
"embassy-time-driver",
"embassy-time-queue-utils",
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
"embedded-hal-async",
@@ -489,13 +510,23 @@ dependencies = [
[[package]]
name = "embassy-time-driver"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a0a244c7dc22c8d0289379c8d8830cae06bb93d8f990194d0de5efb3b5ae7ba6"
checksum = "6ee71af1b3a0deaa53eaf2d39252f83504c853646e472400b763060389b9fcc9"
dependencies = [
"document-features",
]
[[package]]
name = "embassy-time-queue-utils"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "168297bf80aaf114b3c9ad589bf38b01b3009b9af7f97cd18086c5bbf96f5693"
dependencies = [
"embassy-executor-timer-queue",
"heapless 0.9.1",
]
[[package]]
name = "embassy-usb-driver"
version = "0.2.0"
@@ -503,14 +534,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17119855ccc2d1f7470a39756b12068454ae27a3eabb037d940b5c03d9c77b7a"
dependencies = [
"defmt 1.0.1",
"embedded-io-async",
"embedded-io-async 0.6.1",
]
[[package]]
name = "embassy-usb-synopsys-otg"
version = "0.3.1"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "288751f8eaa44a5cf2613f13cee0ca8e06e6638cb96e897e6834702c79084b23"
checksum = "cbe46f4083109c7ea12a03ca61095d1e87c76fec52c7ca9ee06a42935606dacb"
dependencies = [
"critical-section",
"defmt 1.0.1",
@@ -578,8 +609,14 @@ name = "embedded-io"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "edd0f118536f44f5ccd48bcb8b111bdc3de888b58c74639dfb034a357d0f206d"
[[package]]
name = "embedded-io"
version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9eb1aa714776b75c7e67e1da744b81a129b3ff919c8712b5e1b32252c1f07cc7"
dependencies = [
"defmt 0.3.100",
"defmt 1.0.1",
]
[[package]]
@@ -588,8 +625,17 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ff09972d4073aa8c299395be75161d582e7629cd663171d62af73c8d50dba3f"
dependencies = [
"defmt 0.3.100",
"embedded-io",
"embedded-io 0.6.1",
]
[[package]]
name = "embedded-io-async"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2564b9f813c544241430e147d8bc454815ef9ac998878d30cc3055449f7fd4c0"
dependencies = [
"defmt 1.0.1",
"embedded-io 0.7.1",
]
[[package]]
@@ -625,7 +671,7 @@ dependencies = [
"darling",
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -646,15 +692,6 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fugit"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "17186ad64927d5ac8f02c1e77ccefa08ccd9eaa314d5a4772278aa204a22f7e7"
dependencies = [
"gcd",
]
[[package]]
name = "futures-core"
version = "0.3.31"
@@ -685,12 +722,6 @@ dependencies = [
"pin-utils",
]
[[package]]
name = "gcd"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a"
[[package]]
name = "generator"
version = "0.8.7"
@@ -892,7 +923,7 @@ checksum = "ff32365de1b6743cb203b710788263c44a03de03802daf96092f2da4fe6ba4d7"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -965,7 +996,7 @@ dependencies = [
"proc-macro-error-attr2",
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -998,6 +1029,18 @@ version = "0.9.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38"
[[package]]
name = "regex"
version = "1.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276"
dependencies = [
"aho-corasick",
"memchr",
"regex-automata",
"regex-syntax",
]
[[package]]
name = "regex-automata"
version = "0.4.13"
@@ -1055,20 +1098,7 @@ dependencies = [
"proc-macro-error2",
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "rtic-monotonics"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99f9923ce32637ee40c0cb28fbbc2fad880d8aebea2d545918c6971ae9be3d17"
dependencies = [
"cfg-if",
"cortex-m",
"fugit",
"portable-atomic",
"rtic-time",
"syn 2.0.108",
]
[[package]]
@@ -1087,20 +1117,6 @@ dependencies = [
"rtic-common",
]
[[package]]
name = "rtic-time"
version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "61485474f5a23247ae1d4875f2bfe860be9b3030dbf87c232e50799e021429a1"
dependencies = [
"critical-section",
"embedded-hal 1.0.0",
"embedded-hal-async",
"fugit",
"futures-util",
"rtic-common",
]
[[package]]
name = "rustc_version"
version = "0.2.3"
@@ -1138,13 +1154,13 @@ dependencies = [
"defmt-rtt",
"defmt-test",
"embassy-stm32",
"embassy-time",
"embedded-hal 1.0.0",
"enumset",
"heapless 0.9.1",
"panic-probe",
"postcard",
"rtic",
"rtic-monotonics",
"rtic-sync",
"serde",
"spacepackets",
@@ -1170,6 +1186,12 @@ version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b328e2cb950eeccd55b7f55c3a963691455dcd044cfb5354f0c5e68d2c2d6ee2"
[[package]]
name = "semihosting"
version = "0.1.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8e4abf97879f4e80db69a9fba7bd64998e9bdad25f58ef045a778e191172fd4"
[[package]]
name = "semver"
version = "0.9.0"
@@ -1218,7 +1240,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -1245,7 +1267,8 @@ checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
[[package]]
name = "spacepackets"
version = "0.17.0"
source = "git+https://egit.irs.uni-stuttgart.de/rust/spacepackets.git#2bc61677105765e69cc96bb1ff9960557c00fa8e"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94979a990b4333f43667cba9fe9e72b9c4e9ada82e09fbe8fb250844358590cc"
dependencies = [
"arbitrary-int 2.0.0",
"bitbybit",
@@ -1293,24 +1316,35 @@ dependencies = [
[[package]]
name = "stm32-fmc"
version = "0.3.2"
version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7f0639399e2307c2446c54d91d4f1596343a1e1d5cab605b9cce11d0ab3858c"
checksum = "72692594faa67f052e5e06dd34460951c21e83bc55de4feb8d2666e2f15480a2"
dependencies = [
"embedded-hal 0.2.7",
"embedded-hal 1.0.0",
]
[[package]]
name = "stm32-metapac"
version = "18.0.0"
version = "21.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fd8ec3a292a0d9fc4798416a61b21da5ae50341b2e7b8d12e662bf305366097"
checksum = "e74b78632cea498cfb28386a29f8bfae7476d6570a78733eb5fecbee66c2f4ce"
dependencies = [
"cortex-m",
"cortex-m-rt",
"defmt 0.3.100",
]
[[package]]
name = "syn"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "syn"
version = "2.0.108"
@@ -1339,7 +1373,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -1400,6 +1434,17 @@ dependencies = [
"tracing-log",
]
[[package]]
name = "trait-set"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b79e2e9c9ab44c6d7c20d5976961b47e8f49ac199154daa514b77cd1ab536625"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "unicode-ident"
version = "1.0.22"
@@ -1487,7 +1532,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -1498,7 +1543,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
[[package]]
@@ -1576,5 +1621,5 @@ checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 2.0.108",
]
@@ -14,7 +14,8 @@ defmt-rtt = { version = "1" }
panic-probe = { version = "1", features = ["print-defmt"] }
embedded-hal = "1"
cortex-m-semihosting = "0.5.0"
embassy-stm32 = { version = "0.4", features = ["defmt", "stm32f303vc", "unstable-pac"] }
embassy-stm32 = { version = "0.6", features = ["defmt", "stm32f303vc", "memory-x", "unstable-pac", "time-driver-any"] }
embassy-time = { version = "0.5", features = ["defmt", "generic-queue-16", "defmt-timestamp-uptime-ms"]}
enumset = "1"
heapless = "0.9"
spacepackets = { version = "0.17", default-features = false, features = ["defmt", "serde"] }
@@ -27,10 +28,9 @@ serde = { version = "1", default-features = false, features = ["derive"] }
rtic = { version = "2", features = ["thumbv7-backend"] }
rtic-sync = { version = "1" }
rtic-monotonics = { version = "2", features = ["cortex-m-systick"] }
[dev-dependencies]
defmt-test = "0.4"
defmt-test = "0.5"
# cargo test
[profile.test]
@@ -1,33 +0,0 @@
/* Linker script for the STM32F303VCT6 */
MEMORY
{
/* NOTE 1 K = 1 KiBi = 1024 bytes */
FLASH : ORIGIN = 0x08000000, LENGTH = 256K
RAM : ORIGIN = 0x20000000, LENGTH = 40K
}
/* This is where the call stack will be allocated. */
/* The stack is of the full descending type. */
/* You may want to use this variable to locate the call stack and static
variables in different memory regions. Below is shown the default value */
/* _stack_start = ORIGIN(RAM) + LENGTH(RAM); */
/* You can use this symbol to customize the location of the .text section */
/* If omitted the .text section will be placed right after the .vector_table
section */
/* This is required only on microcontrollers that store some configuration right
after the vector table */
/* _stext = ORIGIN(FLASH) + 0x400; */
/* Example of putting non-initialized variables into custom RAM locations. */
/* This assumes you have defined a region RAM2 above, and in the Rust
sources added the attribute `#[link_section = ".ram2bss"]` to the data
you want to place there. */
/* Note that the section will not be zero-initialized by the runtime! */
/* SECTIONS {
.ram2bss (NOLOAD) : ALIGN(4) {
*(.ram2bss);
. = ALIGN(4);
} > RAM2
} INSERT AFTER .bss;
*/
@@ -6,11 +6,8 @@ use rtic::app;
#[app(device = embassy_stm32)]
mod app {
use rtic_monotonics::fugit::ExtU32;
use rtic_monotonics::Monotonic as _;
use satrs_stm32f3_disco_rtic::{Direction, LedPinSet, Leds};
rtic_monotonics::systick_monotonic!(Mono, 1000);
use embassy_time::Timer;
use satrs_stm32f3_disco_rtic::{Direction, LedPinSet, Leds};
#[shared]
struct Shared {}
@@ -22,7 +19,7 @@ mod app {
}
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
fn init(_cx: init::Context) -> (Shared, Local) {
let p = embassy_stm32::init(Default::default());
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery using RTICv2");
@@ -39,8 +36,6 @@ mod app {
};
let leds = Leds::new(led_pin_set);
// Initialize the systick interrupt & obtain the token to prove that we did
Mono::start(cx.core.SYST, 8_000_000);
blinky::spawn().expect("failed to spawn blinky task");
(
Shared {},
@@ -55,7 +50,7 @@ mod app {
async fn blinky(cx: blinky::Context) {
loop {
cx.local.leds.blink_next(cx.local.current_dir);
Mono::delay(200.millis()).await;
Timer::after_millis(200).await;
}
}
}
@@ -1,6 +1,9 @@
#![no_main]
#![no_std]
use defmt_rtt as _;
use panic_probe as _;
use arbitrary_int::u11;
use core::time::Duration;
use embassy_stm32::gpio::Output;
@@ -2,21 +2,11 @@
#![no_main]
use arbitrary_int::{u11, u14};
use cortex_m_semihosting::debug::{self, EXIT_FAILURE, EXIT_SUCCESS};
use satrs_stm32f3_disco_rtic::{create_tm_packet, tm_size, CcsdsPacketId, Request, Response};
use spacepackets::{CcsdsPacketCreationError, SpHeader};
use defmt_rtt as _; // global logger
use panic_probe as _;
use satrs_stm32f3_disco_rtic::{create_tm_packet, tm_size, Request, Response};
use spacepackets::{CcsdsPacketCreationError, CcsdsPacketIdAndPsc, SpHeader};
use rtic::app;
#[allow(unused_imports)]
use rtic_monotonics::fugit::{MillisDurationU32, TimerInstantU32};
use rtic_monotonics::systick::prelude::*;
use crate::app::Mono;
const UART_BAUD: u32 = 115200;
const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u32 = 20;
@@ -48,7 +38,7 @@ pub enum TmSendError {
#[derive(Debug, defmt::Format)]
pub struct RequestWithTcId {
pub request: Request,
pub tc_id: CcsdsPacketId,
pub tc_id: CcsdsPacketIdAndPsc,
}
#[app(device = embassy_stm32)]
@@ -57,18 +47,19 @@ mod app {
use super::*;
use arbitrary_int::u14;
use embassy_time::Timer;
use rtic::Mutex;
use rtic_sync::{
channel::{Receiver, Sender},
make_channel,
};
use satrs_stm32f3_disco_rtic::{CcsdsPacketId, LedPinSet, Request, Response};
use satrs_stm32f3_disco_rtic::{LedPinSet, Request, Response};
use spacepackets::CcsdsPacketReader;
systick_monotonic!(Mono, 1000);
embassy_stm32::bind_interrupts!(struct Irqs {
USART2 => embassy_stm32::usart::InterruptHandler<embassy_stm32::peripherals::USART2>;
DMA1_CHANNEL6 => embassy_stm32::dma::InterruptHandler<embassy_stm32::peripherals::DMA1_CH6>;
DMA1_CHANNEL7 => embassy_stm32::dma::InterruptHandler<embassy_stm32::peripherals::DMA1_CH7>;
});
#[shared]
@@ -86,15 +77,13 @@ mod app {
}
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
fn init(_cx: init::Context) -> (Shared, Local) {
static DMA_BUF: static_cell::ConstStaticCell<[u8; TC_DMA_BUF_LEN]> =
static_cell::ConstStaticCell::new([0; TC_DMA_BUF_LEN]);
let p = embassy_stm32::init(Default::default());
let (req_sender, req_receiver) = make_channel!(RequestWithTcId, 16);
// Initialize the systick interrupt & obtain the token to prove that we did
Mono::start(cx.core.SYST, 8_000_000);
defmt::info!("sat-rs demo application for the STM32F3-Discovery with RTICv2");
let led_pin_set = LedPinSet {
@@ -112,7 +101,7 @@ mod app {
let mut config = embassy_stm32::usart::Config::default();
config.baudrate = UART_BAUD;
let uart = embassy_stm32::usart::Uart::new(
p.USART2, p.PA3, p.PA2, Irqs, p.DMA1_CH7, p.DMA1_CH6, config,
p.USART2, p.PA3, p.PA2, p.DMA1_CH7, p.DMA1_CH6, Irqs, config,
)
.unwrap();
@@ -142,10 +131,7 @@ mod app {
loop {
cx.local.leds.blink_next(cx.local.current_dir);
let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
Mono::delay(MillisDurationU32::from_ticks(
current_blink_freq.as_millis() as u32,
))
.await;
Timer::after_millis(current_blink_freq.as_millis() as u64).await;
}
}
@@ -169,7 +155,7 @@ mod app {
.unwrap();
continue;
}
Mono::delay(TX_HANDLER_FREQ_MS.millis()).await;
Timer::after_millis(TX_HANDLER_FREQ_MS as u64).await;
}
}
@@ -198,9 +184,8 @@ mod app {
&decoder.dest()[0..packet_size],
) {
Ok(packet) => {
let packet_id = packet.packet_id();
let psc = packet.psc();
let tc_packet_id = CcsdsPacketId { packet_id, psc };
let tc_packet_id =
CcsdsPacketIdAndPsc::new_from_ccsds_packet(&packet);
if let Ok(request) =
postcard::from_bytes::<Request>(packet.packet_data())
{
@@ -260,7 +245,7 @@ mod app {
fn handle_ping_request(
cx: &mut req_handler::Context,
tc_packet_id: CcsdsPacketId,
tc_packet_id: CcsdsPacketIdAndPsc,
) -> Result<(), TmSendError> {
defmt::info!("Received PUS ping telecommand, sending ping reply");
send_tm(tc_packet_id, Response::CommandDone, *cx.local.seq_count)?;
@@ -270,7 +255,7 @@ mod app {
fn handle_change_blink_frequency_request(
cx: &mut req_handler::Context,
tc_packet_id: CcsdsPacketId,
tc_packet_id: CcsdsPacketIdAndPsc,
duration: Duration,
) -> Result<(), TmSendError> {
defmt::info!(
@@ -287,14 +272,14 @@ mod app {
}
fn send_tm(
tc_packet_id: CcsdsPacketId,
tc_packet_id: CcsdsPacketIdAndPsc,
response: Response,
current_seq_count: u14,
) -> Result<(), TmSendError> {
let sp_header = SpHeader::new_for_unseg_tc(PUS_APID, current_seq_count, 0);
let tm_header = satrs_stm32f3_disco_rtic::TmHeader {
tc_packet_id: Some(tc_packet_id),
uptime_millis: Mono::now().duration_since_epoch().to_millis(),
uptime_millis: embassy_time::Instant::now().as_millis() as u32,
};
let mut tm_packet = TmPacket::new();
let tm_size = tm_size(&tm_header, &response);
+3 -3
View File
@@ -14,12 +14,12 @@ test:
embedded:
cargo check -p satrs --target=thumbv7em-none-eabihf --no-default-features
fmt:
cargo fmt --all
check-fmt:
cargo fmt --all -- --check
fmt:
cargo fmt --all
clippy:
cargo clippy -- -D warnings
+11 -15
View File
@@ -1,7 +1,7 @@
[package]
name = "satrs-example"
version = "0.1.1"
edition = "2021"
edition = "2024"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
default-run = "satrs-example"
homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
@@ -18,28 +18,24 @@ csv = "1"
num_enum = "0.7"
thiserror = "2"
lazy_static = "1"
strum = { version = "0.27", features = ["derive"] }
strum = { version = "0.28", features = ["derive"] }
derive-new = "0.7"
cfg-if = "1"
arbitrary-int = "2"
bitbybit = "1.4"
bitbybit = "2"
postcard = "1"
ctrlc = "3"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
[dependencies.satrs]
path = "../satrs"
features = ["test_util"]
[dependencies.satrs-minisim]
path = "../satrs-minisim"
[dependencies.satrs-mib]
version = "0.1.1"
path = "../satrs-mib"
satrs = { path = "../satrs", features = ["test_util"] }
models = { path = "./models" }
satrs-minisim = { path = "./minisim" }
satrs-mib = { path = "../satrs-mib" }
[features]
default = ["heap_tmtc"]
heap_tmtc = []
# default = ["heap_tmtc"]
# heap_tmtc = []
[dev-dependencies]
env_logger = "0.11"
+19
View File
@@ -0,0 +1,19 @@
[package]
name = "client"
version = "0.1.0"
edition = "2024"
[dependencies]
clap = { version = "4", features = ["derive"] }
log = "0.4"
fern = "0.7"
humantime = "2"
serde = { version = "1" }
satrs-example = { path = ".." }
models = { path = "../models" }
spacepackets = { version = "0.17", git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git", default-features = false }
bitbybit = "2"
arbitrary-int = "2"
ctrlc = { version = "3.5" }
postcard = { version = "1" }
anyhow = "1"
+334
View File
@@ -0,0 +1,334 @@
use anyhow::bail;
use arbitrary_int::u11;
use clap::Parser as _;
use models::{Apid, MessageType, TcHeader, mgm::request::HkRequest};
use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT};
use spacepackets::{CcsdsPacketIdAndPsc, SpacePacketHeader};
use std::{
net::{IpAddr, SocketAddr, UdpSocket},
sync::{
Arc,
atomic::{AtomicBool, Ordering},
},
time::{Duration, SystemTime},
};
#[derive(clap::Parser)]
pub struct Cli {
#[arg(short, long)]
ping: bool,
#[arg(short, long)]
test_event: bool,
#[command(subcommand)]
commands: Option<Commands>,
}
#[derive(clap::Subcommand)]
enum Commands {
Mgm0(MgmArgs),
Mgm1(MgmArgs),
MgmAssy(MgmAssemblyArgs),
}
impl Commands {
#[inline]
pub fn target_id(&self) -> models::ComponentId {
match self {
Commands::Mgm0(_mgm_args) => models::ComponentId::AcsMgm0,
Commands::Mgm1(_mgm_args) => models::ComponentId::AcsMgm1,
Commands::MgmAssy(_mgm_assembly_args) => models::ComponentId::AcsMgmAssembly,
}
}
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, clap::Parser)]
struct MgmArgs {
#[arg(short, long)]
ping: bool,
#[arg(long)]
request_hk: bool,
#[arg(short, long)]
mode: Option<DeviceModeSelect>,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, clap::Parser)]
struct MgmAssemblyArgs {
#[arg(short, long)]
ping: bool,
#[arg(short, long)]
mode: Option<AssemblyModeSelect>,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, clap::ValueEnum)]
pub enum DeviceModeSelect {
Off,
Normal,
}
#[derive(Debug, PartialEq, Eq, Clone, Copy, clap::ValueEnum)]
pub enum AssemblyModeSelect {
NoModeKeeping,
Off,
Normal,
}
fn setup_logger(level: log::LevelFilter) -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{} {} {}] {}",
humantime::format_rfc3339_seconds(SystemTime::now()),
record.level(),
record.target(),
message
))
})
.level(level)
.chain(std::io::stdout())
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
fn main() -> anyhow::Result<()> {
setup_logger(log::LevelFilter::Debug).unwrap();
let kill_signal = Arc::new(AtomicBool::new(false));
let ctrl_kill_signal = kill_signal.clone();
ctrlc::set_handler(move || ctrl_kill_signal.store(true, Ordering::Relaxed)).unwrap();
let cli = Cli::parse();
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
client.set_nonblocking(true)?;
client.set_read_timeout(Some(Duration::from_millis(200)))?;
if cli.ping {
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Tmtc as u16)),
TcHeader::new(models::ComponentId::Controller, models::MessageType::Ping),
models::control::request::Request::Ping,
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!("sending ping request with TC ID {:#010x}", sent_tc_id.raw());
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
if cli.test_event {
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Tmtc as u16)),
TcHeader::new(models::ComponentId::Controller, models::MessageType::Event),
models::control::request::Request::TestEvent,
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending event request with TC ID {:#010x}",
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
if let Some(cmd) = cli.commands {
let target_id = cmd.target_id();
match cmd {
Commands::Mgm0(args) | Commands::Mgm1(args) => {
if args.ping {
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
TcHeader::new(cmd.target_id(), models::MessageType::Ping),
models::mgm::request::Request::Ping,
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending {:?} ping request with TC ID {:#010x}",
target_id,
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
if args.request_hk {
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
TcHeader::new(target_id, models::MessageType::Hk),
models::mgm::request::Request::Hk(HkRequest {
id: models::mgm::request::HkId::Sensor,
req_type: models::HkRequestType::OneShot,
}),
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending {:?} HK request with TC ID {:#010x}",
target_id,
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
if let Some(mode) = args.mode {
let dev_mode = match mode {
DeviceModeSelect::Off => models::DeviceMode::Off,
DeviceModeSelect::Normal => models::DeviceMode::Normal,
};
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
TcHeader::new(target_id, models::MessageType::Mode),
models::mgm::request::Request::Mode(
models::mgm::request::ModeRequest::SetMode(dev_mode),
),
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending {:?} HK request with TC ID {:#010x}",
target_id,
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
}
Commands::MgmAssy(mgm_assembly_args) => {
if mgm_assembly_args.ping {
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
TcHeader::new(cmd.target_id(), models::MessageType::Ping),
models::mgm::request::Request::Ping,
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending {:?} ping request with TC ID {:#010x}",
target_id,
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
if let Some(mode) = mgm_assembly_args.mode {
let assembly_mode = match mode {
AssemblyModeSelect::NoModeKeeping => {
models::mgm_assembly::AssemblyMode::NoModeKeeping
}
AssemblyModeSelect::Off => {
models::mgm_assembly::AssemblyMode::Device(models::DeviceMode::Off)
}
AssemblyModeSelect::Normal => {
models::mgm_assembly::AssemblyMode::Device(models::DeviceMode::Normal)
}
};
let request = models::ccsds::CcsdsTcPacketOwned::new_with_request(
SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
TcHeader::new(target_id, models::MessageType::Mode),
models::mgm_assembly::request::Request::Mode(
models::mgm_assembly::request::ModeRequest::SetMode(assembly_mode),
),
);
let sent_tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&request.sp_header);
log::info!(
"sending {:?} HK request with TC ID {:#010x}",
target_id,
sent_tc_id.raw()
);
let request_packet = request.to_vec();
client.send_to(&request_packet, addr).unwrap();
}
}
}
}
let mut recv_buf: Box<[u8; 2048]> = Box::new([0; 2048]);
log::info!("entering listening loop");
loop {
if kill_signal.load(std::sync::atomic::Ordering::Relaxed) {
log::info!("received kill signal, exiting");
break;
}
match client.recv(recv_buf.as_mut_slice()) {
Ok(received_bytes) => handle_raw_tm_packet(&recv_buf.as_slice()[0..received_bytes])?,
Err(e) => {
if e.kind() == std::io::ErrorKind::WouldBlock
|| e.kind() == std::io::ErrorKind::TimedOut
{
continue;
}
log::warn!("UDP reception error: {}", e)
}
}
}
Ok(())
}
/// Parses and logs a single raw CCSDS TM packet received from the simulator/OBSW.
///
/// Steps:
/// 1. Validate the raw frame (including checksum) as a CCSDS packet.
/// 2. Deserialize the postcard-encoded [models::TmHeader] from the user data.
/// 3. Dispatch on the message type and sender ID to deserialize and log the payload.
///
/// Malformed packets now produce an error instead of a panic so the caller's
/// listening loop can log the problem and keep running. The previous
/// implementation hit `todo!()` (an abort) for parse failures and for several
/// sender IDs, and `unwrap()`ed every payload deserialization.
fn handle_raw_tm_packet(data: &[u8]) -> anyhow::Result<()> {
    let packet = match spacepackets::CcsdsPacketReader::new_with_checksum(data) {
        Ok(packet) => packet,
        Err(e) => bail!("failed to read CCSDS TM packet: {:?}", e),
    };
    let (tm_header, remainder) =
        match postcard::take_from_bytes::<models::TmHeader>(packet.user_data()) {
            Ok(header_and_remainder) => header_and_remainder,
            Err(e) => bail!("Failed to deserialize TM header: {}", e),
        };
    if let Some(tc_id) = tm_header.tc_id {
        log::info!(
            "Received TM with APID {} and from sender {:?} for TC ID {:#010x}",
            packet.apid(),
            tm_header.sender_id,
            tc_id.raw()
        );
    } else {
        log::info!(
            "Received unsolicited TM with APID {} and from sender {:?}",
            packet.apid(),
            tm_header.sender_id,
        );
    }
    // Events can come from any component, so handle them before dispatching on
    // the sender ID.
    if tm_header.message_type == MessageType::Event {
        match postcard::from_bytes::<models::Event>(remainder) {
            Ok(event) => {
                log::info!("Received event from {:?}: {:?}", tm_header.sender_id, event)
            }
            Err(e) => bail!("failed to deserialize event: {}", e),
        }
        return Ok(());
    }
    match tm_header.sender_id {
        models::ComponentId::EpsPcdu => {
            match postcard::from_bytes::<models::pcdu::response::Response>(remainder) {
                Ok(response) => log::info!("Received response from PCDU: {:?}", response),
                Err(e) => bail!("failed to deserialize PCDU response: {}", e),
            }
        }
        models::ComponentId::Controller => {
            match postcard::from_bytes::<models::control::response::Response>(remainder) {
                Ok(response) => log::info!("Received response from controller: {:?}", response),
                Err(e) => bail!("failed to deserialize controller response: {}", e),
            }
        }
        models::ComponentId::AcsMgmAssembly => {
            match postcard::from_bytes::<models::mgm_assembly::response::Response>(remainder) {
                Ok(response) => log::info!("Received response from MGM Assembly: {:?}", response),
                Err(e) => bail!("failed to deserialize MGM assembly response: {}", e),
            }
        }
        models::ComponentId::AcsMgm0 => {
            match postcard::from_bytes::<models::mgm::response::Response>(remainder) {
                Ok(response) => log::info!("Received response from MGM0: {:?}", response),
                Err(e) => bail!("failed to deserialize MGM0 response: {}", e),
            }
        }
        models::ComponentId::AcsMgm1 => {
            match postcard::from_bytes::<models::mgm::response::Response>(remainder) {
                Ok(response) => log::info!("Received response from MGM1: {:?}", response),
                Err(e) => bail!("failed to deserialize MGM1 response: {}", e),
            }
        }
        // The event manager only emits events, which are handled above.
        models::ComponentId::EventManager => {}
        // Previously todo!(): an unhandled sender must not crash a ground tool.
        models::ComponentId::AcsSubsystem
        | models::ComponentId::EpsSubsystem
        | models::ComponentId::UdpServer
        | models::ComponentId::TcpServer
        | models::ComponentId::Ground => {
            log::warn!(
                "no TM handler implemented for sender {:?}",
                tm_header.sender_id
            )
        }
    }
    Ok(())
}
@@ -11,16 +11,14 @@ serde_json = "1"
log = "0.4"
thiserror = "2"
fern = "0.7"
strum = { version = "0.27", features = ["derive"] }
strum = { version = "0.28", features = ["derive"] }
num_enum = "0.7"
humantime = "2"
tai-time = { version = "0.3", features = ["serde"] }
nexosim = { version = "0.3.1" }
[dependencies.nexosim]
version = "0.3.1"
[dependencies.satrs]
path = "../satrs"
satrs = { path = "../../satrs" }
models = { path = "../models" }
[dev-dependencies]
delegate = "0.13"
@@ -1,10 +1,10 @@
use std::{f32::consts::PI, sync::mpsc, time::Duration};
use models::pcdu::SwitchStateBinary;
use nexosim::{
model::{Context, Model},
ports::Output,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
acs::{
lis3mdl::MgmLis3MdlReply, MgmReplyCommon, MgmReplyProvider, MgmSensorValuesMicroTesla,
@@ -179,13 +179,12 @@ impl Model for MagnetorquerModel {}
pub mod tests {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use models::pcdu::{SwitchId, SwitchStateBinary};
use satrs_minisim::{
acs::{
lis3mdl::{self, MgmLis3MdlReply},
MgmRequestLis3Mdl, MgtDipole, MgtHkSet, MgtReply, MgtRequest,
},
eps::PcduSwitch,
SerializableSimMsgPayload, SimComponent, SimMessageProvider, SimRequest,
};
@@ -215,7 +214,7 @@ pub mod tests {
#[test]
fn test_basic_mgm_request_switched_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgm);
switch_device_on(&mut sim_testbench, SwitchId::Mgm0);
let mut request = SimRequest::new_with_epoch_time(MgmRequestLis3Mdl::RequestSensorData);
sim_testbench
@@ -279,7 +278,7 @@ pub mod tests {
#[test]
fn test_basic_mgt_request_is_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
switch_device_on(&mut sim_testbench, SwitchId::Mgt);
let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
sim_testbench
@@ -324,7 +323,7 @@ pub mod tests {
#[test]
fn test_basic_mgt_request_is_on_and_torquing() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
switch_device_on(&mut sim_testbench, SwitchId::Mgt);
let commanded_dipole = MgtDipole {
x: -200,
y: 200,
@@ -1,14 +1,11 @@
use std::{sync::mpsc, time::Duration};
use models::pcdu::{SwitchId, SwitchMapBinaryWrapper, SwitchStateBinary};
use nexosim::{
model::{Context, Model},
ports::Output,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
eps::{PcduReply, PcduSwitch, SwitchMapBinaryWrapper},
SimReply,
};
use satrs_minisim::{eps::PcduReply, SimReply};
pub const SWITCH_INFO_DELAY_MS: u64 = 10;
@@ -45,10 +42,12 @@ impl PcduModel {
self.reply_sender.send(reply).unwrap();
}
pub async fn switch_device(
&mut self,
switch_and_target_state: (PcduSwitch, SwitchStateBinary),
) {
pub async fn switch_device(&mut self, switch_and_target_state: (SwitchId, SwitchStateBinary)) {
log::info!(
"switching {:?} to {:?}",
switch_and_target_state.0,
switch_and_target_state.1
);
let val = self
.switcher_map
.0
@@ -56,12 +55,13 @@ impl PcduModel {
.unwrap_or_else(|| panic!("switch {:?} not found", switch_and_target_state.0));
*val = switch_and_target_state.1;
match switch_and_target_state.0 {
PcduSwitch::Mgm => {
SwitchId::Mgm0 => {
self.mgm_0_switch.send(switch_and_target_state.1).await;
}
PcduSwitch::Mgt => {
SwitchId::Mgt => {
self.mgt_switch.send(switch_and_target_state.1).await;
}
SwitchId::Mgm1 => todo!(),
}
}
}
@@ -73,16 +73,16 @@ pub(crate) mod tests {
use super::*;
use std::time::Duration;
use models::pcdu::SwitchMapBinary;
use satrs_minisim::{
eps::{PcduRequest, SwitchMapBinary},
SerializableSimMsgPayload, SimComponent, SimMessageProvider, SimRequest,
eps::PcduRequest, SerializableSimMsgPayload, SimComponent, SimMessageProvider, SimRequest,
};
use crate::test_helpers::SimTestbench;
fn switch_device(
sim_testbench: &mut SimTestbench,
switch: PcduSwitch,
switch: SwitchId,
target: SwitchStateBinary,
) {
let request = SimRequest::new_with_epoch_time(PcduRequest::SwitchDevice {
@@ -97,10 +97,10 @@ pub(crate) mod tests {
}
#[allow(dead_code)]
pub(crate) fn switch_device_off(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
pub(crate) fn switch_device_off(sim_testbench: &mut SimTestbench, switch: SwitchId) {
switch_device(sim_testbench, switch, SwitchStateBinary::Off);
}
pub(crate) fn switch_device_on(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
pub(crate) fn switch_device_on(sim_testbench: &mut SimTestbench, switch: SwitchId) {
switch_device(sim_testbench, switch, SwitchStateBinary::On);
}
@@ -128,7 +128,7 @@ pub(crate) mod tests {
}
}
fn test_pcdu_switching_single_switch(switch: PcduSwitch, target: SwitchStateBinary) {
fn test_pcdu_switching_single_switch(switch: SwitchId, target: SwitchStateBinary) {
let mut sim_testbench = SimTestbench::new();
switch_device(&mut sim_testbench, switch, target);
let mut switcher_map = get_all_off_switch_map();
@@ -165,17 +165,17 @@ pub(crate) mod tests {
#[test]
fn test_pcdu_switching_mgm_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgm, SwitchStateBinary::On);
test_pcdu_switching_single_switch(SwitchId::Mgm0, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
test_pcdu_switching_single_switch(SwitchId::Mgt, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_off() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::Off);
test_pcdu_switching_single_switch(SwitchId::Mgt, SwitchStateBinary::On);
test_pcdu_switching_single_switch(SwitchId::Mgt, SwitchStateBinary::Off);
}
}
@@ -1,5 +1,4 @@
use nexosim::time::MonotonicTime;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
@@ -162,73 +161,7 @@ impl From<SimRequestError> for SimCtrlReply {
pub mod eps {
use super::*;
use satrs::power::{SwitchState, SwitchStateBinary};
use std::collections::HashMap;
use strum::{EnumIter, IntoEnumIterator};
pub type SwitchMap = HashMap<PcduSwitch, SwitchState>;
pub type SwitchMapBinary = HashMap<PcduSwitch, SwitchStateBinary>;
pub struct SwitchMapWrapper(pub SwitchMap);
pub struct SwitchMapBinaryWrapper(pub SwitchMapBinary);
#[derive(
Debug,
Copy,
Clone,
PartialEq,
Eq,
Serialize,
Deserialize,
Hash,
EnumIter,
IntoPrimitive,
TryFromPrimitive,
)]
#[repr(u16)]
pub enum PcduSwitch {
Mgm = 0,
Mgt = 1,
}
impl Default for SwitchMapBinaryWrapper {
fn default() -> Self {
let mut switch_map = SwitchMapBinary::default();
for entry in PcduSwitch::iter() {
switch_map.insert(entry, SwitchStateBinary::Off);
}
Self(switch_map)
}
}
impl Default for SwitchMapWrapper {
fn default() -> Self {
let mut switch_map = SwitchMap::default();
for entry in PcduSwitch::iter() {
switch_map.insert(entry, SwitchState::Unknown);
}
Self(switch_map)
}
}
impl SwitchMapWrapper {
pub fn new_with_init_switches_off() -> Self {
let mut switch_map = SwitchMap::default();
for entry in PcduSwitch::iter() {
switch_map.insert(entry, SwitchState::Off);
}
Self(switch_map)
}
pub fn from_binary_switch_map_ref(switch_map: &SwitchMapBinary) -> Self {
Self(
switch_map
.iter()
.map(|(key, value)| (*key, SwitchState::from(*value)))
.collect(),
)
}
}
use models::pcdu::{SwitchId, SwitchMapBinary, SwitchStateBinary};
#[derive(Debug, Copy, Clone)]
#[repr(u8)]
@@ -240,7 +173,7 @@ pub mod eps {
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum PcduRequest {
SwitchDevice {
switch: PcduSwitch,
switch: SwitchId,
state: SwitchStateBinary,
},
RequestSwitchInfo,
@@ -264,7 +197,7 @@ pub mod eps {
pub mod acs {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use models::pcdu::SwitchStateBinary;
use super::*;
@@ -91,11 +91,8 @@ impl SimUdpServer {
self.sender_addr = Some(src);
let sim_req = SimRequest::from_raw_data(&self.req_buf[..bytes_read]);
if sim_req.is_err() {
log::warn!(
"received UDP request with invalid format: {}",
sim_req.unwrap_err()
);
if let Err(e) = sim_req {
log::warn!("received UDP request with invalid format: {}", e);
return processed_requests;
}
self.request_sender.send(sim_req.unwrap()).unwrap();
+15
View File
@@ -0,0 +1,15 @@
[package]
name = "models"
version = "0.1.0"
edition = "2024"
[dependencies]
serde = { version = "1", features = ["derive"] }
spacepackets = { version = "0.17", git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git", default-features = false }
satrs = { path = "../../satrs" }
num_enum = { version = "0.7" }
strum = { version = "0.28", features = ["derive"] }
postcard = { version = "1" }
thiserror = { version = "2" }
bitbybit = "2"
arbitrary-int = "2"
+130
View File
@@ -0,0 +1,130 @@
use crate::TmHeader;
use serde::Serialize;
use spacepackets::{
CcsdsPacketCreationError, CcsdsPacketCreatorWithReservedData, SpHeader, SpacePacketHeader,
ccsds_packet_len_for_user_data_len_with_checksum,
};
use crate::TcHeader;
/// Unserialized owned TC packet which can be cloned and sent around.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CcsdsTcPacketOwned {
    /// CCSDS space packet primary header.
    pub sp_header: SpacePacketHeader,
    /// Application TC header identifying target component and request type.
    pub tc_header: TcHeader,
    /// Postcard-serialized request payload, written as raw bytes after the TC header.
    pub payload: alloc::vec::Vec<u8>,
}
impl CcsdsTcPacketOwned {
    /// Creates a packet by serializing the given request into the payload with postcard.
    pub fn new_with_request<R: serde::Serialize>(
        sp_header: SpacePacketHeader,
        tc_header: TcHeader,
        request: R,
    ) -> Self {
        // Serializing into a fresh vector can only fail on allocation failure.
        let request_serialized = postcard::to_allocvec(&request).unwrap();
        Self::new(sp_header, tc_header, request_serialized)
    }

    /// Creates a packet from an already serialized payload.
    pub fn new(
        sp_header: SpacePacketHeader,
        tc_header: TcHeader,
        payload: alloc::vec::Vec<u8>,
    ) -> Self {
        Self {
            sp_header,
            tc_header,
            payload,
        }
    }

    /// Serializes the full CCSDS TC packet (space packet header, postcard-encoded
    /// TC header, raw payload bytes, checksum) into `buf` and returns the
    /// written length.
    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, CcsdsCreationError> {
        let user_data_len =
            postcard::experimental::serialized_size(&self.tc_header)? + self.payload.len();
        let mut ccsds_tc = CcsdsPacketCreatorWithReservedData::new_tc_with_checksum(
            self.sp_header,
            user_data_len,
            buf,
        )?;
        let user_data = ccsds_tc.packet_data_mut();
        let ser_len = postcard::to_slice(&self.tc_header, user_data)?.len();
        user_data[ser_len..ser_len + self.payload.len()].copy_from_slice(&self.payload);
        Ok(ccsds_tc.finish())
    }

    /// Length of the full packet as generated by [Self::write_to_bytes].
    pub fn len_written(&self) -> usize {
        // Bug fix: the payload is written as raw bytes by write_to_bytes, so its
        // length must be added directly. The previous serialized_size call
        // treated the payload as a serde value, which adds a length-prefix
        // varint and made this inconsistent with the actually written length.
        ccsds_packet_len_for_user_data_len_with_checksum(
            postcard::experimental::serialized_size(&self.tc_header).unwrap() + self.payload.len(),
        )
        .unwrap()
    }

    /// Serializes the packet into a newly allocated vector sized via [Self::len_written].
    pub fn to_vec(&self) -> alloc::vec::Vec<u8> {
        let mut buf = alloc::vec![0u8; self.len_written()];
        let len = self.write_to_bytes(&mut buf).unwrap();
        buf.truncate(len);
        buf
    }
}
/// Errors which can occur when serializing the owned TC/TM packets.
#[derive(Debug, thiserror::Error)]
pub enum CcsdsCreationError {
    /// Forwarded error from the spacepackets packet creator.
    #[error("CCSDS packet creation error: {0}")]
    CcsdsPacketCreation(#[from] CcsdsPacketCreationError),
    /// Header or payload (de)serialization failed.
    #[error("postcard error: {0}")]
    Postcard(#[from] postcard::Error),
    // NOTE(review): not constructed in this file — presumably used by callers
    // which generate timestamps; confirm before removing.
    #[error("timestamp generation error")]
    Time,
}
/// Unserialized owned TM packet which can be cloned and sent around.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CcsdsTmPacketOwned {
    /// CCSDS space packet primary header.
    pub sp_header: SpacePacketHeader,
    /// Application TM header with sender, optional TC ID and timestamp.
    pub tm_header: TmHeader,
    /// Postcard-serialized response payload, written as raw bytes after the TM header.
    pub payload: alloc::vec::Vec<u8>,
}
impl CcsdsTmPacketOwned {
    /// Creates a packet by serializing the given payload with postcard.
    pub fn new_with_serde_payload(
        sp_header: SpHeader,
        tm_header: &TmHeader,
        payload: &impl Serialize,
    ) -> Result<Self, postcard::Error> {
        Ok(CcsdsTmPacketOwned {
            sp_header,
            tm_header: *tm_header,
            payload: postcard::to_allocvec(&payload)?,
        })
    }

    /// Serializes the full CCSDS TM packet (space packet header, postcard-encoded
    /// TM header, raw payload bytes, checksum) into `buf` and returns the
    /// written length.
    pub fn write_to_bytes(&self, buf: &mut [u8]) -> Result<usize, CcsdsCreationError> {
        let user_data_len =
            postcard::experimental::serialized_size(&self.tm_header)? + self.payload.len();
        let mut ccsds_tm = CcsdsPacketCreatorWithReservedData::new_tm_with_checksum(
            self.sp_header,
            user_data_len,
            buf,
        )?;
        let user_data = ccsds_tm.packet_data_mut();
        let ser_len = postcard::to_slice(&self.tm_header, user_data)?.len();
        user_data[ser_len..ser_len + self.payload.len()].copy_from_slice(&self.payload);
        Ok(ccsds_tm.finish())
    }

    /// Length of the full packet as generated by [Self::write_to_bytes].
    pub fn len_written(&self) -> usize {
        // Bug fix: the payload is written as raw bytes by write_to_bytes, so its
        // length must be added directly. Running it through serialized_size adds
        // a length-prefix varint and over-estimates the packet length.
        ccsds_packet_len_for_user_data_len_with_checksum(
            postcard::experimental::serialized_size(&self.tm_header).unwrap() + self.payload.len(),
        )
        .unwrap()
    }

    /// Serializes the packet into a newly allocated vector sized via [Self::len_written].
    pub fn to_vec(&self) -> alloc::vec::Vec<u8> {
        let mut buf = alloc::vec![0u8; self.len_written()];
        let len = self.write_to_bytes(&mut buf).unwrap();
        buf.truncate(len);
        buf
    }
}
+39
View File
@@ -0,0 +1,39 @@
use crate::Message;
/// Events which can be emitted by this component.
#[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
pub enum Event {
    /// Event used for testing purposes.
    TestEvent,
}

impl Message for Event {
    fn message_type(&self) -> crate::MessageType {
        crate::MessageType::Event
    }
}
pub mod request {
    /// Requests accepted by this component.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Request {
        /// Connectivity check, answered with a verification response.
        Ping,
        // NOTE(review): presumably triggers emission of [super::Event::TestEvent];
        // confirm in the handling component, which is not visible here.
        TestEvent,
    }
}
pub mod response {
    use crate::Message;

    /// Responses sent back as telemetry by this component.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Response {
        /// Generic success acknowledgement.
        Ok,
        /// Wraps an emitted event.
        Event(super::Event),
    }

    impl Message for Response {
        fn message_type(&self) -> crate::MessageType {
            match self {
                Response::Ok => crate::MessageType::Verification,
                Response::Event(_event) => crate::MessageType::Event,
            }
        }
    }
}
+196
View File
@@ -0,0 +1,196 @@
extern crate alloc;
use core::str::FromStr;
use spacepackets::{
CcsdsPacketIdAndPsc,
time::cds::{CdsTime, MIN_CDS_FIELD_LEN},
};
pub mod ccsds;
pub mod control;
pub mod mgm;
pub mod mgm_assembly;
pub mod pcdu;
/// Unique identifier for every software component which exchanges messages.
///
/// The numeric representation is derived from the declaration order via the
/// num_enum derives, so variants must not be reordered without considering
/// already serialized data.
#[derive(
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    Hash,
    serde::Serialize,
    serde::Deserialize,
    num_enum::TryFromPrimitive,
    num_enum::IntoPrimitive,
)]
#[repr(u64)]
pub enum ComponentId {
    Controller,
    AcsSubsystem,
    AcsMgmAssembly,
    AcsMgm0,
    AcsMgm1,
    EpsSubsystem,
    EpsPcdu,
    UdpServer,
    TcpServer,
    EventManager,
    Ground,
}
/// CCSDS application process identifiers (11-bit) used by this system.
#[derive(Debug, PartialEq, Eq, strum::EnumIter)]
#[bitbybit::bitenum(u11)]
pub enum Apid {
    Tmtc = 1,
    Cfdp = 2,
    Acs = 3,
    Eps = 6,
}
/// Top-level event type aggregating the events of all components.
#[derive(Debug, Copy, Clone, serde::Serialize, serde::Deserialize)]
pub enum Event {
    ControllerEvent(control::Event),
}

impl Message for Event {
    fn message_type(&self) -> MessageType {
        MessageType::Event
    }
}
/// Header prepended (postcard-encoded) to every telemetry payload.
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub struct TmHeader {
    /// Component which generated this telemetry.
    pub sender_id: ComponentId,
    /// Component the telemetry is addressed to.
    pub target_id: ComponentId,
    /// Kind of payload following this header.
    pub message_type: MessageType,
    /// Telemetry can either be sent unsolicited, or as a response to telecommands.
    pub tc_id: Option<CcsdsPacketIdAndPsc>,
    /// Raw CDS short timestamp.
    pub timestamp: Option<[u8; 7]>,
}
impl TmHeader {
    /// Builds a TM header, serializing the provided CDS timestamp into it.
    pub fn new(
        sender_id: ComponentId,
        target_id: ComponentId,
        message_type: MessageType,
        tc_id: Option<CcsdsPacketIdAndPsc>,
        cds_timestamp: &CdsTime,
    ) -> Self {
        let mut stamp_buf = [0u8; MIN_CDS_FIELD_LEN];
        // Writing a CDS short timestamp always requires exactly 7 bytes, so
        // this can not fail.
        cds_timestamp.write_to_bytes(&mut stamp_buf).unwrap();
        Self {
            sender_id,
            target_id,
            message_type,
            tc_id,
            timestamp: Some(stamp_buf),
        }
    }

    /// Convenience constructor for telemetry which does not answer a telecommand.
    pub fn new_for_unsolicited_tm(
        sender_id: ComponentId,
        target_id: ComponentId,
        message_type: MessageType,
        cds_timestamp: &CdsTime,
    ) -> Self {
        Self::new(sender_id, target_id, message_type, None, cds_timestamp)
    }

    /// Convenience constructor for telemetry answering the TC with the given ID.
    pub fn new_for_tc_response(
        sender_id: ComponentId,
        target_id: ComponentId,
        message_type: MessageType,
        tc_id: CcsdsPacketIdAndPsc,
        cds_timestamp: &CdsTime,
    ) -> Self {
        Self::new(sender_id, target_id, message_type, Some(tc_id), cds_timestamp)
    }

    /// Deserializes a header from a postcard byte stream, also returning the
    /// unread remainder of the input.
    pub fn from_bytes_postcard(data: &[u8]) -> Result<(Self, &[u8]), postcard::Error> {
        postcard::take_from_bytes::<TmHeader>(data)
    }
}
/// Header prepended (postcard-encoded) to every telecommand payload.
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub struct TcHeader {
    /// Component the telecommand is addressed to.
    pub target_id: ComponentId,
    /// Kind of request following this header.
    pub request_type: MessageType,
}
impl TcHeader {
pub fn new(target_id: ComponentId, request_type: MessageType) -> Self {
Self {
target_id,
request_type,
}
}
}
/// Discriminates the kind of payload carried by a TC or TM packet.
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
pub enum MessageType {
    Ping,
    Mode,
    Hk,
    Action,
    Event,
    Verification,
}
/// Implemented by request and response payload types to report the generic
/// [MessageType] used in the TC/TM headers.
pub trait Message {
    fn message_type(&self) -> MessageType;
}
/// Generic device mode which covers the requirements of most devices.
///
/// The states are related both to the physical and the logical state of the device. Some
/// device handlers control the power supply of their own device and an off state might also
/// mean that the device is physically off.
#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {
    /// Device handler inactive, device possibly unpowered.
    Off = 0,
    /// Device powered and reachable, but not polled periodically.
    On = 1,
    /// Normal operation mode where periodic polling might be done as well.
    Normal = 2,
}
impl FromStr for DeviceMode {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.to_lowercase().as_str() {
"off" => Ok(DeviceMode::Off),
"on" => Ok(DeviceMode::On),
"normal" => Ok(DeviceMode::Normal),
_ => Err(()),
}
}
}
/// Generic housekeeping request variants shared by all devices.
#[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[non_exhaustive]
pub enum HkRequestType {
    /// Request a single HK sample immediately.
    OneShot,
    /// Enable periodic HK generation with a specified frequency.
    EnablePeriodic(core::time::Duration),
    DisablePeriodic,
    /// Modify periodic HK generation interval.
    ModifyInterval(core::time::Duration),
}
#[cfg(test)]
mod tests {}
+90
View File
@@ -0,0 +1,90 @@
pub mod request {
    use crate::{DeviceMode, HkRequestType, Message};

    /// Mode commanding for a single MGM device handler.
    #[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
    pub enum ModeRequest {
        SetMode(DeviceMode),
        ReadMode,
    }

    /// HK set identifiers of the MGM device handler.
    #[derive(Debug, PartialEq, Eq, Clone, Copy, serde::Serialize, serde::Deserialize)]
    pub enum HkId {
        Sensor,
    }

    /// HK request addressing one HK set with a generic request type.
    #[derive(Debug, PartialEq, Eq, Clone, Copy, serde::Serialize, serde::Deserialize)]
    pub struct HkRequest {
        pub id: HkId,
        pub req_type: HkRequestType,
    }

    /// All requests understood by an MGM device handler.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Request {
        Ping,
        Hk(HkRequest),
        Mode(ModeRequest),
    }

    impl Request {
        // Maps each request variant to the generic message type for the TC
        // header. NOTE(review): Ping maps to Verification, not MessageType::Ping
        // — the client code builds ping TCs with MessageType::Ping; confirm
        // which mapping is intended.
        fn message_type(&self) -> crate::MessageType {
            match self {
                Request::Ping => crate::MessageType::Verification,
                Request::Hk(_hk_request) => crate::MessageType::Hk,
                Request::Mode(_mode) => crate::MessageType::Mode,
            }
        }
    }

    impl Message for Request {
        // Inherent methods take precedence in resolution, so this delegates to
        // the private inherent message_type above (no recursion).
        fn message_type(&self) -> crate::MessageType {
            self.message_type()
        }
    }
}
/// One magnetometer reading with a validity flag.
///
/// NOTE(review): the unit of the axis values is not visible here; the simulator
/// side works with micro-tesla sensor values — confirm these match.
#[derive(Default, Debug, Copy, Clone, serde::Serialize, serde::Deserialize)]
pub struct MgmData {
    // False until a valid sample was acquired from the device.
    pub valid: bool,
    pub x: f32,
    pub y: f32,
    pub z: f32,
}
pub mod response {
    use crate::{DeviceMode, Message, mgm::MgmData};

    /// HK payloads an MGM device handler can generate.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum HkResponse {
        MgmData(MgmData),
    }

    /// Replies to mode commanding.
    #[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy)]
    pub enum ModeResponse {
        /// New mode has been set.
        Mode(DeviceMode),
        /// Setting a mode timed out.
        SetModeTimeout,
    }

    /// All responses an MGM device handler sends back as telemetry.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Response {
        Ok,
        Hk(HkResponse),
        Mode(ModeResponse),
    }

    impl Response {
        // Maps each response variant to the generic message type for the TM
        // header.
        fn message_type(&self) -> crate::MessageType {
            match self {
                Response::Ok => crate::MessageType::Verification,
                Response::Hk(_hk_response) => crate::MessageType::Hk,
                Response::Mode(_mode_failure) => crate::MessageType::Mode,
            }
        }
    }

    impl Message for Response {
        // Delegates to the inherent message_type above.
        fn message_type(&self) -> crate::MessageType {
            self.message_type()
        }
    }
}
+109
View File
@@ -0,0 +1,109 @@
use core::str::FromStr;
use crate::DeviceMode;
/// Mode of an assembly which manages a set of child devices.
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, Copy, PartialEq, Eq)]
pub enum AssemblyMode {
    /// The assembly mode resembles the modes of the devices it controls. It also tries to keep
    /// the children in the correct mode by re-commanding them into the correct mode.
    Device(DeviceMode),
    /// Mode keeping disabled.
    NoModeKeeping,
}
impl FromStr for AssemblyMode {
    type Err = ();

    /// Case-insensitive parsing: "off"/"on"/"normal" map to the corresponding
    /// device mode, "no_mode_keeping" disables mode keeping.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let lowered = s.to_lowercase();
        match lowered.as_str() {
            "no_mode_keeping" => Ok(Self::NoModeKeeping),
            "off" => Ok(Self::Device(DeviceMode::Off)),
            "on" => Ok(Self::Device(DeviceMode::On)),
            "normal" => Ok(Self::Device(DeviceMode::Normal)),
            _ => Err(()),
        }
    }
}
pub mod request {
    use crate::{HkRequestType, Message, mgm_assembly::AssemblyMode};

    /// HK set identifiers of the assembly.
    #[derive(Debug, PartialEq, Eq, Clone, Copy, serde::Serialize, serde::Deserialize)]
    pub enum HkId {
        Sensor,
    }

    /// Mode commanding for the assembly.
    #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
    pub enum ModeRequest {
        SetMode(AssemblyMode),
        ReadMode,
    }

    // NOTE(review): HkId/HkRequest are declared, but [Request] has no Hk
    // variant — either dead code or a planned extension; confirm.
    #[derive(Debug, PartialEq, Eq, Clone, Copy, serde::Serialize, serde::Deserialize)]
    pub struct HkRequest {
        pub id: HkId,
        pub req_type: HkRequestType,
    }

    /// All requests understood by the assembly.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Request {
        Ping,
        Mode(ModeRequest),
    }

    impl Request {
        // Maps each request variant to the generic message type for the TC
        // header.
        fn message_type(&self) -> crate::MessageType {
            match self {
                Request::Ping => crate::MessageType::Verification,
                Request::Mode(_mode) => crate::MessageType::Mode,
            }
        }
    }

    impl Message for Request {
        // Delegates to the inherent message_type above.
        fn message_type(&self) -> crate::MessageType {
            self.message_type()
        }
    }
}
pub mod response {
    use crate::{DeviceMode, Message};

    // NOTE(review): not referenced by [Response] within this module — either
    // dead code or used by other components; confirm.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
    pub enum ModeCommandFailure {
        Timeout,
    }

    /// Mode related reports of the assembly. The two-element arrays carry the
    /// last known modes of the two children (None if unknown).
    #[derive(Debug, Copy, Clone, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
    pub enum ModeReport {
        /// Mode of the assembly.
        Mode(super::AssemblyMode),
        /// Timeout failure setting the children modes.
        SetModeTimeout([Option<DeviceMode>; 2]),
        /// Children are in wrong mode after commanding.
        WrongMode([Option<DeviceMode>; 2]),
        /// An assembly tried modekeeping but can not keep its mode.
        CanNotKeepMode([Option<DeviceMode>; 2]),
    }

    /// All responses the assembly sends back as telemetry.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug, PartialEq, Eq)]
    pub enum Response {
        Ok,
        Mode(ModeReport),
    }

    impl Response {
        // Maps each response variant to the generic message type for the TM
        // header.
        fn message_type(&self) -> crate::MessageType {
            match self {
                Response::Ok => crate::MessageType::Verification,
                Response::Mode(_mode_report) => crate::MessageType::Mode,
            }
        }
    }

    impl Message for Response {
        // Delegates to the inherent message_type above.
        fn message_type(&self) -> crate::MessageType {
            self.message_type()
        }
    }
}
+147
View File
@@ -0,0 +1,147 @@
use std::collections::HashMap;
use strum::IntoEnumIterator as _;
/// Compact bitfield encoding the on/off state of all power switches.
///
/// Bit positions match the [SwitchId] discriminants.
#[bitbybit::bitfield(u16, debug, default = 0x0)]
#[derive(serde::Serialize, serde::Deserialize)]
pub struct SwitchesBitfield {
    /// Magnetorquer switch ([SwitchId::Mgt]).
    #[bit(2, rw)]
    magnetorquer: bool,
    /// Second magnetometer switch ([SwitchId::Mgm1]).
    #[bit(1, rw)]
    mgm1: bool,
    /// First magnetometer switch ([SwitchId::Mgm0]).
    #[bit(0, rw)]
    mgm0: bool,
}
/// Identifier of a power switch controlled by the PCDU.
///
/// The discriminants mirror the bit positions in [SwitchesBitfield].
#[derive(
    Debug,
    Copy,
    Clone,
    PartialEq,
    Eq,
    serde::Serialize,
    serde::Deserialize,
    Hash,
    strum::EnumIter,
    num_enum::IntoPrimitive,
    num_enum::TryFromPrimitive,
)]
#[repr(u16)]
pub enum SwitchId {
    Mgm0 = 0,
    Mgm1 = 1,
    Mgt = 2,
}
/// Full switch state as known to the software, including states for switches
/// which were never read back or are defective.
#[derive(Debug, Eq, PartialEq, Copy, Clone, serde::Serialize, serde::Deserialize)]
pub enum SwitchState {
    Off = 0,
    On = 1,
    Unknown = 2,
    Faulty = 3,
}
impl From<SwitchStateBinary> for SwitchState {
    /// Widens a binary switch state into the full state enum.
    fn from(value: SwitchStateBinary) -> Self {
        match value {
            SwitchStateBinary::On => Self::On,
            SwitchStateBinary::Off => Self::Off,
        }
    }
}
/// Strictly binary switch state used for commanding.
#[derive(Debug, Eq, PartialEq, Copy, Clone, serde::Serialize, serde::Deserialize)]
pub enum SwitchStateBinary {
    Off = 0,
    On = 1,
}

/// Maps every switch to its commanded/known binary state.
pub type SwitchMapBinary = HashMap<SwitchId, SwitchStateBinary>;

/// Newtype wrapper so a [Default] with every [SwitchId] present can be provided.
pub struct SwitchMapBinaryWrapper(pub SwitchMapBinary);
impl Default for SwitchMapBinaryWrapper {
    /// Builds a map which contains every known [SwitchId], all set to Off.
    fn default() -> Self {
        Self(
            SwitchId::iter()
                .map(|switch| (switch, SwitchStateBinary::Off))
                .collect(),
        )
    }
}
/// Request to drive a single power switch into a target state.
pub struct SwitchRequest {
    pub switch_id: SwitchId,
    pub target_state: SwitchStateBinary,
}
impl SwitchRequest {
pub fn new(switch_id: SwitchId, target_state: SwitchStateBinary) -> Self {
Self {
switch_id,
target_state,
}
}
pub fn switch_id(&self) -> SwitchId {
self.switch_id
}
pub fn target_state(&self) -> SwitchStateBinary {
self.target_state
}
}
pub mod request {
    use crate::{DeviceMode, Message};

    use super::*;

    /// All requests understood by the PCDU handler.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Request {
        Mode(DeviceMode),
        Ping,
        /// Read back the current switch states.
        GetSwitches,
        /// Enable all switches set in the bitfield.
        EnableSwitches(SwitchesBitfield),
        /// Disable all switches set in the bitfield.
        DisableSwitches(SwitchesBitfield),
    }

    impl Request {
        /// Maps each request variant to the generic message type for the TC
        /// header.
        pub fn message_type(&self) -> crate::MessageType {
            match self {
                Request::Mode(_mode) => crate::MessageType::Mode,
                Request::Ping => crate::MessageType::Verification,
                Request::GetSwitches => crate::MessageType::Action,
                Request::EnableSwitches(_switches) | Request::DisableSwitches(_switches) => {
                    crate::MessageType::Action
                }
            }
        }
    }

    impl Message for Request {
        // Delegates to the inherent message_type above.
        fn message_type(&self) -> crate::MessageType {
            self.message_type()
        }
    }
}
pub mod response {
    use super::*;

    use crate::Message;

    /// All responses the PCDU handler sends back as telemetry.
    #[derive(serde::Serialize, serde::Deserialize, Clone, Copy, Debug)]
    pub enum Response {
        Ok,
        /// Current state of all switches as a bitfield.
        Switches(SwitchesBitfield),
    }

    impl Message for Response {
        fn message_type(&self) -> crate::MessageType {
            match self {
                Response::Ok => crate::MessageType::Verification,
                Response::Switches(_switches) => crate::MessageType::Action,
            }
        }
    }
}
-1
View File
@@ -1 +0,0 @@
// TODO: Write the assembly
File diff suppressed because it is too large Load Diff
+634
View File
@@ -0,0 +1,634 @@
use std::{sync::mpsc, time::Duration};
use models::{
ComponentId, DeviceMode,
mgm_assembly::{AssemblyMode, request, response},
};
use satrs::spacepackets::CcsdsPacketIdAndPsc;
use satrs_example::{ModeHelper, TmtcQueues};
use crate::ccsds::pack_ccsds_tm_packet_for_now;
/// Queue pair for communication with the parent component: mode requests in,
/// mode reports out.
pub struct ParentQueueHelper {
    pub request_rx: mpsc::Receiver<request::ModeRequest>,
    pub report_tx: mpsc::SyncSender<response::ModeReport>,
}
/// Queue pairs for communication with the two MGM children: mode requests out,
/// mode responses in. Index i addresses MGM i.
///
/// (The previous doc comment described parent communication and appears to have
/// been misplaced from [ParentQueueHelper].)
pub struct ChildrenQueueHelper {
    pub request_tx_queues: [mpsc::SyncSender<models::mgm::request::ModeRequest>; 2],
    pub report_rx_queues: [mpsc::Receiver<models::mgm::response::ModeResponse>; 2],
}
/// Internal state of an ongoing assembly mode transition.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum TransitionState {
    /// No transition work pending.
    #[default]
    Idle,
    /// Waiting for mode replies from the children.
    AwaitingReplies,
}
/// Book-keeping about one MGM child during transitions and mode keeping.
#[derive(Debug, Default, Copy, Clone)]
pub struct MgmInfo {
    // Set when a mode report was received from this child. NOTE(review):
    // presumably reset when a new transition starts — confirm in start_transition.
    reply_received: bool,
    // Last mode reported by the child, None if no report was received yet.
    mode: Option<DeviceMode>,
}
/// MGM assembly component.
///
/// Owns the mode state of the two MGM children and mediates between ground
/// telecommands, the parent component and the children mode queues.
pub struct Assembly {
    // Tracks current and target assembly mode; ModeHelper is defined elsewhere.
    mode_helper: ModeHelper<AssemblyMode, TransitionState>,
    /// This boolean is used for the distinction between transitions commanded by the parent
    /// or by ground, and transitions which were commanded autonomously as part of children
    /// mode keeping.
    mode_keeping_transition: bool,
    // Queues for ground TC reception and TM transmission.
    tmtc_queues: TmtcQueues,
    // Last known mode info per child, indexed like the children queue arrays.
    mgm_modes: [MgmInfo; 2],
    parent_queues: ParentQueueHelper,
    pub(crate) children_queues: ChildrenQueueHelper,
}
impl Assembly {
pub const ID: ComponentId = ComponentId::AcsMgmAssembly;
    /// Creates a new assembly starting in [AssemblyMode::NoModeKeeping].
    ///
    /// `mode_timeout` is forwarded to the internal [ModeHelper]; presumably it
    /// bounds how long a commanded transition may take — confirm in ModeHelper.
    pub fn new(
        parent_queues: ParentQueueHelper,
        children_queues: ChildrenQueueHelper,
        tmtc_queues: TmtcQueues,
        mode_timeout: Duration,
    ) -> Self {
        Self {
            mode_helper: ModeHelper::new(AssemblyMode::NoModeKeeping, mode_timeout),
            mode_keeping_transition: false,
            tmtc_queues,
            mgm_modes: [MgmInfo::default(); 2],
            parent_queues,
            children_queues,
        }
    }
    /// Entry point which must be called periodically by the executing task.
    ///
    /// Drains the telecommand, parent request and children report queues, then
    /// advances any active mode transition.
    pub fn periodic_operation(&mut self) {
        self.handle_telecommands();
        self.handle_parent_mode_queue();
        self.handle_children_mode_queues();
        if self.mode_helper.transition_active() {
            self.handle_mode_transition();
        }
    }
pub fn handle_telecommands(&mut self) {
loop {
match self.tmtc_queues.tc_rx.try_recv() {
Ok(packet) => {
let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&packet.sp_header);
match postcard::from_bytes::<models::mgm_assembly::request::Request>(
&packet.payload,
) {
Ok(request) => match request {
models::mgm_assembly::request::Request::Ping => {
self.send_telemetry(Some(tc_id), response::Response::Ok)
}
models::mgm_assembly::request::Request::Mode(request) => {
match request {
request::ModeRequest::SetMode(assembly_mode) => {
self.start_transition(false, assembly_mode, Some(tc_id))
}
request::ModeRequest::ReadMode => self.send_telemetry(
Some(tc_id),
response::Response::Mode(response::ModeReport::Mode(
self.mode(),
)),
),
}
}
},
Err(e) => {
log::warn!("failed to deserialize request: {}", e);
}
}
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => log::warn!("packet sender disconnected"),
},
}
}
}
    /// Packs the given response into a CCSDS TM packet (via
    /// [pack_ccsds_tm_packet_for_now], which the name suggests stamps it with
    /// the current time) and queues it for transmission.
    ///
    /// `tc_id` links the TM to the telecommand it answers; None marks it as
    /// unsolicited. Packing or queueing failures are only logged.
    pub fn send_telemetry(
        &self,
        tc_id: Option<CcsdsPacketIdAndPsc>,
        response: models::mgm_assembly::response::Response,
    ) {
        match pack_ccsds_tm_packet_for_now(Self::ID, tc_id, &response) {
            Ok(packet) => {
                if let Err(e) = self.tmtc_queues.tm_tx.send(packet) {
                    log::warn!("failed to send TM packet: {}", e);
                }
            }
            Err(e) => {
                log::warn!("failed to pack TM packet: {}", e);
            }
        }
    }
pub fn handle_parent_mode_queue(&mut self) {
loop {
match self.parent_queues.request_rx.try_recv() {
Ok(request) => match request {
request::ModeRequest::SetMode(assembly_mode) => match assembly_mode {
AssemblyMode::Device(_device_mode) => {
self.start_transition(false, assembly_mode, None);
}
AssemblyMode::NoModeKeeping => {
self.mode_helper.current = AssemblyMode::NoModeKeeping;
}
},
request::ModeRequest::ReadMode => self
.parent_queues
.report_tx
.send(response::ModeReport::Mode(self.mode_helper.current))
.unwrap(),
},
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
log::warn!("packet sender disconnected")
}
},
}
}
}
/// Drains the mode report queues of all child MGM handlers and evaluates
/// transition completion as well as the mode-keeping invariant.
pub fn handle_children_mode_queues(&mut self) {
    let mut mode_report_received = false;
    for (idx, rx) in self.children_queues.report_rx_queues.iter_mut().enumerate() {
        loop {
            match rx.try_recv() {
                Ok(report) => match report {
                    models::mgm::response::ModeResponse::Mode(device_mode) => {
                        self.mgm_modes[idx].mode = Some(device_mode);
                        self.mgm_modes[idx].reply_received = true;
                        mode_report_received = true;
                    }
                    models::mgm::response::ModeResponse::SetModeTimeout => {
                        // Ignore, handle this with our own timeout.
                        log::warn!("MGM {} mode timeout", idx);
                    }
                },
                Err(mpsc::TryRecvError::Empty) => break,
                Err(mpsc::TryRecvError::Disconnected) => {
                    // A disconnected channel keeps returning Disconnected
                    // forever: break out of this queue's drain loop instead of
                    // busy-looping.
                    log::warn!("packet sender disconnected");
                    break;
                }
            }
        }
    }
    // Nothing new arrived this cycle: no re-evaluation needed.
    if !mode_report_received {
        return;
    }
    // Transition is active, check for completion.
    if self.mode_helper.transition_active()
        && self.mgm_modes.iter().all(|i| i.reply_received)
        && let AssemblyMode::Device(device_mode) = self.mode_helper.target.unwrap()
    {
        // If at least one child reached the correct mode, we are done.
        if self.mgm_modes.iter().any(|i| i.mode == Some(device_mode)) {
            self.handle_mode_reached(true);
        } else {
            let report = if self.mode_keeping_transition {
                response::ModeReport::CanNotKeepMode(self.mgm_modes.map(|info| info.mode))
            } else {
                response::ModeReport::WrongMode(self.mgm_modes.map(|info| info.mode))
            };
            self.handle_mode_transition_failure(report);
        }
    }
    // Mode keeping active: Check children modes.
    if let AssemblyMode::Device(device_mode) = self.mode_helper.current
        && self
            .mgm_modes
            .iter()
            .all(|info| info.mode != Some(device_mode))
    {
        // Children lost mode. Try to command them back to the correct
        // mode.
        self.start_transition(true, self.mode_helper.current, None);
    }
}
/// Drives an active mode transition: commands the children on the first pass
/// and declares failure once the reply window has elapsed.
pub fn handle_mode_transition(&mut self) {
    // No pending target: treat the transition as trivially complete.
    let Some(target) = self.mode_helper.target else {
        self.handle_mode_reached(true);
        return;
    };
    // Only a concrete device mode requires commanding the children.
    let device_mode = match target {
        AssemblyMode::Device(device_mode) => device_mode,
        AssemblyMode::NoModeKeeping => {
            self.handle_mode_reached(true);
            return;
        }
    };
    // First pass: fan the mode command out to the children once.
    if self.mode_helper.transition_state == TransitionState::Idle {
        self.command_children(device_mode);
        self.mode_helper.transition_state = TransitionState::AwaitingReplies;
    }
    // Waiting pass: give up once the reply timeout has elapsed.
    if self.mode_helper.transition_state == TransitionState::AwaitingReplies
        && self.mode_helper.timed_out()
    {
        let report = if self.mode_keeping_transition {
            response::ModeReport::CanNotKeepMode(self.mgm_modes.map(|info| info.mode))
        } else {
            response::ModeReport::SetModeTimeout(self.mgm_modes.map(|info| info.mode))
        };
        self.handle_mode_transition_failure(report);
    }
}
/// Finalizes a transition: announces the new mode, acknowledges the TC
/// commander (if any) and always reports the current mode to the parent.
pub fn handle_mode_reached(&mut self, success: bool) {
    // The mode helper hands back the optional TC commander of the finished
    // transition.
    let tc_commander = self.mode_helper.finish(success);
    self.announce_mode();
    if let Some(commander) = tc_commander {
        self.send_telemetry(Some(commander), response::Response::Ok);
    }
    self.parent_queues
        .report_tx
        .send(response::ModeReport::Mode(self.mode_helper.current))
        .unwrap();
}
/// Reports a failed transition to the TC commander (if any) and the parent,
/// then finalizes the transition as unsuccessful.
pub fn handle_mode_transition_failure(&mut self, report: response::ModeReport) {
    if self.mode_helper.tc_commander.is_some() {
        // Send the actual failure report to the TC commander. Previously this
        // always sent SetModeTimeout, even when the failure was WrongMode or
        // CanNotKeepMode, so the TM contradicted the parent report.
        self.send_telemetry(
            self.mode_helper.tc_commander,
            response::Response::Mode(report.clone()),
        );
    }
    self.parent_queues.report_tx.send(report).unwrap();
    self.mode_helper.finish(false);
}
/// Broadcasts the same SetMode command to every child MGM handler.
pub fn command_children(&self, mode: DeviceMode) {
    self.children_queues
        .request_tx_queues
        .iter()
        .for_each(|tx| {
            tx.send(models::mgm::request::ModeRequest::SetMode(mode))
                .unwrap()
        });
}
/// Begins a new mode transition towards `target`, remembering whether it was
/// triggered by mode keeping and which TC (if any) commanded it.
pub fn start_transition(
    &mut self,
    mode_keeping: bool,
    target: AssemblyMode,
    tc_id: Option<CcsdsPacketIdAndPsc>,
) {
    self.mode_keeping_transition = mode_keeping;
    self.mode_helper.tc_commander = tc_id;
    // Reset per-child reply bookkeeping before the children are commanded.
    for mgm_info in self.mgm_modes.iter_mut() {
        mgm_info.reply_received = false;
    }
    self.mode_helper.start(target);
}
/// Logs the current mode of this assembly.
fn announce_mode(&self) {
    // TODO: Event?
    log::info!("{:?} announcing mode: {:?}", Self::ID, self.mode_helper.current);
}
/// The assembly mode currently tracked by the internal mode helper.
#[inline]
pub fn mode(&self) -> AssemblyMode {
    self.mode_helper.current
}
/// Whether a mode transition is currently in progress. Test-only accessor.
#[inline]
#[cfg(test)]
fn mode_transition_active(&self) -> bool {
    self.mode_helper.transition_active()
}
}
#[cfg(test)]
mod tests {
    use std::sync::mpsc::TryRecvError;
    use arbitrary_int::u11;
    use models::{
        Apid, Message, MessageType, TcHeader,
        ccsds::{CcsdsTcPacketOwned, CcsdsTmPacketOwned},
        mgm_assembly,
    };
    use satrs::spacepackets::SpacePacketHeader;
    use super::*;

    /// Test harness owning the assembly under test plus the external ends of
    /// all of its queues (parent subsystem, child MGMs, TMTC).
    pub struct Testbench {
        subsystem_req_tx: mpsc::SyncSender<request::ModeRequest>,
        subsystem_report_rx: mpsc::Receiver<response::ModeReport>,
        mgm_request_rx: [mpsc::Receiver<models::mgm::request::ModeRequest>; 2],
        mgm_report_tx: [mpsc::SyncSender<models::mgm::response::ModeResponse>; 2],
        tc_tx: mpsc::SyncSender<CcsdsTcPacketOwned>,
        tm_rx: mpsc::Receiver<CcsdsTmPacketOwned>,
        assembly: Assembly,
    }

    impl Testbench {
        /// Wires up all bounded channels and constructs the assembly with a
        /// short (20 ms) transition timeout suitable for tests.
        pub fn new() -> Self {
            let (subsystem_req_tx, subsystem_req_rx) = mpsc::sync_channel(5);
            let (subsystem_report_tx, subsystem_report_rx) = mpsc::sync_channel(5);
            let (mgm_0_mode_request_tx, mgm_0_mode_request_rx) = mpsc::sync_channel(5);
            let (mgm_1_mode_request_tx, mgm_1_mode_request_rx) = mpsc::sync_channel(5);
            let (mgm_0_mode_report_tx, mgm_0_mode_report_rx) = mpsc::sync_channel(5);
            let (mgm_1_mode_report_tx, mgm_1_mode_report_rx) = mpsc::sync_channel(5);
            let (tc_tx, tc_rx) = mpsc::sync_channel(5);
            let (tm_tx, tm_rx) = mpsc::sync_channel(5);
            Self {
                subsystem_req_tx,
                subsystem_report_rx,
                mgm_request_rx: [mgm_0_mode_request_rx, mgm_1_mode_request_rx],
                mgm_report_tx: [mgm_0_mode_report_tx, mgm_1_mode_report_tx],
                tc_tx,
                tm_rx,
                assembly: Assembly::new(
                    ParentQueueHelper {
                        request_rx: subsystem_req_rx,
                        report_tx: subsystem_report_tx,
                    },
                    ChildrenQueueHelper {
                        request_tx_queues: [mgm_0_mode_request_tx, mgm_1_mode_request_tx],
                        report_rx_queues: [mgm_0_mode_report_rx, mgm_1_mode_report_rx],
                    },
                    TmtcQueues { tc_rx, tm_tx },
                    Duration::from_millis(20),
                ),
            }
        }

        /// Asserts that the assembly produced no TM, no parent report and no
        /// child requests.
        pub fn assert_all_queues_empty(&self) {
            assert!(
                matches!(self.tm_rx.try_recv().unwrap_err(), TryRecvError::Empty),
                "TM queue not empty"
            );
            assert!(
                matches!(
                    self.subsystem_report_rx.try_recv().unwrap_err(),
                    TryRecvError::Empty
                ),
                "subsystem report queue not empty"
            );
            for rx in self.mgm_request_rx.iter() {
                assert!(
                    matches!(rx.try_recv().unwrap_err(), TryRecvError::Empty),
                    "mgm request queue not empty"
                )
            }
        }
    }

    /// Wraps an assembly request into a CCSDS TC packet addressed to the
    /// assembly.
    pub fn create_request_tc(
        request: models::mgm_assembly::request::Request,
    ) -> models::ccsds::CcsdsTcPacketOwned {
        models::ccsds::CcsdsTcPacketOwned::new_with_request(
            SpacePacketHeader::new_from_apid(u11::new(Apid::Acs as u16)),
            TcHeader::new(Assembly::ID, request.message_type()),
            request,
        )
    }

    // Idle assembly: one periodic cycle produces no traffic and stays in
    // NoModeKeeping.
    #[test]
    fn basic_test() {
        let mut tb = Testbench::new();
        tb.assert_all_queues_empty();
        tb.assembly.periodic_operation();
        tb.assert_all_queues_empty();
        assert_eq!(tb.assembly.mode(), AssemblyMode::NoModeKeeping);
    }

    // TC-commanded transition: children are commanded, both confirm, and the
    // TC commander receives an Ok verification TM.
    #[test]
    fn test_tc_commanded_transition() {
        let mut tb = Testbench::new();
        tb.tc_tx
            .send(create_request_tc(mgm_assembly::request::Request::Mode(
                request::ModeRequest::SetMode(AssemblyMode::Device(DeviceMode::Normal)),
            )))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // Confirm the mode is set.
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(
                DeviceMode::Normal,
            ))
            .unwrap();
        }
        tb.assembly.periodic_operation();
        assert!(!tb.assembly.mode_transition_active());
        assert_eq!(tb.assembly.mode(), AssemblyMode::Device(DeviceMode::Normal));
        let response = tb.tm_rx.try_recv().unwrap();
        assert_eq!(response.tm_header.sender_id, Assembly::ID);
        assert_eq!(response.tm_header.message_type, MessageType::Verification);
        let response: response::Response = postcard::from_bytes(&response.payload).unwrap();
        assert_eq!(response, response::Response::Ok);
    }

    // Parent-commanded transition: same flow as the TC variant, but the
    // completion is reported on the parent report queue instead of TM.
    #[test]
    fn test_parent_commanded_transition() {
        let mut tb = Testbench::new();
        tb.subsystem_req_tx
            .send(request::ModeRequest::SetMode(AssemblyMode::Device(
                DeviceMode::Normal,
            )))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // Confirm the mode is set.
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(
                DeviceMode::Normal,
            ))
            .unwrap();
        }
        tb.assembly.periodic_operation();
        assert!(!tb.assembly.mode_transition_active());
        assert_eq!(tb.assembly.mode(), AssemblyMode::Device(DeviceMode::Normal));
        let report = tb.subsystem_report_rx.try_recv().unwrap();
        assert_eq!(
            report,
            response::ModeReport::Mode(AssemblyMode::Device(DeviceMode::Normal))
        );
    }

    // Redundancy: the transition succeeds when only one of the two MGMs
    // reaches the target mode.
    #[test]
    fn test_one_mgm_is_sufficient() {
        let mut tb = Testbench::new();
        tb.subsystem_req_tx
            .send(request::ModeRequest::SetMode(AssemblyMode::Device(
                DeviceMode::Normal,
            )))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // One device is sufficient.
        tb.mgm_report_tx[0]
            .send(models::mgm::response::ModeResponse::Mode(
                DeviceMode::Normal,
            ))
            .unwrap();
        tb.mgm_report_tx[1]
            .send(models::mgm::response::ModeResponse::Mode(DeviceMode::Off))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(!tb.assembly.mode_transition_active());
        assert_eq!(tb.assembly.mode(), AssemblyMode::Device(DeviceMode::Normal));
        let report = tb.subsystem_report_rx.try_recv().unwrap();
        assert_eq!(
            report,
            response::ModeReport::Mode(AssemblyMode::Device(DeviceMode::Normal))
        );
    }

    // Failure path: both children report the wrong mode, so the assembly
    // reports WrongMode and falls back to NoModeKeeping.
    #[test]
    fn test_mode_commanding_fails() {
        let mut tb = Testbench::new();
        tb.subsystem_req_tx
            .send(request::ModeRequest::SetMode(AssemblyMode::Device(
                DeviceMode::Normal,
            )))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // Confirm the mode is set.
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(DeviceMode::Off))
                .unwrap();
        }
        tb.assembly.periodic_operation();
        assert!(!tb.assembly.mode_transition_active());
        assert_eq!(tb.assembly.mode(), AssemblyMode::NoModeKeeping);
        let report = tb.subsystem_report_rx.try_recv().unwrap();
        assert_eq!(
            report,
            response::ModeReport::WrongMode([Some(DeviceMode::Off), Some(DeviceMode::Off)])
        );
    }

    // Mode keeping: after a successful transition the children lose the mode,
    // the assembly re-commands them, and when that also fails it reports
    // CanNotKeepMode.
    #[test]
    fn test_mode_keeping_fails() {
        let mut tb = Testbench::new();
        tb.subsystem_req_tx
            .send(request::ModeRequest::SetMode(AssemblyMode::Device(
                DeviceMode::Normal,
            )))
            .unwrap();
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // Confirm the mode is set.
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(
                DeviceMode::Normal,
            ))
            .unwrap();
        }
        tb.assembly.periodic_operation();
        assert!(!tb.assembly.mode_transition_active());
        assert_eq!(tb.assembly.mode(), AssemblyMode::Device(DeviceMode::Normal));
        let report = tb.subsystem_report_rx.try_recv().unwrap();
        assert_eq!(
            report,
            response::ModeReport::Mode(AssemblyMode::Device(DeviceMode::Normal))
        );
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(DeviceMode::Off))
                .unwrap();
        }
        // This should start mode keeping.
        tb.assembly.periodic_operation();
        assert!(tb.assembly.mode_transition_active());
        for rx in tb.mgm_request_rx.iter() {
            let request = rx.try_recv().unwrap();
            assert_eq!(
                request,
                models::mgm::request::ModeRequest::SetMode(DeviceMode::Normal)
            );
        }
        // Let the mode keeping fail.
        for tx in tb.mgm_report_tx.iter() {
            tx.send(models::mgm::response::ModeResponse::Mode(DeviceMode::Off))
                .unwrap();
        }
        tb.assembly.periodic_operation();
        let report = tb.subsystem_report_rx.try_recv().unwrap();
        assert_eq!(
            report,
            response::ModeReport::CanNotKeepMode([Some(DeviceMode::Off), Some(DeviceMode::Off)])
        );
    }
}
+3 -1
View File
@@ -1,4 +1,6 @@
pub mod assembly;
pub mod ctrl;
pub mod mgm;
pub mod mgm_assembly;
pub mod subsystem;
-87
View File
@@ -1,87 +0,0 @@
use arbitrary_int::u11;
use satrs::pus::verification::RequestId;
use satrs::spacepackets::ecss::tc::PusTcCreator;
use satrs::spacepackets::ecss::tm::PusTmReader;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::spacepackets::SpHeader;
use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::Duration;
/// Sends a PUS TC[17,1] ping to the example OBSW via UDP and prints any
/// verification / ping-reply telemetry received within a 2 second window.
fn main() {
    let mut buf = [0; 32];
    let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
    let pus_tc = PusTcCreator::new_simple(
        SpHeader::new_from_apid(u11::new(0x02)),
        MessageTypeId::new(17, 1),
        &[],
        CreatorConfig::default(),
    );
    let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
    let tc_req_id = RequestId::new(&pus_tc);
    println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");
    let size = pus_tc
        .write_to_bytes(&mut buf)
        .expect("Creating PUS TC failed");
    client
        .send_to(&buf[0..size], addr)
        .unwrap_or_else(|_| panic!("Sending to {addr:?} failed"));
    client
        .set_read_timeout(Some(Duration::from_secs(2)))
        .expect("Setting read timeout failed");
    loop {
        let res = client.recv(&mut buf);
        match res {
            Ok(len) => {
                let pus_tm = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
                if pus_tm.service_type_id() == 17 && pus_tm.message_subtype_id() == 2 {
                    println!("Received PUS Ping Reply TM[17,2]")
                } else if pus_tm.service_type_id() == 1 {
                    if pus_tm.source_data().is_empty() {
                        println!("Invalid verification TM, no source data");
                        // Bail out of this packet: parsing the request ID below
                        // would panic on empty source data.
                        continue;
                    }
                    let src_data = pus_tm.source_data();
                    if src_data.len() < 4 {
                        println!("Invalid verification TM source data, less than 4 bytes");
                        // Same rationale: RequestId::from_bytes needs 4 bytes.
                        continue;
                    }
                    let req_id = RequestId::from_bytes(src_data).unwrap();
                    let subtype_id = pus_tm.message_subtype_id();
                    if subtype_id == 1 {
                        println!("Received TM[1,1] acceptance success for request ID {req_id}")
                    } else if subtype_id == 2 {
                        println!("Received TM[1,2] acceptance failure for request ID {req_id}")
                    } else if subtype_id == 3 {
                        println!("Received TM[1,3] start success for request ID {req_id}")
                    } else if subtype_id == 4 {
                        // Fixed label: subtype 4 is the start failure report.
                        println!("Received TM[1,4] start failure for request ID {req_id}")
                    } else if subtype_id == 5 {
                        println!("Received TM[1,5] step success for request ID {req_id}")
                    } else if subtype_id == 6 {
                        println!("Received TM[1,6] step failure for request ID {req_id}")
                    } else if subtype_id == 7 {
                        println!("Received TM[1,7] completion success for request ID {req_id}")
                    } else if subtype_id == 8 {
                        println!("Received TM[1,8] completion failure for request ID {req_id}");
                    }
                } else {
                    println!(
                        "Received TM[{}, {}] with {} bytes",
                        pus_tm.service_type_id(),
                        pus_tm.message_subtype_id(),
                        // Report the number of bytes actually received; the
                        // previous code printed the size of the TC sent earlier.
                        len
                    );
                }
            }
            Err(ref e)
                if e.kind() == std::io::ErrorKind::WouldBlock
                    || e.kind() == std::io::ErrorKind::TimedOut =>
            {
                println!("No reply received for 2 seconds");
                break;
            }
            _ => {
                println!("UDP receive error {:?}", res.unwrap_err());
            }
        }
    }
}
-66
View File
@@ -1,66 +0,0 @@
#![allow(dead_code)]
use crossbeam_channel::{bounded, Receiver, Sender};
use std::sync::atomic::{AtomicU16, Ordering};
use std::thread;
use zerocopy::{FromBytes, Immutable, IntoBytes, NetworkEndian, Unaligned, U16};
/// Abstraction over anything that can expose its payload as a byte slice and
/// be moved across threads (hence the `Send` bound).
trait FieldDataProvider: Send {
    /// Borrows the raw payload bytes.
    fn get_data(&self) -> &[u8];
}
/// Fixed-size 8-byte payload wrapper.
struct FixedFieldDataWrapper {
    data: [u8; 8],
}

impl FixedFieldDataWrapper {
    /// Builds the wrapper from two u32 values, stored big-endian back to back.
    pub fn from_two_u32(p0: u32, p1: u32) -> Self {
        let mut data = [0u8; 8];
        let (first, second) = data.split_at_mut(4);
        first.copy_from_slice(&p0.to_be_bytes());
        second.copy_from_slice(&p1.to_be_bytes());
        Self { data }
    }
}
impl FieldDataProvider for FixedFieldDataWrapper {
    /// Exposes the fixed 8-byte buffer as a slice.
    fn get_data(&self) -> &[u8] {
        &self.data
    }
}
// Boxed trait object so heterogeneous providers can share one channel type.
type FieldDataTraitObj = Box<dyn FieldDataProvider>;

// Host-side representation of an example MGM dataset.
struct ExampleMgmSet {
    mgm_vec: [f32; 3],
    temperature: u16,
}

// Zero-copy wire representation of the set above; field order and widths must
// stay in sync with ExampleMgmSet. NOTE(review): `temperatur` looks like a typo
// of `temperature` — confirm before renaming, the name may be relied upon.
#[derive(FromBytes, IntoBytes, Immutable, Unaligned)]
#[repr(C)]
struct ExampleMgmSetZc {
    mgm_vec: [u8; 12],
    temperatur: U16<NetworkEndian>,
}
/// Scratch example: ships a boxed trait object through a bounded channel to a
/// worker thread, then demonstrates atomic wrap-around on overflow.
fn main() {
    let (s0, r0): (Sender<FieldDataTraitObj>, Receiver<FieldDataTraitObj>) = bounded(5);
    s0.send(Box::new(FixedFieldDataWrapper::from_two_u32(2, 3)))
        .unwrap();
    let receiver_handle = thread::spawn(move || {
        let data = r0.recv().unwrap();
        let raw = data.get_data();
        println!("Received data {raw:?}");
    });
    let noop_handle = thread::spawn(|| {});
    receiver_handle.join().unwrap();
    noop_handle.join().unwrap();
    //let mut max_val: u16 = u16::MAX;
    //max_val += 1;
    //println!("Max val: {}", max_val);
    // Unlike the plain u16 addition commented out above, fetch_add on an
    // atomic wraps around on overflow.
    let atomic_u16: AtomicU16 = AtomicU16::new(u16::MAX);
    atomic_u16.fetch_add(1, Ordering::SeqCst);
    println!(
        "atomic after overflow: {}",
        atomic_u16.load(Ordering::SeqCst)
    );
}
+34
View File
@@ -0,0 +1,34 @@
use arbitrary_int::u11;
use models::{Apid, ComponentId, Message, TmHeader, ccsds::CcsdsTmPacketOwned};
use satrs::spacepackets::{
CcsdsPacketIdAndPsc, SpHeader,
time::{StdTimestampError, cds::CdsTime},
};
use serde::Serialize;
/// Errors which can occur while packing a CCSDS TM packet.
#[derive(Debug, thiserror::Error)]
pub enum CcsdsTmCreationError {
    /// Serializing the payload with postcard failed.
    #[error("postcard error: {0}")]
    Postcard(#[from] postcard::Error),
    /// Retrieving the current system timestamp failed.
    #[error("timestamp error: {0}")]
    Time(#[from] StdTimestampError),
}
/// Packs a serializable payload into a CCSDS TM packet addressed to ground,
/// stamped with the current CDS time.
///
/// `tc_id` optionally links the telemetry to the telecommand it answers.
pub fn pack_ccsds_tm_packet_for_now(
    sender_id: ComponentId,
    tc_id: Option<CcsdsPacketIdAndPsc>,
    payload: &(impl Serialize + Message),
) -> Result<CcsdsTmPacketOwned, CcsdsTmCreationError> {
    // Timestamp the packet with the current time.
    let timestamp = CdsTime::now_with_u16_days()?;
    let tm_header = TmHeader::new(
        sender_id,
        ComponentId::Ground,
        payload.message_type(),
        tc_id,
        &timestamp,
    );
    let sp_header = SpHeader::new_from_apid(u11::new(Apid::Tmtc as u16));
    let packet = CcsdsTmPacketOwned::new_with_serde_payload(sp_header, &tm_header, payload)?;
    Ok(packet)
}
+6 -11
View File
@@ -7,13 +7,10 @@ use satrs::{
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
use std::{collections::HashSet, net::Ipv4Addr};
use strum::IntoEnumIterator;
use strum::IntoEnumIterator as _;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
events_legacy::{EventU32TypedSev, SeverityInfo},
pool::{StaticMemoryPool, StaticPoolConfig},
};
use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
@@ -39,19 +36,17 @@ pub enum GroupId {
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
pub const SERVER_PORT: u16 = 7301;
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(0, 0);
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in crate::ids::Apid::iter() {
for id in models::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, u11::new(id as u16)));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in crate::ids::Apid::iter() {
for id in models::Apid::iter() {
set.insert(id as u16);
}
set
@@ -174,7 +169,7 @@ pub mod pool {
pub mod tasks {
pub const FREQ_MS_UDP_TMTC: u64 = 200;
pub const FREQ_MS_AOCS: u64 = 500;
pub const FREQ_MS_PUS_STACK: u64 = 200;
pub const FREQ_MS_AOCS: u64 = 200;
pub const FREQ_MS_CONTROLLER: u64 = 200;
pub const SIM_CLIENT_IDLE_DELAY_MS: u64 = 5;
}
+84
View File
@@ -0,0 +1,84 @@
use models::{
ComponentId,
ccsds::{CcsdsTcPacketOwned, CcsdsTmPacketOwned},
control,
};
use satrs::spacepackets::CcsdsPacketIdAndPsc;
use crate::ccsds::pack_ccsds_tm_packet_for_now;
/// Example controller component: consumes telecommands, emits telemetry and
/// forwards test events.
pub struct Controller {
    // Incoming telecommand packets addressed to this controller.
    pub tc_rx: std::sync::mpsc::Receiver<CcsdsTcPacketOwned>,
    // Outgoing telemetry packets.
    pub tm_tx: std::sync::mpsc::SyncSender<CcsdsTmPacketOwned>,
    // Channel used to emit control events — presumably consumed by an event
    // handling component; TODO confirm against the caller.
    pub event_ctrl_tx: std::sync::mpsc::SyncSender<control::Event>,
}
impl Controller {
    /// Creates the controller from its three channel endpoints.
    pub fn new(
        tc_rx: std::sync::mpsc::Receiver<CcsdsTcPacketOwned>,
        tm_tx: std::sync::mpsc::SyncSender<CcsdsTmPacketOwned>,
        event_ctrl_tx: std::sync::mpsc::SyncSender<control::Event>,
    ) -> Self {
        Self {
            tc_rx,
            tm_tx,
            event_ctrl_tx,
        }
    }

    /// Periodic entry point; currently only processes pending telecommands.
    pub fn periodic_operation(&mut self) {
        self.handle_telecommands();
    }

    /// Drains the TC queue, deserializes each packet payload with postcard and
    /// dispatches the contained request.
    pub fn handle_telecommands(&mut self) {
        loop {
            match self.tc_rx.try_recv() {
                Ok(packet) => {
                    let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&packet.sp_header);
                    match postcard::from_bytes::<control::request::Request>(&packet.payload) {
                        Ok(request) => {
                            log::info!(
                                "received request {:?} with TC ID {:#010x}",
                                request,
                                tc_id.raw()
                            );
                            match request {
                                control::request::Request::Ping => self
                                    .send_telemetry(Some(tc_id), control::response::Response::Ok),
                                control::request::Request::TestEvent => {
                                    self.event_ctrl_tx.send(control::Event::TestEvent).unwrap()
                                }
                            }
                        }
                        Err(e) => {
                            log::warn!("failed to deserialize request: {}", e);
                        }
                    }
                }
                Err(std::sync::mpsc::TryRecvError::Empty) => break,
                Err(std::sync::mpsc::TryRecvError::Disconnected) => {
                    // A disconnected channel keeps returning Disconnected
                    // forever: break out instead of busy-looping and spamming
                    // warnings (the previous code looped without a break here).
                    log::warn!("packet sender disconnected");
                    break;
                }
            }
        }
    }

    /// Packs the response into a time-stamped TM packet and queues it for
    /// downlink. Packing or queueing failures are logged and otherwise ignored.
    pub fn send_telemetry(
        &self,
        tc_id: Option<CcsdsPacketIdAndPsc>,
        response: control::response::Response,
    ) {
        match pack_ccsds_tm_packet_for_now(ComponentId::Controller, tc_id, &response) {
            Ok(packet) => {
                if let Err(e) = self.tm_tx.send(packet) {
                    log::warn!("failed to send TM packet: {}", e);
                }
            }
            Err(e) => {
                log::warn!("failed to pack TM packet: {}", e);
            }
        }
    }
}
+90 -29
View File
@@ -1,16 +1,15 @@
use derive_new::new;
use models::pcdu::{SwitchId, SwitchRequest, SwitchState, SwitchStateBinary};
use std::{cell::RefCell, collections::VecDeque, sync::mpsc, time::Duration};
use satrs::{
power::{
PowerSwitchInfo, PowerSwitcherCommandSender, SwitchRequest, SwitchState, SwitchStateBinary,
},
queue::GenericSendError,
request::{GenericMessage, MessageMetadata},
};
use satrs_minisim::eps::{PcduSwitch, SwitchMapWrapper};
use thiserror::Error;
use crate::eps::pcdu::SwitchMapWrapper;
use self::pcdu::SharedSwitchSet;
pub mod pcdu;
@@ -22,6 +21,7 @@ pub struct PowerSwitchHelper {
}
#[derive(Debug, Error, Copy, Clone, PartialEq, Eq)]
#[allow(dead_code)]
pub enum SwitchCommandingError {
#[error("send error: {0}")]
Send(#[from] GenericSendError),
@@ -31,18 +31,72 @@ pub enum SwitchCommandingError {
pub enum SwitchInfoError {
/// This is a configuration error which should not occur.
#[error("switch ID not in map")]
SwitchIdNotInMap(PcduSwitch),
SwitchIdNotInMap(SwitchId),
#[error("switch set invalid")]
SwitchSetInvalid,
}
impl PowerSwitchInfo<PcduSwitch> for PowerSwitchHelper {
impl PowerSwitchHelper {
    /// Sends a request to switch the given switch on.
    pub fn send_switch_on_cmd(
        &self,
        requestor_info: satrs::request::MessageMetadata,
        switch_id: SwitchId,
    ) -> Result<(), GenericSendError> {
        self.switcher_tx.send(GenericMessage::new(
            requestor_info,
            SwitchRequest::new(switch_id, SwitchStateBinary::On),
        ))?;
        Ok(())
    }

    /// Sends a request to switch the given switch off.
    #[allow(dead_code)]
    pub fn send_switch_off_cmd(
        &self,
        requestor_info: satrs::request::MessageMetadata,
        switch_id: SwitchId,
    ) -> Result<(), GenericSendError> {
        self.switcher_tx.send(GenericMessage::new(
            requestor_info,
            SwitchRequest::new(switch_id, SwitchStateBinary::Off),
        ))?;
        Ok(())
    }

    /// Reads the current state of a switch from the shared switch set.
    ///
    /// Fails if the shared set is flagged invalid or if the switch ID is not
    /// part of the map (a configuration error).
    pub fn switch_state(&self, switch_id: SwitchId) -> Result<SwitchState, SwitchInfoError> {
        let switch_set = self
            .shared_switch_set
            .lock()
            .expect("failed to lock switch set");
        if !switch_set.valid {
            return Err(SwitchInfoError::SwitchSetInvalid);
        }
        if let Some(state) = switch_set.switch_map.get(&switch_id) {
            return Ok(*state);
        }
        Err(SwitchInfoError::SwitchIdNotInMap(switch_id))
    }

    #[allow(dead_code)]
    fn switch_delay_ms(&self) -> Duration {
        // Here, we could set device specific switch delays theoretically. Set it to this value
        // for now.
        Duration::from_millis(1000)
    }

    /// Convenience check which treats any error while reading the switch state
    /// as "not on".
    pub fn is_switch_on(&self, switch_id: SwitchId) -> bool {
        if let Ok(state) = self.switch_state(switch_id) {
            state == SwitchState::On
        } else {
            false
        }
    }
}
/*
impl PowerSwitchInfo<SwitchId> for PowerSwitchHelper {
type Error = SwitchInfoError;
fn switch_state(
&self,
switch_id: PcduSwitch,
) -> Result<satrs::power::SwitchState, Self::Error> {
fn switch_state(&self, switch_id: SwitchId) -> Result<SwitchState, Self::Error> {
let switch_set = self
.shared_switch_set
.lock()
@@ -63,43 +117,51 @@ impl PowerSwitchInfo<PcduSwitch> for PowerSwitchHelper {
Duration::from_millis(1000)
}
}
*/
impl PowerSwitcherCommandSender<PcduSwitch> for PowerSwitchHelper {
/*
impl PowerSwitcherCommandSender<SwitchId> for PowerSwitchHelper {
type Error = SwitchCommandingError;
fn send_switch_on_cmd(
&self,
requestor_info: satrs::request::MessageMetadata,
switch_id: PcduSwitch,
switch_id: SwitchId,
) -> Result<(), Self::Error> {
self.switcher_tx
.send_switch_on_cmd(requestor_info, switch_id)?;
self.switcher_tx.send(GenericMessage::new(
requestor_info,
SwitchRequest::new(switch_id, SwitchStateBinary::On),
));
Ok(())
}
fn send_switch_off_cmd(
&self,
requestor_info: satrs::request::MessageMetadata,
switch_id: PcduSwitch,
switch_id: SwitchId,
) -> Result<(), Self::Error> {
self.switcher_tx
.send_switch_off_cmd(requestor_info, switch_id)?;
self.switcher_tx.send(GenericMessage::new(
requestor_info,
SwitchRequest::new(switch_id, SwitchStateBinary::Off),
));
Ok(())
}
}
*/
#[allow(dead_code)]
#[derive(new)]
pub struct SwitchRequestInfo {
pub requestor_info: MessageMetadata,
pub switch_id: PcduSwitch,
pub target_state: satrs::power::SwitchStateBinary,
pub switch_id: SwitchId,
pub target_state: SwitchStateBinary,
}
// Test switch helper which can be used for unittests.
#[allow(dead_code)]
pub struct TestSwitchHelper {
pub switch_requests: RefCell<VecDeque<SwitchRequestInfo>>,
pub switch_info_requests: RefCell<VecDeque<PcduSwitch>>,
pub switch_info_requests: RefCell<VecDeque<SwitchId>>,
#[allow(dead_code)]
pub switch_delay_request_count: u32,
pub next_switch_delay: Duration,
@@ -120,13 +182,11 @@ impl Default for TestSwitchHelper {
}
}
impl PowerSwitchInfo<PcduSwitch> for TestSwitchHelper {
/*
impl PowerSwitchInfo<SwitchId> for TestSwitchHelper {
type Error = SwitchInfoError;
fn switch_state(
&self,
switch_id: PcduSwitch,
) -> Result<satrs::power::SwitchState, Self::Error> {
fn switch_state(&self, switch_id: SwitchId) -> Result<satrs::power::SwitchState, Self::Error> {
let mut switch_info_requests_mut = self.switch_info_requests.borrow_mut();
switch_info_requests_mut.push_back(switch_id);
if !self.switch_map_valid {
@@ -144,13 +204,13 @@ impl PowerSwitchInfo<PcduSwitch> for TestSwitchHelper {
}
}
impl PowerSwitcherCommandSender<PcduSwitch> for TestSwitchHelper {
impl PowerSwitcherCommandSender<SwitchId> for TestSwitchHelper {
type Error = SwitchCommandingError;
fn send_switch_on_cmd(
&self,
requestor_info: MessageMetadata,
switch_id: PcduSwitch,
switch_id: SwitchId,
) -> Result<(), Self::Error> {
let mut switch_requests_mut = self.switch_requests.borrow_mut();
switch_requests_mut.push_back(SwitchRequestInfo {
@@ -170,7 +230,7 @@ impl PowerSwitcherCommandSender<PcduSwitch> for TestSwitchHelper {
fn send_switch_off_cmd(
&self,
requestor_info: MessageMetadata,
switch_id: PcduSwitch,
switch_id: SwitchId,
) -> Result<(), Self::Error> {
let mut switch_requests_mut = self.switch_requests.borrow_mut();
switch_requests_mut.push_back(SwitchRequestInfo {
@@ -187,11 +247,12 @@ impl PowerSwitcherCommandSender<PcduSwitch> for TestSwitchHelper {
Ok(())
}
}
*/
#[allow(dead_code)]
impl TestSwitchHelper {
// Helper function which can be used to force a switch to another state for test purposes.
pub fn set_switch_state(&mut self, switch: PcduSwitch, state: SwitchState) {
pub fn set_switch_state(&mut self, switch: SwitchId, state: SwitchState) {
self.switch_map.get_mut().0.insert(switch, state);
}
}
+299 -291
View File
@@ -1,43 +1,109 @@
use std::{
cell::RefCell,
collections::VecDeque,
sync::{mpsc, Arc, Mutex},
collections::{HashMap, VecDeque},
sync::{Arc, Mutex, mpsc},
};
use derive_new::new;
use models::{
ComponentId, DeviceMode,
ccsds::{CcsdsTcPacketOwned, CcsdsTmPacketOwned},
pcdu::{
self, SwitchId, SwitchMapBinary, SwitchMapBinaryWrapper, SwitchRequest, SwitchState,
SwitchStateBinary, SwitchesBitfield,
},
};
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
hk::{HkRequest, HkRequestVariant},
mode::{
ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequestHandler,
ModeRequestHandlerMpscBounded,
},
mode_tree::{ModeChild, ModeNode},
power::SwitchRequest,
pus::{EcssTmSender, PusTmVariant},
queue::GenericSendError,
request::{GenericMessage, MessageMetadata, UniqueApidTargetId},
spacepackets::ByteConversionError,
};
use satrs_example::{
config::components::NO_SENDER,
ids::{eps::PCDU, generic_pus::PUS_MODE},
DeviceMode, TimestampHelper,
};
use satrs::{request::GenericMessage, spacepackets::CcsdsPacketIdAndPsc};
use satrs_example::TimestampHelper;
use satrs_minisim::{
eps::{
PcduReply, PcduRequest, PcduSwitch, SwitchMap, SwitchMapBinaryWrapper, SwitchMapWrapper,
},
SerializableSimMsgPayload, SimReply, SimRequest,
eps::{PcduReply, PcduRequest},
};
use serde::{Deserialize, Serialize};
use strum::IntoEnumIterator as _;
use crate::{
hk::PusHkHelper,
pus::hk::{HkReply, HkReplyVariant},
requests::CompositeRequest,
tmtc::sender::TmTcSender,
};
use crate::ccsds::pack_ccsds_tm_packet_for_now;
/// Snapshot of all known power switch states plus a validity flag.
#[derive(Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SwitchSet {
    pub valid: bool,
    pub switch_map: SwitchMap,
}

impl SwitchSet {
    /// Creates a set from the given map, marked valid.
    pub fn new(switch_map: SwitchMap) -> Self {
        Self {
            valid: true,
            switch_map,
        }
    }

    /// Creates a set in which every known switch starts in the Unknown state.
    pub fn new_with_init_switches_unknown() -> Self {
        let wrapper = SwitchMapWrapper::default();
        Self::new(wrapper.0)
    }

    /// Packs the switch states into the bitfield representation.
    ///
    /// Returns None if any known switch ID is missing from the map.
    pub fn as_bitfield(&self) -> Option<SwitchesBitfield> {
        // Verify every switch ID has an entry first so the unwraps below can
        // not panic.
        for entry in SwitchId::iter() {
            if !self.switch_map.contains_key(&entry) {
                return None;
            }
        }
        Some(
            SwitchesBitfield::builder()
                .with_magnetorquer(*self.switch_map.get(&SwitchId::Mgt).unwrap() == SwitchState::On)
                .with_mgm1(*self.switch_map.get(&SwitchId::Mgm1).unwrap() == SwitchState::On)
                .with_mgm0(*self.switch_map.get(&SwitchId::Mgm0).unwrap() == SwitchState::On)
                .build(),
        )
    }

    /// Sets the state of a single switch. Returns false if the switch ID is
    /// not part of the map.
    #[allow(dead_code)]
    pub fn set_switch_state(&mut self, switch_id: SwitchId, state: SwitchState) -> bool {
        if !self.switch_map.contains_key(&switch_id) {
            return false;
        }
        *self.switch_map.get_mut(&switch_id).unwrap() = state;
        true
    }
}
// Map from switch identifier to its tri-state (On/Off/Unknown) switch state.
pub type SwitchMap = HashMap<SwitchId, SwitchState>;

/// Newtype around [SwitchMap], mainly used to provide constructors with
/// different initial switch states.
pub struct SwitchMapWrapper(pub SwitchMap);

impl Default for SwitchMapWrapper {
    // By default, all known switches start in the Unknown state.
    fn default() -> Self {
        let mut switch_map = SwitchMap::default();
        for entry in SwitchId::iter() {
            switch_map.insert(entry, SwitchState::Unknown);
        }
        Self(switch_map)
    }
}

impl SwitchMapWrapper {
    /// Alternative constructor in which all switches start in the Off state.
    #[allow(dead_code)]
    pub fn new_with_init_switches_off() -> Self {
        let mut switch_map = SwitchMap::default();
        for entry in SwitchId::iter() {
            switch_map.insert(entry, SwitchState::Off);
        }
        Self(switch_map)
    }

    /// Converts a binary (On/Off) switch map into the tri-state representation.
    pub fn from_binary_switch_map_ref(switch_map: &SwitchMapBinary) -> Self {
        Self(
            switch_map
                .iter()
                .map(|(key, value)| (*key, SwitchState::from(*value)))
                .collect(),
        )
    }
}
pub type SharedSwitchSet = Arc<Mutex<SwitchSet>>;
pub trait SerialInterface {
type Error: core::fmt::Debug;
@@ -194,49 +260,50 @@ pub enum OpCode {
PollAndRecvReplies = 1,
}
#[derive(Clone, PartialEq, Eq, Default, Serialize, Deserialize)]
pub struct SwitchSet {
pub valid: bool,
pub switch_map: SwitchMap,
}
pub type SharedSwitchSet = Arc<Mutex<SwitchSet>>;
/// Example PCDU device handler.
#[derive(new)]
#[allow(clippy::too_many_arguments)]
pub struct PcduHandler<ComInterface: SerialInterface> {
id: UniqueApidTargetId,
dev_str: &'static str,
mode_node: ModeRequestHandlerMpscBounded,
composite_request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
hk_reply_tx: mpsc::SyncSender<GenericMessage<HkReply>>,
switch_request_rx: mpsc::Receiver<GenericMessage<SwitchRequest>>,
tm_sender: TmTcSender,
tc_rx: std::sync::mpsc::Receiver<CcsdsTcPacketOwned>,
tm_tx: mpsc::SyncSender<CcsdsTmPacketOwned>,
pub com_interface: ComInterface,
shared_switch_map: Arc<Mutex<SwitchSet>>,
#[new(value = "PusHkHelper::new(id)")]
hk_helper: PusHkHelper,
#[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")]
mode_and_submode: ModeAndSubmode,
#[new(default)]
mode: DeviceMode,
stamp_helper: TimestampHelper,
#[new(value = "[0; 256]")]
tm_buf: [u8; 256],
}
impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
/// Create a new PCDU handler.
///
/// Telecommands arrive on `tc_rx` and telemetry is emitted on `tm_tx`.
/// Switch requests from other components arrive on `switch_request_rx` and are
/// forwarded to the physical/simulated PCDU through `com_interface`. The
/// resulting switch state is published through `shared_switch_map`. The
/// handler starts in `init_mode`.
pub fn new(
    tc_rx: std::sync::mpsc::Receiver<CcsdsTcPacketOwned>,
    tm_tx: std::sync::mpsc::SyncSender<CcsdsTmPacketOwned>,
    switch_request_rx: mpsc::Receiver<GenericMessage<SwitchRequest>>,
    com_interface: ComInterface,
    shared_switch_map: Arc<Mutex<SwitchSet>>,
    init_mode: DeviceMode,
) -> Self {
    Self {
        dev_str: "PCDU",
        tc_rx,
        switch_request_rx,
        tm_tx,
        com_interface,
        shared_switch_map,
        stamp_helper: TimestampHelper::default(),
        // Start in normal mode by default. Assume that the PCDU itself is on by default.
        mode: init_mode,
    }
}
pub fn periodic_operation(&mut self, op_code: OpCode) {
match op_code {
OpCode::RegularOp => {
self.stamp_helper.update_from_now();
// Handle requests.
self.handle_composite_requests();
self.handle_mode_requests();
self.handle_telecommands();
self.handle_switch_requests();
// Poll the switch states and/or telemetry regularly here.
if self.mode() == DeviceMode::Normal as u32 || self.mode() == DeviceMode::On as u32
{
if self.mode() == DeviceMode::Normal || self.mode() == DeviceMode::On {
self.handle_periodic_commands();
}
}
@@ -246,75 +313,122 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
pub fn handle_composite_requests(&mut self) {
loop {
match self.composite_request_rx.try_recv() {
Ok(ref msg) => match &msg.message {
CompositeRequest::Hk(hk_request) => {
self.handle_hk_request(&msg.requestor_info, hk_request)
}
// TODO: This object does not have actions (yet).. Still send back completion failure
// reply.
CompositeRequest::Action(_action_req) => {}
},
/// Current device mode of the handler.
#[inline]
pub fn mode(&self) -> DeviceMode {
    self.mode
}
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::warn!(
"{}: failed to receive composite request: {:?}",
self.dev_str,
e
);
} else {
break;
/// Drain and process all telecommands currently queued on the TC channel.
///
/// Each packet payload is deserialized into a [pcdu::request::Request] using
/// postcard; deserialization failures are logged and the packet is dropped.
/// Ping and GetSwitches are answered immediately with TM, switch bitfield
/// requests are forwarded to the COM interface and mode requests trigger a
/// mode transition.
pub fn handle_telecommands(&mut self) {
    loop {
        match self.tc_rx.try_recv() {
            Ok(packet) => {
                let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&packet.sp_header);
                match postcard::from_bytes::<pcdu::request::Request>(&packet.payload) {
                    Ok(request) => {
                        log::info!(
                            "received request {:?} with TC ID {:#010x}",
                            request,
                            tc_id.raw()
                        );
                        match request {
                            pcdu::request::Request::Ping => {
                                self.send_tm(Some(tc_id), pcdu::response::Response::Ok)
                            }
                            pcdu::request::Request::GetSwitches => self.send_tm(
                                Some(tc_id),
                                pcdu::response::Response::Switches(
                                    self.shared_switch_map
                                        .lock()
                                        .unwrap()
                                        .as_bitfield()
                                        .expect("could not build switches response"),
                                ),
                            ),
                            pcdu::request::Request::EnableSwitches(switches) => {
                                self.handle_switches_bitfield_request(
                                    switches,
                                    SwitchStateBinary::On,
                                );
                            }
                            pcdu::request::Request::DisableSwitches(switches) => {
                                self.handle_switches_bitfield_request(
                                    switches,
                                    SwitchStateBinary::Off,
                                );
                            }
                            pcdu::request::Request::Mode(device_mode) => {
                                self.switch_mode(tc_id, device_mode)
                            }
                        }
                    }
                    Err(e) => {
                        log::warn!("failed to deserialize request: {}", e);
                    }
                }
            }
            Err(e) => match e {
                std::sync::mpsc::TryRecvError::Empty => break,
                std::sync::mpsc::TryRecvError::Disconnected => {
                    log::warn!("packet sender disconnected");
                    // BUGFIX: a disconnected channel stays disconnected forever;
                    // without this break the loop would spin endlessly, logging
                    // the warning on every try_recv().
                    break;
                }
            },
        }
    }
}
pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) {
match hk_request.variant {
HkRequestVariant::OneShot => {
if hk_request.unique_id == SetId::SwitcherSet as u32 {
if let Ok(hk_tm) = self.hk_helper.generate_hk_report_packet(
self.stamp_helper.stamp(),
SetId::SwitcherSet as u32,
&mut |hk_buf| {
// Send TM down as JSON.
let switch_map_snapshot = self
.shared_switch_map
.lock()
.expect("failed to lock switch map")
.clone();
let switch_map_json = serde_json::to_string(&switch_map_snapshot)
.expect("failed to serialize switch map");
if switch_map_json.len() > hk_buf.len() {
log::error!("switch map JSON too large for HK buffer");
return Err(ByteConversionError::ToSliceTooSmall {
found: hk_buf.len(),
expected: switch_map_json.len(),
});
}
Ok(switch_map_json.len())
},
&mut self.tm_buf,
) {
self.tm_sender
.send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
.expect("failed to send HK TM");
self.hk_reply_tx
.send(GenericMessage::new(
*requestor_info,
HkReply::new(hk_request.unique_id, HkReplyVariant::Ack),
))
.expect("failed to send HK reply");
}
/// Apply `state` to every switch that is selected in the `switches` bitfield.
pub fn handle_switches_bitfield_request(
    &mut self,
    switches: SwitchesBitfield,
    state: SwitchStateBinary,
) {
    // Table-driven: pair each bitfield flag with the switch it selects.
    let selections = [
        (switches.mgm0(), SwitchId::Mgm0),
        (switches.mgm1(), SwitchId::Mgm1),
        (switches.magnetorquer(), SwitchId::Mgt),
    ];
    for (selected, switch_id) in selections {
        if selected {
            self.handle_device_switching(switch_id, state);
        }
    }
}
/// Pack `response` into a CCSDS TM packet stamped with the current time and
/// queue it for downlink. `tc_id` optionally links the telemetry to the
/// commanding TC. Packing and send errors are logged but not propagated.
pub fn send_tm(&self, tc_id: Option<CcsdsPacketIdAndPsc>, response: pcdu::response::Response) {
    match pack_ccsds_tm_packet_for_now(ComponentId::EpsPcdu, tc_id, &response) {
        Ok(packet) => {
            if let Err(e) = self.tm_tx.send(packet) {
                log::warn!("failed to send TM packet: {}", e);
            }
        }
        // BUGFIX: removed three orphaned `HkRequestVariant::… => todo!()` arms
        // (merge residue from the deleted HK handler) that did not belong to
        // this match and made it invalid.
        Err(e) => {
            log::warn!("failed to pack TM packet: {}", e);
        }
    }
}
/// Transition the handler into `mode`, invalidating the shared switch map
/// when the device goes off, and confirm the transition to the requestor
/// with an OK response.
fn switch_mode(&mut self, requestor: CcsdsPacketIdAndPsc, mode: DeviceMode) {
    log::info!("{}: transitioning to mode {:?}", self.dev_str, mode);
    self.mode = mode;
    // The switch map only reflects reality while the PCDU is powered.
    if matches!(self.mode(), DeviceMode::Off) {
        self.shared_switch_map.lock().unwrap().valid = false;
    }
    log::info!("{} announcing mode: {:?}", self.dev_str, self.mode);
    self.send_telemetry(Some(requestor), pcdu::response::Response::Ok);
}
/// Pack `response` into a CCSDS TM packet and hand it to the TM channel,
/// only logging packing or send failures.
// NOTE(review): behaviorally identical to `send_tm` — consider consolidating.
pub fn send_telemetry(
    &self,
    tc_id: Option<CcsdsPacketIdAndPsc>,
    response: pcdu::response::Response,
) {
    let packed = pack_ccsds_tm_packet_for_now(ComponentId::EpsPcdu, tc_id, &response);
    match packed {
        Err(e) => log::warn!("failed to pack TM packet: {}", e),
        Ok(packet) => {
            if let Err(e) = self.tm_tx.send(packet) {
                log::warn!("failed to send TM packet: {}", e);
            }
        }
    }
}
@@ -326,6 +440,7 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
/*
pub fn handle_mode_requests(&mut self) {
loop {
// TODO: Only allow one set mode request per cycle?
@@ -356,23 +471,28 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
}
*/
/// Serialize a single switch command as JSON and forward it to the PCDU
/// through the COM interface.
pub fn handle_device_switching(&mut self, switch_id: SwitchId, state: SwitchStateBinary) {
    let request = PcduRequest::SwitchDevice {
        switch: switch_id,
        state,
    };
    let serialized =
        serde_json::to_string(&request).expect("failed to send switch request to PCDU");
    self.com_interface
        .send(serialized.as_bytes())
        .expect("failed to send switch request to PCDU");
}
pub fn handle_switch_requests(&mut self) {
loop {
match self.switch_request_rx.try_recv() {
Ok(switch_req) => match PcduSwitch::try_from(switch_req.message.switch_id()) {
Ok(pcdu_switch) => {
let pcdu_req = PcduRequest::SwitchDevice {
switch: pcdu_switch,
state: switch_req.message.target_state(),
};
let pcdu_req_ser = serde_json::to_string(&pcdu_req).unwrap();
self.com_interface
.send(pcdu_req_ser.as_bytes())
.expect("failed to send switch request to PCDU");
}
Err(e) => todo!("failed to convert switch ID {:?} to typed PCDU switch", e),
},
Ok(switch_req) => {
self.handle_device_switching(
switch_req.message.switch_id(),
switch_req.message.target_state(),
);
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
@@ -406,115 +526,33 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
impl<ComInterface: SerialInterface> ModeProvider for PcduHandler<ComInterface> {
fn mode_and_submode(&self) -> ModeAndSubmode {
self.mode_and_submode
}
}
impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterface> {
type Error = ModeError;
fn start_transition(
&mut self,
requestor: MessageMetadata,
mode_and_submode: ModeAndSubmode,
_forced: bool,
) -> Result<(), satrs::mode::ModeError> {
log::info!(
"{}: transitioning to mode {:?}",
self.dev_str,
mode_and_submode
);
self.mode_and_submode = mode_and_submode;
if mode_and_submode.mode() == DeviceMode::Off as u32 {
self.shared_switch_map.lock().unwrap().valid = false;
}
self.handle_mode_reached(Some(requestor))?;
Ok(())
}
fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
log::info!(
"{} announcing mode: {:?}",
self.dev_str,
self.mode_and_submode
);
}
fn handle_mode_reached(
&mut self,
requestor: Option<MessageMetadata>,
) -> Result<(), Self::Error> {
self.announce_mode(requestor, false);
if let Some(requestor) = requestor {
if requestor.sender_id() == NO_SENDER {
return Ok(());
}
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
} else {
self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
}
}
Ok(())
}
fn send_mode_reply(
&self,
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
}
self.mode_node
.send_mode_reply(requestor, reply)
.map_err(|_| GenericSendError::RxDisconnected)?;
Ok(())
}
fn handle_mode_info(
&mut self,
_requestor_info: MessageMetadata,
_info: ModeAndSubmode,
) -> Result<(), Self::Error> {
Ok(())
}
}
impl<ComInterface: SerialInterface> ModeNode for PcduHandler<ComInterface> {
fn id(&self) -> satrs::ComponentId {
PCDU.into()
}
}
impl<ComInterface: SerialInterface> ModeChild for PcduHandler<ComInterface> {
type Sender = mpsc::SyncSender<GenericMessage<ModeReply>>;
fn add_mode_parent(&mut self, id: satrs::ComponentId, reply_sender: Self::Sender) {
self.mode_node.add_message_target(id, reply_sender);
}
}
#[cfg(test)]
mod tests {
use std::sync::mpsc;
use arbitrary_int::u21;
use satrs::{
mode::ModeRequest, power::SwitchStateBinary, request::GenericMessage, tmtc::PacketAsVec,
use arbitrary_int::u11;
use models::{
Apid, TcHeader,
pcdu::{SwitchMapBinary, SwitchStateBinary},
};
use satrs::{
mode::{ModeReply, ModeRequest},
request::{GenericMessage, MessageMetadata},
spacepackets::SpacePacketHeader,
};
use satrs_example::ids::{self, Apid};
use satrs_minisim::eps::SwitchMapBinary;
use super::*;
/// Test helper: wrap a PCDU request into a CCSDS TC packet addressed to the
/// EPS APID with a TC header targeting the PCDU component.
pub fn create_request_tc(
    request: models::pcdu::request::Request,
) -> models::ccsds::CcsdsTcPacketOwned {
    models::ccsds::CcsdsTcPacketOwned::new_with_request(
        SpacePacketHeader::new_from_apid(u11::new(Apid::Eps as u16)),
        TcHeader::new(ComponentId::EpsPcdu, request.message_type()),
        request,
    )
}
#[derive(Default)]
pub struct SerialInterfaceTest {
pub inner: SerialInterfaceDummy,
@@ -550,47 +588,37 @@ mod tests {
}
}
#[allow(dead_code)]
pub struct PcduTestbench {
pub mode_request_tx: mpsc::SyncSender<GenericMessage<ModeRequest>>,
pub mode_reply_rx_to_pus: mpsc::Receiver<GenericMessage<ModeReply>>,
pub mode_reply_rx_to_parent: mpsc::Receiver<GenericMessage<ModeReply>>,
pub composite_request_tx: mpsc::Sender<GenericMessage<CompositeRequest>>,
pub hk_reply_rx: mpsc::Receiver<GenericMessage<HkReply>>,
pub tm_rx: mpsc::Receiver<PacketAsVec>,
pub tc_tx: mpsc::SyncSender<CcsdsTcPacketOwned>,
pub tm_rx: mpsc::Receiver<CcsdsTmPacketOwned>,
pub switch_request_tx: mpsc::Sender<GenericMessage<SwitchRequest>>,
pub handler: PcduHandler<SerialInterfaceTest>,
}
impl PcduTestbench {
pub fn new() -> Self {
let (mode_request_tx, mode_request_rx) = mpsc::sync_channel(5);
let (mode_reply_tx_to_pus, mode_reply_rx_to_pus) = mpsc::sync_channel(5);
let (mode_reply_tx_to_parent, mode_reply_rx_to_parent) = mpsc::sync_channel(5);
let mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), mode_request_rx);
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (hk_reply_tx, hk_reply_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel::<PacketAsVec>(5);
let (mode_request_tx, _mode_request_rx) = mpsc::sync_channel(5);
let (_mode_reply_tx_to_parent, mode_reply_rx_to_parent) = mpsc::sync_channel(5);
let (tc_tx, tc_rx) = mpsc::sync_channel(5);
let (tm_tx, tm_rx) = mpsc::sync_channel(5);
let (switch_request_tx, switch_reqest_rx) = mpsc::channel();
let shared_switch_map = Arc::new(Mutex::new(SwitchSet::default()));
let mut handler = PcduHandler::new(
UniqueApidTargetId::new(Apid::Eps.raw_value(), u21::new(0)),
"TEST_PCDU",
mode_node,
composite_request_rx,
hk_reply_tx,
let shared_switch_map =
Arc::new(Mutex::new(SwitchSet::new_with_init_switches_unknown()));
let handler = PcduHandler::new(
tc_rx,
tm_tx.clone(),
switch_reqest_rx,
TmTcSender::Heap(tm_tx.clone()),
SerialInterfaceTest::default(),
shared_switch_map,
DeviceMode::Off,
);
handler.add_mode_parent(ids::eps::SUBSYSTEM.into(), mode_reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE.into(), mode_reply_tx_to_pus);
Self {
mode_request_tx,
mode_reply_rx_to_pus,
mode_reply_rx_to_parent,
composite_request_tx,
hk_reply_rx,
tc_tx,
tm_rx,
switch_request_tx,
handler,
@@ -610,7 +638,7 @@ mod tests {
pub fn verify_switch_req_was_sent(
&self,
expected_queue_len: usize,
switch_id: PcduSwitch,
switch_id: SwitchId,
target_state: SwitchStateBinary,
) {
// Check that there is now communication happening.
@@ -651,11 +679,7 @@ mod tests {
testbench.handler.com_interface.reply_queue.borrow().len(),
0
);
assert_eq!(
testbench.handler.mode_and_submode().mode(),
DeviceMode::Off as u32
);
assert_eq!(testbench.handler.mode_and_submode().submode(), 0_u16);
assert_eq!(testbench.handler.mode(), DeviceMode::Off);
testbench.handler.periodic_operation(OpCode::RegularOp);
testbench
.handler
@@ -666,39 +690,27 @@ mod tests {
testbench.handler.com_interface.reply_queue.borrow().len(),
0
);
assert_eq!(
testbench.handler.mode_and_submode().mode(),
DeviceMode::Off as u32
);
assert_eq!(testbench.handler.mode_and_submode().submode(), 0_u16);
assert_eq!(testbench.handler.mode(), DeviceMode::Off);
}
#[test]
fn test_normal_mode() {
let mut testbench = PcduTestbench::new();
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
},
))
.expect("failed to send mode request");
.tc_tx
.send(create_request_tc(pcdu::request::Request::Mode(
DeviceMode::Normal,
)))
.unwrap();
let switch_map_shared = testbench.handler.shared_switch_map.lock().unwrap();
assert!(!switch_map_shared.valid);
assert!(switch_map_shared.valid);
drop(switch_map_shared);
testbench.handler.periodic_operation(OpCode::RegularOp);
testbench
.handler
.periodic_operation(OpCode::PollAndRecvReplies);
// Check correctness of mode.
assert_eq!(
testbench.handler.mode_and_submode().mode(),
DeviceMode::Normal as u32
);
assert_eq!(testbench.handler.mode_and_submode().submode(), 0);
assert_eq!(testbench.handler.mode(), DeviceMode::Normal);
testbench.verify_switch_info_req_was_sent(1);
testbench.verify_switch_reply_received(1, SwitchMapBinaryWrapper::default().0);
@@ -712,20 +724,16 @@ mod tests {
fn test_switch_request_handling() {
let mut testbench = PcduTestbench::new();
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
},
))
.expect("failed to send mode request");
.tc_tx
.send(create_request_tc(pcdu::request::Request::Mode(
DeviceMode::Normal,
)))
.unwrap();
testbench
.switch_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, ids::acs::MGM0.id()),
SwitchRequest::new(0, SwitchStateBinary::On),
MessageMetadata::new(0, ComponentId::AcsMgm0 as u32),
SwitchRequest::new(SwitchId::Mgm0, SwitchStateBinary::On),
))
.expect("failed to send switch request");
testbench.handler.periodic_operation(OpCode::RegularOp);
@@ -733,11 +741,11 @@ mod tests {
.handler
.periodic_operation(OpCode::PollAndRecvReplies);
testbench.verify_switch_req_was_sent(2, PcduSwitch::Mgm, SwitchStateBinary::On);
testbench.verify_switch_req_was_sent(2, SwitchId::Mgm0, SwitchStateBinary::On);
testbench.verify_switch_info_req_was_sent(1);
let mut switch_map = SwitchMapBinaryWrapper::default().0;
*switch_map
.get_mut(&PcduSwitch::Mgm)
.get_mut(&SwitchId::Mgm0)
.expect("switch state setting failed") = SwitchStateBinary::On;
testbench.verify_switch_reply_received(1, switch_map);
+35
View File
@@ -0,0 +1,35 @@
use models::{ComponentId, Event, Message, ccsds::CcsdsTmPacketOwned, control};
use crate::ccsds::pack_ccsds_tm_packet_for_now;
// TODO: We should add the capability to enable/disable the TM generation of individual events and
// event groups as well.
/// Converts controller events received on a channel into CCSDS TM packets.
pub struct EventManager {
    pub ctrl_rx: std::sync::mpsc::Receiver<control::Event>,
    pub tm_tx: std::sync::mpsc::SyncSender<CcsdsTmPacketOwned>,
}

impl EventManager {
    /// Poll the controller event channel once, converting at most one pending
    /// event into telemetry per call.
    pub fn periodic_operation(&mut self) {
        match self.ctrl_rx.try_recv() {
            Ok(event) => {
                self.event_to_tm(ComponentId::Controller, &Event::ControllerEvent(event));
            }
            Err(_) => (),
        }
    }

    /// Pack `event` into a CCSDS TM packet attributed to `sender_id` and
    /// queue it for downlink, logging any packing or send failure.
    pub fn event_to_tm(
        &mut self,
        sender_id: ComponentId,
        event: &(impl serde::Serialize + Message),
    ) {
        let packed = pack_ccsds_tm_packet_for_now(sender_id, None, event);
        match packed {
            Err(e) => {
                log::warn!("error packing event TM packet: {:?}", e);
            }
            Ok(packet) => {
                if let Err(e) = self.tm_tx.send(packet) {
                    log::warn!("error sending event TM packet: {:?}", e);
                }
            }
        }
    }
}
-109
View File
@@ -1,109 +0,0 @@
//! This is an auto-generated configuration module.
use satrs::request::UniqueApidTargetId;
#[derive(Debug, PartialEq, Eq, strum::EnumIter)]
#[bitbybit::bitenum(u11)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
Eps = 6,
}
pub mod acs {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
Subsystem = 1,
Assembly = 2,
Mgm0 = 3,
Mgm1 = 4,
}
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Subsystem.raw_value());
pub const ASSEMBLY: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Assembly.raw_value());
pub const MGM0: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm0.raw_value());
pub const MGM1: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm1.raw_value());
}
pub mod eps {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
Pcdu = 0,
Subsystem = 1,
}
pub const PCDU: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Pcdu.raw_value());
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Subsystem.raw_value());
}
pub mod generic_pus {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
pub const PUS_EVENT_MANAGEMENT: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusEventManagement.raw_value(),
);
pub const PUS_ROUTING: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusRouting.raw_value(),
);
pub const PUS_TEST: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusTest.raw_value(),
);
pub const PUS_ACTION: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusAction.raw_value(),
);
pub const PUS_MODE: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusMode.raw_value(),
);
pub const PUS_HK: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusHk.raw_value());
}
pub mod sched {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
PusSched = 0,
}
pub const PUS_SCHED: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Sched.raw_value(), Id::PusSched.raw_value());
}
pub mod tmtc {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
UdpServer = 0,
TcpServer = 1,
}
pub const UDP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::UdpServer.raw_value());
pub const TCP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::TcpServer.raw_value());
}
@@ -7,8 +7,8 @@ use std::{
use satrs::pus::HandlingStatus;
use satrs_minisim::{
udp::SIM_CTRL_PORT, SerializableSimMsgPayload, SimComponent, SimMessageProvider, SimReply,
SimRequest,
SerializableSimMsgPayload, SimComponent, SimMessageProvider, SimReply, SimRequest,
udp::SIM_CTRL_PORT,
};
use satrs_minisim::{SimCtrlReply, SimCtrlRequest};
@@ -187,16 +187,17 @@ pub mod tests {
collections::HashMap,
net::{SocketAddr, UdpSocket},
sync::{
Arc,
atomic::{AtomicBool, Ordering},
mpsc, Arc,
mpsc,
},
time::Duration,
};
use satrs_minisim::{
eps::{PcduReply, PcduRequest},
SerializableSimMsgPayload, SimComponent, SimCtrlReply, SimCtrlRequest, SimMessageProvider,
SimReply, SimRequest,
eps::{PcduReply, PcduRequest},
};
use super::SimClientUdp;
+7 -9
View File
@@ -129,15 +129,13 @@ impl TcpTask {
}
pub fn periodic_operation(&mut self) {
loop {
let result = self
.0
.handle_all_connections(Some(Duration::from_millis(400)));
match result {
Ok(_conn_result) => (),
Err(e) => {
warn!("TCP server error: {e:?}");
}
let result = self
.0
.handle_all_connections(Some(Duration::from_millis(400)));
match result {
Ok(_conn_result) => (),
Err(e) => {
warn!("TCP server error: {e:?}");
}
}
}
+61 -76
View File
@@ -1,66 +1,29 @@
#![allow(dead_code)]
use std::collections::VecDeque;
use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use std::sync::{Arc, Mutex, mpsc};
use log::{info, warn};
use log::warn;
use models::ccsds::CcsdsTmPacketOwned;
use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use satrs::pus::HandlingStatus;
use satrs::queue::GenericSendError;
use satrs::tmtc::PacketAsVec;
use satrs::pool::{PoolProviderWithGuards, SharedStaticMemoryPool};
use satrs::tmtc::PacketInPool;
use crate::tmtc::sender::TmTcSender;
pub trait UdpTmHandler {
pub trait UdpTmHandlerProvider {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr);
}
pub struct StaticUdpTmHandler {
pub tm_rx: mpsc::Receiver<PacketInPool>,
pub tm_store: SharedStaticMemoryPool,
pub struct UdpTmHandlerWithChannel {
pub tm_rx: mpsc::Receiver<CcsdsTmPacketOwned>,
}
impl UdpTmHandler for StaticUdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) {
while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() {
let store_lock = self.tm_store.write();
if store_lock.is_err() {
warn!("Locking TM store failed");
continue;
}
let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr);
let read_res = pg.read_as_vec();
if read_res.is_err() {
warn!("Error reading TM pool data");
continue;
}
let buf = read_res.unwrap();
let result = socket.send_to(&buf, recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
}
}
}
pub struct DynamicUdpTmHandler {
pub tm_rx: mpsc::Receiver<PacketAsVec>,
}
impl UdpTmHandler for DynamicUdpTmHandler {
impl UdpTmHandlerProvider for UdpTmHandlerWithChannel {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) {
while let Ok(tm) = self.tm_rx.try_recv() {
if tm.packet.len() > 9 {
let service = tm.packet[7];
let subservice = tm.packet[8];
info!("Sending PUS TM[{service},{subservice}]")
} else {
info!("Sending PUS TM");
}
let result = socket.send_to(&tm.packet, recv_addr);
log::debug!("sending TM from sender {:?}", tm.tm_header.sender_id);
let result = socket.send_to(&tm.to_vec(), recv_addr);
if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}")
}
@@ -68,12 +31,49 @@ impl UdpTmHandler for DynamicUdpTmHandler {
}
}
pub struct UdpTmtcServer<TmHandler: UdpTmHandler> {
pub udp_tc_server: UdpTcServer<TmTcSender, GenericSendError>,
pub tm_handler: TmHandler,
#[derive(Default, Debug, Clone)]
pub struct TestTmHandler {
addrs_to_send_to: Arc<Mutex<VecDeque<SocketAddr>>>,
}
impl<TmHandler: UdpTmHandler> UdpTmtcServer<TmHandler> {
impl UdpTmHandlerProvider for TestTmHandler {
fn send_tm_to_udp_client(&mut self, _socket: &UdpSocket, recv_addr: &SocketAddr) {
self.addrs_to_send_to.lock().unwrap().push_back(*recv_addr);
}
}
/// Statically dispatched TM handler used by the UDP TM/TC server.
pub enum UdpTmHandler {
    /// Production handler fed by a TM channel.
    Normal(UdpTmHandlerWithChannel),
    /// Recording handler for unit tests.
    Test(TestTmHandler),
}

impl From<UdpTmHandlerWithChannel> for UdpTmHandler {
    fn from(handler: UdpTmHandlerWithChannel) -> Self {
        Self::Normal(handler)
    }
}

impl From<TestTmHandler> for UdpTmHandler {
    fn from(handler: TestTmHandler) -> Self {
        Self::Test(handler)
    }
}

impl UdpTmHandlerProvider for UdpTmHandler {
    /// Forward the call to whichever concrete handler this enum wraps.
    fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) {
        match self {
            Self::Normal(inner) => inner.send_tm_to_udp_client(socket, recv_addr),
            Self::Test(inner) => inner.send_tm_to_udp_client(socket, recv_addr),
        }
    }
}
/// UDP TM/TC server: receives telecommands through `udp_tc_server` and sends
/// telemetry back to the most recent client through `tm_handler`.
pub struct UdpTmtcServer {
    pub udp_tc_server: UdpTcServer<TmTcSender, GenericSendError>,
    pub tm_handler: UdpTmHandler,
}
impl UdpTmtcServer {
pub fn periodic_operation(&mut self) {
loop {
if self.poll_tc_server() == HandlingStatus::Empty {
@@ -107,43 +107,28 @@ impl<TmHandler: UdpTmHandler> UdpTmtcServer<TmHandler> {
#[cfg(test)]
mod tests {
use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::{
collections::VecDeque,
net::IpAddr,
sync::{Arc, Mutex},
};
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use models::Apid;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::{
spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
SpHeader,
},
ComponentId,
spacepackets::{
SpHeader,
ecss::{WritablePusPacket, tc::PusTcCreator},
},
};
use satrs_example::config::OBSW_SERVER_ADDR;
use satrs_example::ids;
use crate::tmtc::sender::{MockSender, TmTcSender};
use crate::tmtc::sender::MockSender;
use super::*;
const UDP_SERVER_ID: ComponentId = 0x05;
#[derive(Default, Debug, Clone)]
pub struct TestTmHandler {
addrs_to_send_to: Arc<Mutex<VecDeque<SocketAddr>>>,
}
impl UdpTmHandler for TestTmHandler {
fn send_tm_to_udp_client(&mut self, _socket: &UdpSocket, recv_addr: &SocketAddr) {
self.addrs_to_send_to.lock().unwrap().push_back(*recv_addr);
}
}
#[test]
fn test_basic() {
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
@@ -154,7 +139,7 @@ mod tests {
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
let mut udp_dyn_server = UdpTmtcServer {
udp_tc_server,
tm_handler,
tm_handler: tm_handler.into(),
};
udp_dyn_server.periodic_operation();
let queue = udp_dyn_server
@@ -179,9 +164,9 @@ mod tests {
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
let mut udp_dyn_server = UdpTmtcServer {
udp_tc_server,
tm_handler,
tm_handler: tm_handler.into(),
};
let sph = SpHeader::new_for_unseg_tc(ids::Apid::GenericPus.raw_value(), u14::ZERO, 0);
let sph = SpHeader::new_for_unseg_tc(Apid::Tmtc.raw_value(), u14::ZERO, 0);
let ping_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
@@ -1,6 +1,3 @@
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u11;
use satrs::event_man_legacy::{EventMessageU32, EventRoutingError};
@@ -34,6 +31,7 @@ impl EventTmHook for EventApidSetter {
}
}
/*
/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
/// packets. It also handles the verification completion of PUS event service requests.
pub struct PusEventHandler<TmSender: EcssTmSender> {
@@ -292,3 +290,4 @@ mod tests {
// TODO: Add test.
}
}
*/
@@ -39,6 +39,7 @@ pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> Verifi
VerificationReporter::new(owner_id, &verif_cfg)
}
/*
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: mpsc::SyncSender<EcssTcAndToken>,
@@ -187,6 +188,7 @@ impl PusTcDistributor {
Ok(HandlingStatus::HandledOne)
}
}
*/
pub trait TargetedPusService {
const SERVICE_ID: u8;
@@ -5,7 +5,7 @@ use crate::pus::create_verification_reporter;
use crate::tmtc::sender::TmTcSender;
use log::info;
use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler::{PusSchedulerAlloc, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
@@ -86,7 +86,7 @@ pub struct SchedulingServiceWrapper {
TmTcSender,
EcssTcCacher,
VerificationReporter,
PusScheduler,
PusSchedulerAlloc,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
@@ -179,7 +179,7 @@ pub fn create_scheduler_service(
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> SchedulingServiceWrapper {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
let scheduler = PusSchedulerAlloc::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
@@ -15,8 +15,8 @@ use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::ComponentId;
use satrs_example::config::tmtc_err;
use satrs_example::ids;
/*
#[derive(Clone, Debug)]
#[non_exhaustive]
pub enum CompositeRequest {
@@ -153,3 +153,4 @@ impl PusRequestRouter<ModeRequest> for GenericRequestRouter {
Err(GenericRoutingError::UnknownTargetId(target_id))
}
}
*/
+124 -7
View File
@@ -1,13 +1,28 @@
use satrs::spacepackets::time::cds::CdsTime;
extern crate alloc;
use std::{
sync::mpsc,
time::{Duration, Instant},
};
pub use models::ComponentId;
use models::ccsds::{CcsdsTcPacketOwned, CcsdsTmPacketOwned};
use satrs::spacepackets::{CcsdsPacketIdAndPsc, time::cds::CdsTime};
pub mod config;
pub mod ids;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {
Off = 0,
On = 1,
Normal = 2,
/// Heap-allocated packet bundled with the ID of the component that produced
/// it, intended for transport over message queues.
#[derive(Debug, PartialEq, Eq, Clone)]
pub struct PacketAsVec {
    pub sender_id: ComponentId,
    pub packet: Vec<u8>,
}

impl PacketAsVec {
    /// Bundle `packet` with the ID of the sending component.
    pub fn new(sender_id: ComponentId, packet: Vec<u8>) -> Self {
        PacketAsVec { sender_id, packet }
    }
}
pub struct TimestampHelper {
@@ -38,3 +53,105 @@ impl Default for TimestampHelper {
}
}
}
/// Tracks enable state and timing for periodic generation of one housekeeping set.
#[derive(Debug)]
pub struct HkHelperSingleSet {
    pub enabled: bool,
    pub frequency: Duration,
    pub last_generated: Option<Instant>,
}

impl HkHelperSingleSet {
    /// Create a helper with the given enable state and generation period.
    #[inline]
    pub const fn new(enabled: bool, init_frequency: Duration) -> Self {
        Self {
            enabled,
            frequency: init_frequency,
            last_generated: None,
        }
    }

    /// Whether periodic generation is currently enabled.
    #[inline]
    pub const fn enabled(&self) -> bool {
        self.enabled
    }

    /// Returns true exactly when a new HK packet is due: immediately on the
    /// first call while enabled, and afterwards whenever at least `frequency`
    /// has elapsed since the last due time. Refreshes the internal timestamp
    /// whenever it returns true.
    pub fn needs_generation(&mut self) -> bool {
        if !self.enabled {
            return false;
        }
        match self.last_generated {
            None => {
                self.last_generated = Some(Instant::now());
                true
            }
            Some(last) if last.elapsed() >= self.frequency => {
                self.last_generated = Some(Instant::now());
                true
            }
            Some(_) => false,
        }
    }
}
/// Pair of queue endpoints handed to a task: incoming telecommands and
/// outgoing telemetry.
pub struct TmtcQueues {
    pub tc_rx: mpsc::Receiver<CcsdsTcPacketOwned>,
    pub tm_tx: mpsc::SyncSender<CcsdsTmPacketOwned>,
}
/// Book-keeping for a component mode transition: current and target mode,
/// the commanding TC (if any), and timeout tracking for the transition.
#[derive(Debug)]
pub struct ModeHelper<Mode, TransitionState> {
    pub current: Mode,
    pub target: Option<Mode>,
    pub tc_commander: Option<CcsdsPacketIdAndPsc>,
    pub transition_start: Option<Instant>,
    pub timeout: Duration,
    pub transition_state: TransitionState,
}

impl<Mode: Copy + Clone, TransitionState: Default> ModeHelper<Mode, TransitionState> {
    /// Create a helper resting in `init_mode` with the given transition timeout.
    pub fn new(init_mode: Mode, timeout: Duration) -> Self {
        Self {
            current: init_mode,
            target: Default::default(),
            tc_commander: Default::default(),
            transition_start: None,
            timeout,
            transition_state: Default::default(),
        }
    }

    /// Begin a transition towards `target`, resetting the per-transition
    /// state and starting the timeout clock.
    pub fn start(&mut self, target: Mode) {
        self.target = Some(target);
        self.transition_start = Some(Instant::now());
        self.transition_state = TransitionState::default();
    }

    /// True while a transition target is pending.
    #[inline]
    pub fn transition_active(&self) -> bool {
        self.target.is_some()
    }

    /// True if an active transition has exceeded the configured timeout.
    pub fn timed_out(&self) -> bool {
        if !self.transition_active() {
            return false;
        }
        self.transition_start
            .map_or(false, |started| started.elapsed() >= self.timeout)
    }

    /// Conclude the active transition. On success the target becomes the
    /// current mode; on failure the target is simply dropped. Returns the
    /// stored commanding TC ID so a reply can be sent, or `None` (with no
    /// side effects) when no transition was active.
    pub fn finish(&mut self, success: bool) -> Option<CcsdsPacketIdAndPsc> {
        let reached = self.target.take()?;
        if success {
            self.current = reached;
        }
        self.transition_state = Default::default();
        self.transition_start = None;
        self.tc_commander.take()
    }
}
+10 -1
View File
@@ -1,4 +1,13 @@
use std::str::FromStr as _;
pub fn setup_logger() -> Result<(), fern::InitError> {
    // Read the RUST_LOG environment variable and parse it.
    // If the variable is not set or is invalid, default to Info.
let mut log_level = log::LevelFilter::Info;
if let Ok(log_level_str) = std::env::var("RUST_LOG") {
log_level = log::LevelFilter::from_str(&log_level_str).unwrap_or(log::LevelFilter::Info);
}
fern::Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
@@ -9,7 +18,7 @@ pub fn setup_logger() -> Result<(), fern::InitError> {
message
))
})
.level(log::LevelFilter::Debug)
.level(log_level)
.chain(std::io::stdout())
.chain(fern::log_file("output.log")?)
.apply()?;
+185 -306
View File
@@ -1,16 +1,18 @@
use std::{
net::{IpAddr, SocketAddr},
sync::{mpsc, Arc, Mutex},
sync::{
Arc, Mutex,
atomic::{AtomicBool, Ordering},
mpsc,
},
thread,
time::Duration,
};
use acs::mgm::{MgmHandlerLis3Mdl, SpiDummyInterface, SpiSimInterface, SpiSimInterfaceWrapper};
use eps::{
pcdu::{PcduHandler, SerialInterfaceDummy, SerialInterfaceToSim, SerialSimInterfaceWrapper},
PowerSwitchHelper,
pcdu::{PcduHandler, SerialInterfaceDummy, SerialInterfaceToSim, SerialSimInterfaceWrapper},
};
use events::EventHandler;
use interface::{
sim_client_udp::create_sim_client,
tcp::{SyncTcpTmSource, TcpTask},
@@ -18,256 +20,117 @@ use interface::{
};
use log::info;
use logger::setup_logger;
use pus::{
action::create_action_service,
event::create_event_service,
hk::create_hk_service,
mode::create_mode_service,
scheduler::{create_scheduler_service, TcReleaser},
stack::PusStack,
test::create_test_service,
PusTcDistributor, PusTcMpscRouter,
};
use requests::GenericRequestRouter;
use models::{ComponentId, DeviceMode};
use satrs::{
hal::std::{tcp_server::ServerConfig, udp_server::UdpTcServer},
mode::{Mode, ModeAndSubmode, ModeRequest, ModeRequestHandlerMpscBounded},
mode_tree::connect_mode_nodes,
pus::{event_man::EventRequestWithToken, EcssTcCacher, HandlingStatus},
mode::{Mode, ModeAndSubmode, ModeRequest},
pus::HandlingStatus,
request::{GenericMessage, MessageMetadata},
spacepackets::time::cds::CdsTime,
};
use satrs_example::{
TmtcQueues,
config::{
components::NO_SENDER,
pool::create_sched_tc_pool,
tasks::{FREQ_MS_AOCS, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, SIM_CLIENT_IDLE_DELAY_MS},
OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT,
components::NO_SENDER,
tasks::{FREQ_MS_AOCS, FREQ_MS_CONTROLLER, FREQ_MS_UDP_TMTC, SIM_CLIENT_IDLE_DELAY_MS},
},
ids::{
acs::*,
eps::*,
tmtc::{TCP_SERVER, UDP_SERVER},
},
DeviceMode,
};
use tmtc::sender::TmTcSender;
use tmtc::{tc_source::TcSourceTask, tm_sink::TmSink};
cfg_if::cfg_if! {
if #[cfg(feature = "heap_tmtc")] {
use interface::udp::DynamicUdpTmHandler;
use satrs::pus::EcssTcVecCacher;
use tmtc::{tc_source::TcSourceTaskDynamic, tm_sink::TmSinkDynamic};
} else {
use std::sync::RwLock;
use interface::udp::StaticUdpTmHandler;
use satrs::pus::EcssTcInSharedPoolCacher;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use satrs_example::config::pool::create_static_pools;
use tmtc::{
tc_source::TcSourceTaskStatic,
tm_sink::TmSinkStatic,
};
}
}
use crate::{
acs::{mgm, mgm_assembly},
control::Controller,
eps::pcdu::SwitchSet,
event_manager::EventManager,
interface::udp::UdpTmHandlerWithChannel,
tmtc::tc_source::CcsdsDistributor,
};
mod acs;
mod ccsds;
mod control;
mod eps;
mod events;
mod hk;
mod event_manager;
mod interface;
mod logger;
mod pus;
mod requests;
mod spi;
mod tmtc;
fn main() {
setup_logger().expect("setting up logging with fern failed");
println!("Running OBSW example");
static KILL_SIGNAL: AtomicBool = AtomicBool::new(false);
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let (tm_pool, tc_pool) = create_static_pools();
let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
let shared_tc_pool = Arc::new(RwLock::new(tc_pool));
let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
let shared_tc_pool_wrapper = SharedPacketPool::new(&shared_tc_pool);
}
}
setup_logger().expect("setting up logging with fern failed");
println!("Runng OBSW example");
ctrlc::set_handler(move || {
log::info!("Received Ctrl-C, shutting down");
KILL_SIGNAL.store(true, Ordering::Relaxed);
})
.expect("Error setting Ctrl-C handler");
let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
let (tm_sink_tx, tm_sink_rx) = mpsc::sync_channel(50);
let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let tm_sender = TmTcSender::Static(
PacketSenderWithSharedPool::new(tm_sink_tx.clone(), shared_tm_pool_wrapper.clone())
);
} else if #[cfg(feature = "heap_tmtc")] {
let tm_sender = TmTcSender::Heap(tm_sink_tx.clone());
}
}
let (sim_request_tx, sim_request_rx) = mpsc::channel();
let (mgm_0_sim_reply_tx, mgm_0_sim_reply_rx) = mpsc::channel();
let (mgm_1_sim_reply_tx, mgm_1_sim_reply_rx) = mpsc::channel();
let (pcdu_sim_reply_tx, pcdu_sim_reply_rx) = mpsc::channel();
let mut opt_sim_client = create_sim_client(sim_request_rx);
let (mgm_0_handler_composite_tx, mgm_0_handler_composite_rx) = mpsc::sync_channel(10);
let (mgm_1_handler_composite_tx, mgm_1_handler_composite_rx) = mpsc::sync_channel(10);
let (pcdu_handler_composite_tx, pcdu_handler_composite_rx) = mpsc::sync_channel(30);
let (mgm_0_handler_mode_tx, mgm_0_handler_mode_rx) = mpsc::sync_channel(5);
let (mgm_1_handler_mode_tx, mgm_1_handler_mode_rx) = mpsc::sync_channel(5);
let (pcdu_handler_mode_tx, pcdu_handler_mode_rx) = mpsc::sync_channel(5);
let (mgm_0_handler_tc_tx, mgm_0_handler_tc_rx) = mpsc::sync_channel(10);
let (mgm_1_handler_tc_tx, mgm_1_handler_tc_rx) = mpsc::sync_channel(10);
let (mgm_assembly_tc_tx, mgm_assembly_tc_rx) = mpsc::sync_channel(10);
let (pcdu_handler_tc_tx, pcdu_handler_tc_rx) = mpsc::sync_channel(30);
let (controller_tc_tx, controller_tc_rx) = mpsc::sync_channel(10);
    // Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
let mut request_map = GenericRequestRouter::default();
request_map
.composite_router_map
.insert(MGM0.id(), mgm_0_handler_composite_tx);
request_map
.composite_router_map
.insert(MGM1.id(), mgm_1_handler_composite_tx);
request_map
.composite_router_map
.insert(PCDU.id(), pcdu_handler_composite_tx);
// These message handles need to go into the MGM assembly and ACS subsystem.
let (_mgm_assembly_request_tx, mgm_assembly_request_rx) = mpsc::sync_channel(5);
let (mgm_assembly_report_tx, _mgm_assembly_report_rx) = mpsc::sync_channel(5);
// This helper structure is used by all telecommand providers which need to send telecommands
// to the TC source.
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let tc_sender_with_shared_pool =
PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());
let tc_in_mem_converter =
EcssTcCacher::Static(EcssTcInSharedPoolCacher::new(shared_tc_pool, 4096));
} else if #[cfg(feature = "heap_tmtc")] {
let tc_in_mem_converter = EcssTcCacher::Heap(EcssTcVecCacher::default());
}
}
// These message handles need to go into the MGM assembly and MGM devices.
let (mgm_0_mode_request_tx, mgm_0_mode_request_rx) = mpsc::sync_channel(5);
let (mgm_1_mode_request_tx, mgm_1_mode_request_rx) = mpsc::sync_channel(5);
let (mgm_0_mode_report_tx, mgm_0_mode_report_rx) = mpsc::sync_channel(5);
let (mgm_1_mode_report_tx, mgm_1_mode_report_rx) = mpsc::sync_channel(5);
// Create event handling components
// These sender handles are used to send event requests, for example to enable or disable
// certain events.
let (event_tx, event_rx) = mpsc::sync_channel(100);
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
let (pcdu_handler_mode_tx, _pcdu_handler_mode_rx) = mpsc::sync_channel(5);
// The event task is the core handler to perform the event routing and TM handling as specified
// in the sat-rs documentation.
let mut event_handler = EventHandler::new(tm_sink_tx.clone(), event_rx, event_request_rx);
let (pus_test_tx, pus_test_rx) = mpsc::sync_channel(20);
let (pus_event_tx, pus_event_rx) = mpsc::sync_channel(10);
let (pus_sched_tx, pus_sched_rx) = mpsc::sync_channel(50);
let (pus_hk_tx, pus_hk_rx) = mpsc::sync_channel(50);
let (pus_action_tx, pus_action_rx) = mpsc::sync_channel(50);
let (pus_mode_tx, pus_mode_rx) = mpsc::sync_channel(50);
let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::sync_channel(50);
let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::sync_channel(30);
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let tc_releaser = TcReleaser::Static(tc_sender_with_shared_pool.clone());
} else if #[cfg(feature = "heap_tmtc")] {
let tc_releaser = TcReleaser::Heap(tc_source_tx.clone());
}
}
let pus_router = PusTcMpscRouter {
test_tc_sender: pus_test_tx,
event_tc_sender: pus_event_tx,
sched_tc_sender: pus_sched_tx,
hk_tc_sender: pus_hk_tx,
action_tc_sender: pus_action_tx,
mode_tc_sender: pus_mode_tx,
let (event_ctrl_tx, event_ctrl_rx) = mpsc::sync_channel(10);
let mut event_manager = EventManager {
ctrl_rx: event_ctrl_rx,
tm_tx: tm_sink_tx.clone(),
};
let pus_test_service = create_test_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
event_tx.clone(),
pus_test_rx,
);
let pus_scheduler_service = create_scheduler_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
tc_releaser,
pus_sched_rx,
create_sched_tc_pool(),
);
let pus_event_service = create_event_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
pus_event_rx,
event_request_tx,
);
let pus_action_service = create_action_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
pus_action_rx,
request_map.clone(),
pus_action_reply_rx,
);
let pus_hk_service = create_hk_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
pus_hk_rx,
request_map.clone(),
pus_hk_reply_rx,
);
let pus_mode_service = create_mode_service(
tm_sender.clone(),
tc_in_mem_converter.clone(),
pus_mode_rx,
request_map,
pus_mode_reply_rx,
);
let mut pus_stack = PusStack::new(
pus_test_service,
pus_hk_service,
pus_event_service,
pus_action_service,
pus_scheduler_service,
pus_mode_service,
);
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let mut tmtc_task = TcSourceTask::Static(TcSourceTaskStatic::new(
shared_tc_pool_wrapper.clone(),
tc_source_rx,
PusTcDistributor::new(tm_sender.clone(), pus_router),
));
let tc_sender = TmTcSender::Static(tc_sender_with_shared_pool);
let udp_tm_handler = StaticUdpTmHandler {
tm_rx: tm_server_rx,
tm_store: shared_tm_pool.clone(),
};
} else if #[cfg(feature = "heap_tmtc")] {
let mut tmtc_task = TcSourceTask::Heap(TcSourceTaskDynamic::new(
tc_source_rx,
PusTcDistributor::new(tm_sender.clone(), pus_router),
));
let tc_sender = TmTcSender::Heap(tc_source_tx.clone());
let udp_tm_handler = DynamicUdpTmHandler {
tm_rx: tm_server_rx,
};
}
}
let mut controller = Controller::new(controller_tc_rx, tm_sink_tx.clone(), event_ctrl_tx);
let ccsds_distributor = CcsdsDistributor::default();
let mut tc_source = TcSourceTask::new(tc_source_rx, ccsds_distributor);
tc_source.add_target(ComponentId::EpsPcdu, pcdu_handler_tc_tx);
tc_source.add_target(ComponentId::Controller, controller_tc_tx);
tc_source.add_target(ComponentId::AcsMgm0, mgm_0_handler_tc_tx);
tc_source.add_target(ComponentId::AcsMgm1, mgm_1_handler_tc_tx);
tc_source.add_target(ComponentId::AcsMgmAssembly, mgm_assembly_tc_tx);
let tc_sender = TmTcSender::Normal(tc_source_tx.clone());
let udp_tm_handler = UdpTmHandlerWithChannel {
tm_rx: tm_server_rx,
};
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_sender.clone())
.expect("creating UDP TMTC server failed");
let udp_tc_server = UdpTcServer::new(
ComponentId::UdpServer as u32,
sock_addr,
2048,
tc_sender.clone(),
)
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
udp_tc_server,
tm_handler: udp_tm_handler,
tm_handler: udp_tm_handler.into(),
};
let tcp_server_cfg = ServerConfig::new(
TCP_SERVER.id(),
ComponentId::TcpServer as u32,
sock_addr,
Duration::from_millis(400),
4096,
@@ -282,31 +145,14 @@ fn main() {
)
.expect("tcp server creation failed");
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
let mut tm_sink = TmSink::Static(TmSinkStatic::new(
shared_tm_pool_wrapper,
sync_tm_tcp_source,
tm_sink_rx,
tm_server_tx,
));
} else if #[cfg(feature = "heap_tmtc")] {
let mut tm_sink = TmSink::Heap(TmSinkDynamic::new(
sync_tm_tcp_source,
tm_sink_rx,
tm_server_tx,
));
}
}
let mut tm_sink = TmSink::new(sync_tm_tcp_source, tm_sink_rx, tm_server_tx);
let shared_switch_set = Arc::new(Mutex::default());
let shared_switch_set = Arc::new(Mutex::new(SwitchSet::new_with_init_switches_unknown()));
let (switch_request_tx, switch_request_rx) = mpsc::sync_channel(20);
let switch_helper = PowerSwitchHelper::new(switch_request_tx, shared_switch_set.clone());
let shared_mgm_0_set = Arc::default();
let shared_mgm_1_set = Arc::default();
let mgm_0_mode_node = ModeRequestHandlerMpscBounded::new(MGM0.into(), mgm_0_handler_mode_rx);
let mgm_1_mode_node = ModeRequestHandlerMpscBounded::new(MGM1.into(), mgm_1_handler_mode_rx);
let (mgm_0_spi_interface, mgm_1_spi_interface) =
if let Some(sim_client) = opt_sim_client.as_mut() {
sim_client
@@ -314,55 +160,65 @@ fn main() {
sim_client
.add_reply_recipient(satrs_minisim::SimComponent::Mgm1Lis3Mdl, mgm_1_sim_reply_tx);
(
SpiSimInterfaceWrapper::Sim(SpiSimInterface {
mgm::SpiCommunication::Sim(mgm::SpiSimInterface {
sim_request_tx: sim_request_tx.clone(),
sim_reply_rx: mgm_0_sim_reply_rx,
}),
SpiSimInterfaceWrapper::Sim(SpiSimInterface {
mgm::SpiCommunication::Sim(mgm::SpiSimInterface {
sim_request_tx: sim_request_tx.clone(),
sim_reply_rx: mgm_1_sim_reply_rx,
}),
)
} else {
(
SpiSimInterfaceWrapper::Dummy(SpiDummyInterface::default()),
SpiSimInterfaceWrapper::Dummy(SpiDummyInterface::default()),
mgm::SpiCommunication::Dummy(mgm::SpiDummyInterface::default()),
mgm::SpiCommunication::Dummy(mgm::SpiDummyInterface::default()),
)
};
let mut mgm_0_handler = MgmHandlerLis3Mdl::new(
MGM0,
"MGM_0",
mgm_0_mode_node,
mgm_0_handler_composite_rx,
pus_hk_reply_tx.clone(),
let mut mgm_0_handler = mgm::MgmHandlerLis3Mdl::new(
mgm::MgmId::_0,
TmtcQueues {
tc_rx: mgm_0_handler_tc_rx,
tm_tx: tm_sink_tx.clone(),
},
switch_helper.clone(),
tm_sender.clone(),
mgm_0_spi_interface,
shared_mgm_0_set,
mgm::ModeLeafHelper {
request_rx: mgm_0_mode_request_rx,
report_tx: mgm_0_mode_report_tx,
},
Duration::from_millis(1000)
);
let mut mgm_1_handler = MgmHandlerLis3Mdl::new(
MGM1,
"MGM_1",
mgm_1_mode_node,
mgm_1_handler_composite_rx,
pus_hk_reply_tx.clone(),
let mut mgm_1_handler = mgm::MgmHandlerLis3Mdl::new(
mgm::MgmId::_1,
TmtcQueues {
tc_rx: mgm_1_handler_tc_rx,
tm_tx: tm_sink_tx.clone(),
},
switch_helper.clone(),
tm_sender.clone(),
mgm_1_spi_interface,
shared_mgm_1_set,
mgm::ModeLeafHelper {
request_rx: mgm_1_mode_request_rx,
report_tx: mgm_1_mode_report_tx,
},
Duration::from_millis(1000)
);
// Connect PUS service to device handlers.
connect_mode_nodes(
&mut pus_stack.mode_srv,
mgm_0_handler_mode_tx,
&mut mgm_0_handler,
pus_mode_reply_tx.clone(),
);
connect_mode_nodes(
&mut pus_stack.mode_srv,
mgm_1_handler_mode_tx,
&mut mgm_1_handler,
pus_mode_reply_tx.clone(),
let mut mgm_assembly = mgm_assembly::Assembly::new(
mgm_assembly::ParentQueueHelper {
request_rx: mgm_assembly_request_rx,
report_tx: mgm_assembly_report_tx,
},
mgm_assembly::ChildrenQueueHelper {
request_tx_queues: [mgm_0_mode_request_tx, mgm_1_mode_request_tx],
report_rx_queues: [mgm_0_mode_report_rx, mgm_1_mode_report_rx],
},
TmtcQueues {
tc_rx: mgm_assembly_tc_rx,
tm_tx: tm_sink_tx.clone(),
},
Duration::from_millis(2000),
);
let pcdu_serial_interface = if let Some(sim_client) = opt_sim_client.as_mut() {
@@ -374,23 +230,13 @@ fn main() {
} else {
SerialSimInterfaceWrapper::Dummy(SerialInterfaceDummy::default())
};
let pcdu_mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), pcdu_handler_mode_rx);
let mut pcdu_handler = PcduHandler::new(
PCDU,
"PCDU",
pcdu_mode_node,
pcdu_handler_composite_rx,
pus_hk_reply_tx,
pcdu_handler_tc_rx,
tm_sink_tx.clone(),
switch_request_rx,
tm_sender.clone(),
pcdu_serial_interface,
shared_switch_set,
);
connect_mode_nodes(
&mut pus_stack.mode_srv,
pcdu_handler_mode_tx.clone(),
&mut pcdu_handler,
pus_mode_reply_tx,
DeviceMode::Normal,
);
// The PCDU is a critical component which should be in normal mode immediately.
@@ -406,12 +252,15 @@ fn main() {
info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new()
.name("SATRS tmtc-udp".to_string())
.name("TMTC & UDP".to_string())
.spawn(move || {
info!("Running UDP server on port {SERVER_PORT}");
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
udp_tmtc_server.periodic_operation();
tmtc_task.periodic_operation();
tc_source.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_UDP_TMTC));
}
})
@@ -419,10 +268,13 @@ fn main() {
info!("Starting TCP task");
let jh_tcp = thread::Builder::new()
.name("sat-rs tcp".to_string())
.name("TCP".to_string())
.spawn(move || {
info!("Running TCP server on port {SERVER_PORT}");
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
tcp_server.periodic_operation();
}
})
@@ -430,9 +282,14 @@ fn main() {
info!("Starting TM funnel task");
let jh_tm_funnel = thread::Builder::new()
.name("tm sink".to_string())
.spawn(move || loop {
tm_sink.operation();
.name("TM SINK".to_string())
.spawn(move || {
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
tm_sink.operation();
}
})
.unwrap();
@@ -441,10 +298,15 @@ fn main() {
info!("Starting UDP sim client task");
opt_jh_sim_client = Some(
thread::Builder::new()
.name("sat-rs sim adapter".to_string())
.spawn(move || loop {
if sim_client.operation() == HandlingStatus::Empty {
std::thread::sleep(Duration::from_millis(SIM_CLIENT_IDLE_DELAY_MS));
.name("SIM ADAPTER".to_string())
.spawn(move || {
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
if sim_client.operation() == HandlingStatus::Empty {
std::thread::sleep(Duration::from_millis(SIM_CLIENT_IDLE_DELAY_MS));
}
}
})
.unwrap(),
@@ -453,38 +315,55 @@ fn main() {
info!("Starting AOCS thread");
let jh_aocs = thread::Builder::new()
.name("sat-rs aocs".to_string())
.spawn(move || loop {
mgm_0_handler.periodic_operation();
mgm_1_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
.name("AOCS".to_string())
.spawn(move || {
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
mgm_0_handler.periodic_operation();
mgm_1_handler.periodic_operation();
mgm_assembly.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
}
})
.unwrap();
info!("Starting EPS thread");
let jh_eps = thread::Builder::new()
.name("sat-rs eps".to_string())
.spawn(move || loop {
// TODO: We should introduce something like a fixed timeslot helper to allow a more
// declarative API. It would also be very useful for the AOCS task.
//
// TODO: The fixed timeslot handler exists.. use it.
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::RegularOp);
thread::sleep(Duration::from_millis(50));
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::PollAndRecvReplies);
thread::sleep(Duration::from_millis(50));
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::PollAndRecvReplies);
thread::sleep(Duration::from_millis(300));
.name("EPS".to_string())
.spawn(move || {
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
// TODO: We should introduce something like a fixed timeslot helper to allow a more
// declarative API. It would also be very useful for the AOCS task.
//
// TODO: The fixed timeslot handler exists.. use it.
// TODO: Why not just use sync code in the PCDU handler, and fully delay there?
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::RegularOp);
thread::sleep(Duration::from_millis(50));
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::PollAndRecvReplies);
thread::sleep(Duration::from_millis(50));
pcdu_handler.periodic_operation(crate::eps::pcdu::OpCode::PollAndRecvReplies);
thread::sleep(Duration::from_millis(300));
}
})
.unwrap();
info!("Starting PUS handler thread");
let jh_pus_handler = thread::Builder::new()
.name("sat-rs pus".to_string())
.spawn(move || loop {
event_handler.periodic_operation();
pus_stack.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
info!("Starting controller thread");
let jh_controller_thread = thread::Builder::new()
.name("CTRL".to_string())
.spawn(move || {
loop {
if KILL_SIGNAL.load(Ordering::Relaxed) {
break;
}
controller.periodic_operation();
event_manager.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_CONTROLLER));
}
})
.unwrap();
@@ -504,7 +383,7 @@ fn main() {
}
jh_aocs.join().expect("Joining AOCS thread failed");
jh_eps.join().expect("Joining EPS thread failed");
jh_pus_handler
jh_controller_thread
.join()
.expect("Joining PUS handler thread failed");
}
-6
View File
@@ -1,6 +0,0 @@
use core::fmt::Debug;
pub trait SpiInterface {
type Error: Debug;
fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>;
}
+5 -32
View File
@@ -1,11 +1,9 @@
use std::{cell::RefCell, collections::VecDeque, sync::mpsc};
use satrs::{
pus::EcssTmSender,
queue::GenericSendError,
spacepackets::ecss::WritablePusPacket,
tmtc::{PacketAsVec, PacketHandler, PacketSenderWithSharedPool},
ComponentId,
queue::GenericSendError,
tmtc::{PacketAsVec, PacketHandler},
};
#[derive(Default, Debug, Clone)]
@@ -14,8 +12,7 @@ pub struct MockSender(pub RefCell<VecDeque<PacketAsVec>>);
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub enum TmTcSender {
Static(PacketSenderWithSharedPool),
Heap(mpsc::SyncSender<PacketAsVec>),
Normal(mpsc::SyncSender<PacketAsVec>),
Mock(MockSender),
}
@@ -29,37 +26,13 @@ impl TmTcSender {
}
}
impl EcssTmSender for TmTcSender {
fn send_tm(
&self,
sender_id: satrs::ComponentId,
tm: satrs::pus::PusTmVariant,
) -> Result<(), satrs::pus::EcssTmtcError> {
match self {
TmTcSender::Static(sync_sender) => sync_sender.send_tm(sender_id, tm),
TmTcSender::Heap(sync_sender) => match tm {
satrs::pus::PusTmVariant::InStore(_) => panic!("can not send TM in store"),
satrs::pus::PusTmVariant::Direct(pus_tm_creator) => sync_sender
.send(PacketAsVec::new(sender_id, pus_tm_creator.to_vec()?))
.map_err(|_| GenericSendError::RxDisconnected.into()),
},
TmTcSender::Mock(_) => Ok(()),
}
}
}
impl PacketHandler for TmTcSender {
type Error = GenericSendError;
fn handle_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
match self {
TmTcSender::Static(packet_sender_with_shared_pool) => {
if let Err(e) = packet_sender_with_shared_pool.handle_packet(sender_id, packet) {
log::error!("Error sending packet via Static TM/TC sender: {:?}", e);
}
}
TmTcSender::Heap(sync_sender) => {
if let Err(e) = sync_sender.handle_packet(sender_id, packet) {
TmTcSender::Normal(sync_sender) => {
if let Err(e) = sync_sender.send(PacketAsVec::new(sender_id, packet.to_vec())) {
log::error!("Error sending packet via Heap TM/TC sender: {:?}", e);
}
}
+83 -107
View File
@@ -1,121 +1,97 @@
use models::{ComponentId, TcHeader, ccsds::CcsdsTcPacketOwned};
use satrs::{
pool::PoolProvider,
pus::HandlingStatus,
tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
spacepackets::{CcsdsPacketReader, ChecksumType},
tmtc::PacketAsVec,
};
use std::{
collections::HashMap,
sync::mpsc::{self, TryRecvError},
};
use std::sync::mpsc::{self, TryRecvError};
use crate::pus::PusTcDistributor;
// TC source components where static pools are the backing memory of the received telecommands.
pub struct TcSourceTaskStatic {
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
/// We allocate this buffer from the heap to avoid a clippy warning on large enum variant
/// differences.
tc_buf: Box<[u8; 4096]>,
pus_distributor: PusTcDistributor,
}
#[allow(dead_code)]
impl TcSourceTaskStatic {
pub fn new(
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
pus_receiver: PusTcDistributor,
) -> Self {
Self {
shared_tc_pool,
tc_receiver,
tc_buf: Box::new([0; 4096]),
pus_distributor: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect ECSS PUS packets.
// If packets like CFDP are expected, we might have to check the APID first.
match self.tc_receiver.try_recv() {
Ok(packet_in_pool) => {
let pool = self
.shared_tc_pool
.0
.read()
.expect("locking tc pool failed");
pool.read(&packet_in_pool.store_addr, self.tc_buf.as_mut_slice())
.expect("reading pool failed");
drop(pool);
self.pus_distributor
.handle_tc_packet_in_store(packet_in_pool, self.tc_buf.as_slice())
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
pub type CcsdsDistributor = HashMap<ComponentId, std::sync::mpsc::SyncSender<CcsdsTcPacketOwned>>;
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub struct TcSourceTask {
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_distributor: PusTcDistributor,
}
#[allow(dead_code)]
impl TcSourceTaskDynamic {
pub fn new(tc_receiver: mpsc::Receiver<PacketAsVec>, pus_receiver: PusTcDistributor) -> Self {
Self {
tc_receiver,
pus_distributor: pus_receiver,
}
}
pub fn periodic_operation(&mut self) {
self.poll_tc();
}
pub fn poll_tc(&mut self) -> HandlingStatus {
// Right now, we only expect ECSS PUS packets.
// If packets like CFDP are expected, we might have to check the APID first.
match self.tc_receiver.try_recv() {
Ok(packet_as_vec) => {
self.pus_distributor
.handle_tc_packet_vec(packet_as_vec)
.ok();
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
#[allow(dead_code)]
pub enum TcSourceTask {
Static(TcSourceTaskStatic),
Heap(TcSourceTaskDynamic),
ccsds_distributor: CcsdsDistributor,
}
impl TcSourceTask {
pub fn new(
tc_receiver: mpsc::Receiver<PacketAsVec>,
ccsds_distributor: CcsdsDistributor,
) -> Self {
Self {
tc_receiver,
ccsds_distributor,
}
}
pub fn add_target(
&mut self,
target_id: ComponentId,
sender: mpsc::SyncSender<CcsdsTcPacketOwned>,
) {
self.ccsds_distributor.insert(target_id, sender);
}
pub fn periodic_operation(&mut self) {
match self {
TcSourceTask::Static(task) => task.periodic_operation(),
TcSourceTask::Heap(task) => task.periodic_operation(),
loop {
if self.poll_tc() == HandlingStatus::Empty {
break;
}
}
}
pub fn poll_tc(&mut self) -> HandlingStatus {
match self.tc_receiver.try_recv() {
Ok(packet) => {
log::debug!("received raw packet: {:?}", packet);
let ccsds_tc_reader_result =
CcsdsPacketReader::new(&packet.packet, Some(ChecksumType::WithCrc16));
if ccsds_tc_reader_result.is_err() {
log::warn!(
"received invalid CCSDS TC packet: {:?}",
ccsds_tc_reader_result.err()
);
// TODO: Send a dedicated TM packet.
return HandlingStatus::HandledOne;
}
let ccsds_tc_reader = ccsds_tc_reader_result.unwrap();
let tc_header_result =
postcard::take_from_bytes::<TcHeader>(ccsds_tc_reader.user_data());
if tc_header_result.is_err() {
log::warn!(
"received CCSDS TC packet with invalid TC header: {:?}",
tc_header_result.err()
);
// TODO: Send a dedicated TM packet.
return HandlingStatus::HandledOne;
}
let (tc_header, payload) = tc_header_result.unwrap();
if let Some(sender) = self.ccsds_distributor.get(&tc_header.target_id) {
log::debug!("sending TC packet to target ID: {:?}", tc_header.target_id);
sender
.send(CcsdsTcPacketOwned {
sp_header: *ccsds_tc_reader.sp_header(),
tc_header,
payload: payload.to_vec(),
})
.ok();
} else {
log::warn!("no TC handler for target ID {:?}", tc_header.target_id);
// TODO: Send a dedicated TM packet.
}
HandlingStatus::HandledOne
}
Err(e) => match e {
TryRecvError::Empty => HandlingStatus::Empty,
TryRecvError::Disconnected => {
log::warn!("tmtc thread: sender disconnected");
HandlingStatus::Empty
}
},
}
}
}
+15 -140
View File
@@ -4,18 +4,8 @@ use std::{
};
use arbitrary_int::{u11, u14};
use log::info;
use satrs::{
pool::PoolProvider,
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
seq_count::SequenceCounter,
seq_count::SequenceCounterCcsdsSimple,
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
};
use models::ccsds::CcsdsTmPacketOwned;
use satrs::spacepackets::seq_count::{SequenceCounter, SequenceCounterCcsdsSimple};
use crate::interface::tcp::SyncTcpTmSource;
@@ -34,150 +24,35 @@ impl CcsdsSeqCounterMap {
}
}
pub struct TmFunnelCommon {
pub struct TmSink {
seq_counter_map: CcsdsSeqCounterMap,
msg_counter_map: HashMap<u8, u16>,
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<CcsdsTmPacketOwned>,
tm_server_tx: mpsc::SyncSender<CcsdsTmPacketOwned>,
}
impl TmFunnelCommon {
pub fn new(sync_tm_tcp_source: SyncTcpTmSource) -> Self {
impl TmSink {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<CcsdsTmPacketOwned>,
tm_server_tx: mpsc::SyncSender<CcsdsTmPacketOwned>,
) -> Self {
Self {
seq_counter_map: Default::default(),
msg_counter_map: Default::default(),
sync_tm_tcp_source,
}
}
// Applies common packet processing operations for PUS TM packets. This includes setting
// a sequence counter
fn apply_packet_processing(&mut self, mut zero_copy_writer: PusTmZeroCopyWriter) {
// zero_copy_writer.set_apid(PUS_APID);
zero_copy_writer.set_seq_count(
self.seq_counter_map
.get_and_increment(zero_copy_writer.apid()),
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service_type_id())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
*entry = 0;
} else {
*entry += 1;
}
Self::packet_printout(&zero_copy_writer);
// This operation has to come last!
zero_copy_writer.finish();
}
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!(
"Sending PUS TM[{},{}] with APID {}",
tm.service_type_id(),
tm.message_subtype_id(),
tm.apid()
);
}
}
pub struct TmSinkStatic {
common: TmFunnelCommon,
shared_tm_store: SharedPacketPool,
tm_funnel_rx: mpsc::Receiver<PacketInPool>,
tm_server_tx: mpsc::SyncSender<PacketInPool>,
}
#[allow(dead_code)]
impl TmSinkStatic {
pub fn new(
shared_tm_store: SharedPacketPool,
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PacketInPool>,
tm_server_tx: mpsc::SyncSender<PacketInPool>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
shared_tm_store,
tm_funnel_rx,
tm_server_tx,
}
}
pub fn operation(&mut self) {
if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let shared_pool = self.shared_tm_store.0.clone();
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
let mut tm_copy = Vec::new();
pool_guard
.modify(&pus_tm_in_pool.store_addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
})
.expect("Reading TM from pool failed");
self.tm_server_tx
.send(pus_tm_in_pool)
.expect("Sending TM to server failed");
// We could also do this step in the update closure, but I'd rather avoid this, could
// lead to nested locking.
self.common.sync_tm_tcp_source.add_tm(&tm_copy);
}
}
}
pub struct TmSinkDynamic {
common: TmFunnelCommon,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_server_tx: mpsc::SyncSender<PacketAsVec>,
}
#[allow(dead_code)]
impl TmSinkDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
tm_server_tx: mpsc::SyncSender<PacketAsVec>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
tm_funnel_rx,
tm_server_tx,
}
}
pub fn operation(&mut self) {
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer =
PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
if let Ok(mut tm) = self.tm_funnel_rx.try_recv() {
tm.sp_header
.set_seq_count(self.seq_counter_map.get_and_increment(tm.sp_header.apid()));
self.sync_tm_tcp_source.add_tm(&tm.to_vec());
self.tm_server_tx
.send(tm)
.expect("Sending TM to server failed");
}
}
}
#[allow(dead_code)]
/// TM sink abstraction over the two storage back-ends.
pub enum TmSink {
    /// Pool-backed variant, see [TmSinkStatic].
    Static(TmSinkStatic),
    /// Heap-backed variant, see [TmSinkDynamic].
    Heap(TmSinkDynamic),
}
impl TmSink {
pub fn operation(&mut self) {
match self {
TmSink::Static(static_sink) => static_sink.operation(),
TmSink::Heap(dynamic_sink) => dynamic_sink.operation(),
}
}
}
+2 -1
View File
@@ -11,7 +11,8 @@ license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spacepackets = { version = "0.17", default-features = false }
# spacepackets = { version = "0.17", default-features = false }
spacepackets = { version = "0.17", git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git", default-features = false }
serde = { version = "1", default-features = false, optional = true }
defmt = {version = "1", optional = true }
+1 -6
View File
@@ -14,7 +14,7 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
[dependencies]
satrs-shared = { version = "0.2", path = "../satrs-shared" }
spacepackets = { version = "0.17", default-features = false }
spacepackets = { version = "0.17", git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git", default-features = false }
delegate = "0.13"
paste = "1"
@@ -78,8 +78,3 @@ test_util = []
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--generate-link-to-definition"]
[[test]]
name = "event_test"
path = "tests/pus_events.rs"
required-features = ["test_util"]
+1
View File
@@ -0,0 +1 @@
pub mod scheduler;
+926
View File
@@ -0,0 +1,926 @@
//! # CCSDS Telecommand Scheduler.
#![deny(missing_docs)]
use core::{hash::Hash, time::Duration};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
use spacepackets::{
CcsdsPacketIdAndPsc,
time::{TimestampError, UnixTime},
};
/// Generic CCSDS scheduling errors.
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum ScheduleError {
    /// The release time is within the time margin added on top of the current time.
    #[error("release time in margin")]
    ReleaseTimeInTimeMargin {
        /// Current time.
        current_time: UnixTime,
        /// Configured time margin.
        time_margin: Duration,
        /// Release time.
        release_time: UnixTime,
    },
    /// Nested time-tagged commands are not allowed.
    #[error("nested scheduled tc")]
    NestedScheduledTc,
    /// TC data is empty.
    #[error("tc data empty")]
    TcDataEmpty,
    /// Scheduler is full, packet number limit reached.
    #[error("scheduler is full, packet number limit reached")]
    PacketLimitReached,
    /// Scheduler is full, number of bytes limit reached.
    #[error("scheduler is full, number of bytes limit reached")]
    ByteLimitReached,
    /// Timestamp error.
    #[error("timestamp error: {0}")]
    TimestampError(#[from] TimestampError),
}
/// Packet ID used for identifying scheduled packets.
///
/// Right now, this ID can be determined from the packet without requiring external input
/// or custom data fields in the CCSDS space packet.
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CcsdsSchedulePacketId {
    /// Base ID.
    pub base: CcsdsPacketIdAndPsc,
    /// Optional CRC16 checksum of the packet.
    pub crc16: Option<u16>,
}
impl CcsdsSchedulePacketId {
    /// Create a new CCSDS scheduling packet ID from the base packet ID and an optional
    /// CRC16 checksum.
    pub const fn new(base: CcsdsPacketIdAndPsc, checksum: Option<u16>) -> Self {
        Self {
            base,
            crc16: checksum,
        }
    }
}
// Manual Hash implementation which hashes exactly the two fields compared by the derived
// PartialEq, keeping Hash consistent with Eq.
impl Hash for CcsdsSchedulePacketId {
    fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
        self.base.hash(state);
        self.crc16.hash(state);
    }
}
/// Modules requiring [alloc] support.
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use core::time::Duration;
#[cfg(feature = "std")]
use std::time::SystemTimeError;
use alloc::collections::btree_map;
use spacepackets::{CcsdsPacketIdAndPsc, CcsdsPacketReader, time::UnixTime};
use crate::ccsds::scheduler::CcsdsSchedulePacketId;
/// The scheduler can be configured to have bounds for both the number of packets
/// and the total number of bytes used by scheduled packets.
///
/// This can be used to avoid memory exhaustion in systems with limited resources or under
/// heavy workloads. A [None] value means the respective limit is not enforced.
#[derive(Default, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct Limits {
    /// Maximum number of scheduled packets.
    pub packets: Option<usize>,
    /// Maximum total number of bytes used by scheduled packets.
    pub bytes: Option<usize>,
}
impl Limits {
    /// Create new limits for the CCSDS scheduler.
    pub const fn new(packets: Option<usize>, bytes: Option<usize>) -> Self {
        Self { packets, bytes }
    }

    /// Returns true when neither a packet count limit nor a byte limit is configured.
    pub fn has_no_limits(&self) -> bool {
        matches!((self.packets, self.bytes), (None, None))
    }
}
/// Fill count of the scheduler, tracking both metrics bounded by [Limits].
#[derive(Default, Debug)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct FillCount {
    /// Number of scheduled packets.
    pub packets: usize,
    /// Total number of bytes used by scheduled packets.
    pub bytes: usize,
}
/// Simple CCSDS scheduler implementation.
///
/// Relies on [alloc] support; optional [Limits] can bound the number of scheduled packets
/// and the total number of bytes.
#[derive(Debug)]
pub struct CcsdsScheduler {
    // Scheduled TCs keyed by release time; multiple TCs can share one release time.
    tc_map: alloc::collections::BTreeMap<
        UnixTime,
        alloc::vec::Vec<(CcsdsSchedulePacketId, alloc::vec::Vec<u8>)>,
    >,
    limits: Limits,
    pub(crate) current_time: UnixTime,
    // Minimum distance a release time must have from the current time on insertion.
    time_margin: Duration,
}
impl CcsdsScheduler {
    /// Create a new CCSDS scheduler.
    pub fn new(current_time: UnixTime, limits: Limits, time_margin: Duration) -> Self {
        Self {
            tc_map: alloc::collections::BTreeMap::new(),
            limits,
            current_time,
            time_margin,
        }
    }

    /// Like [Self::new], but sets the `init_current_time` parameter to the current system time.
    #[cfg(feature = "std")]
    pub fn new_with_current_init_time(
        limits: Limits,
        time_margin: Duration,
    ) -> Result<Self, SystemTimeError> {
        Ok(Self::new(UnixTime::now()?, limits, time_margin))
    }

    /// Current fill count: number of scheduled packets and total number of bytes.
    ///
    /// The first returned value is the number of scheduled packets, the second one is the
    /// byte count. Walks all stored entries, so cost grows linearly with the number of
    /// scheduled TCs.
    pub fn current_fill_count(&self) -> FillCount {
        let mut fill_count = FillCount::default();
        for value in self.tc_map.values() {
            for (_, raw_scheduled_tc) in value {
                fill_count.packets += 1;
                fill_count.bytes += raw_scheduled_tc.len();
            }
        }
        fill_count
    }

    /// Current number of scheduled entries.
    pub fn num_of_entries(&self) -> usize {
        self.current_fill_count().packets
    }

    /// Update the current time.
    #[inline]
    pub fn update_time(&mut self, current_time: UnixTime) {
        self.current_time = current_time;
    }

    /// Current time.
    #[inline]
    pub fn current_time(&self) -> &UnixTime {
        &self.current_time
    }

    // Shared insertion checks: enforces the configured limits and rejects release times
    // inside the time margin around the current time.
    fn common_check(
        &mut self,
        release_time: UnixTime,
        packet_size: usize,
    ) -> Result<(), super::ScheduleError> {
        if !self.limits.has_no_limits() {
            let fill_count = self.current_fill_count();
            if let Some(max_bytes) = self.limits.bytes {
                if fill_count.bytes + packet_size > max_bytes {
                    return Err(super::ScheduleError::ByteLimitReached);
                }
            }
            if let Some(max_packets) = self.limits.packets {
                if fill_count.packets + 1 > max_packets {
                    return Err(super::ScheduleError::PacketLimitReached);
                }
            }
        }
        if release_time < self.current_time + self.time_margin {
            return Err(super::ScheduleError::ReleaseTimeInTimeMargin {
                current_time: self.current_time,
                time_margin: self.time_margin,
                release_time,
            });
        }
        Ok(())
    }

    /// Insert a telecommand using an existing [CcsdsPacketReader].
    ///
    /// The [CcsdsSchedulePacketId] is extracted from the reader. Note that the limit and
    /// time-margin checks are performed here and again inside [Self::insert_telecommand];
    /// this is redundant but harmless.
    pub fn insert_telecommand_with_reader(
        &mut self,
        reader: &CcsdsPacketReader,
        release_time: UnixTime,
    ) -> Result<(), super::ScheduleError> {
        self.common_check(release_time, reader.packet_len())?;
        let base_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(reader);
        let checksum = reader.checksum();
        let packet_id_scheduling = CcsdsSchedulePacketId {
            base: base_id,
            crc16: checksum,
        };
        self.insert_telecommand(packet_id_scheduling, reader.raw_data(), release_time)?;
        Ok(())
    }

    /// Insert a raw telecommand, assuming the user has already extracted the
    /// [CcsdsSchedulePacketId]
    pub fn insert_telecommand(
        &mut self,
        packet_id_scheduling: CcsdsSchedulePacketId,
        raw_packet: &[u8],
        release_time: UnixTime,
    ) -> Result<(), super::ScheduleError> {
        self.common_check(release_time, raw_packet.len())?;
        match self.tc_map.entry(release_time) {
            btree_map::Entry::Vacant(e) => {
                e.insert(alloc::vec![(packet_id_scheduling, raw_packet.to_vec())]);
            }
            btree_map::Entry::Occupied(mut v) => {
                // TCs sharing a release time are appended in insertion order.
                v.get_mut()
                    .push((packet_id_scheduling, raw_packet.to_vec()));
            }
        }
        Ok(())
    }

    /// Release all telecommands which should be released based on the current time.
    ///
    /// Each due TC is passed to the `releaser` closure, then all due entries are removed
    /// from the scheduler.
    pub fn release_telecommands<R: FnMut(&CcsdsSchedulePacketId, &[u8])>(
        &mut self,
        mut releaser: R,
    ) {
        let tcs_to_release = self.telecommands_to_release();
        for tc_group in tcs_to_release {
            for (packet_id, raw_tc) in tc_group.1 {
                releaser(packet_id, raw_tc);
            }
        }
        // Strict `>` keeps only entries scheduled after the current time, matching the
        // inclusive `..=current_time` range released above.
        self.tc_map.retain(|k, _| k > &self.current_time);
    }

    /// Retrieve all telecommands which should be released based on the current time.
    pub fn telecommands_to_release(
        &self,
    ) -> btree_map::Range<
        '_,
        UnixTime,
        alloc::vec::Vec<(CcsdsSchedulePacketId, alloc::vec::Vec<u8>)>,
    > {
        self.tc_map.range(..=self.current_time)
    }

    /// Delete scheduled telecommand by their packet ID.
    ///
    /// Returns whether any telecommand was deleted. This function might have to be called
    /// multiple times if multiple identical CCSDS packet IDs are possible.
    pub fn delete_by_id(&mut self, packet_id: &CcsdsSchedulePacketId) -> bool {
        let mut was_removed = false;
        self.tc_map.retain(|_, v| {
            let len_before = v.len();
            v.retain(|(stored_id, _)| stored_id != packet_id);
            // Drop release-time entries which became empty after the removal.
            let has_remaining = !v.is_empty();
            if v.len() < len_before {
                was_removed = true;
            }
            has_remaining
        });
        was_removed
    }

    /// Delete all telecommands scheduled in a time window.
    ///
    /// The range includes the start time but excludes the end time. Returns whether any
    /// telecommands were deleted.
    pub fn delete_time_window(&mut self, start_time: UnixTime, end_time: UnixTime) -> bool {
        let len_before = self.tc_map.len();
        self.tc_map.retain(|k, _| k < &start_time || k >= &end_time);
        self.tc_map.len() < len_before
    }

    /// Delete all scheduled telecommands scheduled after or at a given time.
    ///
    /// Returns whether any telecommands were deleted.
    pub fn delete_starting_at(&mut self, start_time: UnixTime) -> bool {
        let len_before = self.tc_map.len();
        self.tc_map.retain(|k, _| k < &start_time);
        self.tc_map.len() < len_before
    }

    /// Delete all scheduled telecommands scheduled before but not equal to a given time.
    ///
    /// Returns whether any telecommands were deleted.
    pub fn delete_before(&mut self, end_time: UnixTime) -> bool {
        let len_before = self.tc_map.len();
        self.tc_map.retain(|k, _| k >= &end_time);
        self.tc_map.len() < len_before
    }

    /// Completely clear the scheduler.
    pub fn clear(&mut self) {
        self.tc_map.clear();
    }
}
}
#[cfg(test)]
mod tests {
use arbitrary_int::{traits::Integer, u11, u14};
use spacepackets::{
CcsdsPacketCreatorOwned, CcsdsPacketReader, ChecksumType, SpacePacketHeader,
};
use super::*;
// Helper building a CRC16-protected TC packet with APID 0x1 and the given payload and
// sequence count.
fn test_tc(app_data: &[u8], seq_count: u14) -> CcsdsPacketCreatorOwned {
    CcsdsPacketCreatorOwned::new(
        SpacePacketHeader::new_for_tc(
            u11::new(0x1),
            spacepackets::SequenceFlags::Unsegmented,
            seq_count,
            0,
        ),
        spacepackets::PacketType::Tc,
        app_data,
        Some(ChecksumType::WithCrc16),
    )
    .unwrap()
}

// A freshly created scheduler is empty, releases nothing and reports the init time.
#[test]
fn test_basic() {
    let unix_time = UnixTime::new(0, 0);
    let mut scheduler = CcsdsScheduler::new(
        unix_time,
        Limits::new(Some(100), Some(1024)),
        Duration::from_millis(5000),
    );
    assert_eq!(scheduler.current_fill_count().packets, 0);
    assert_eq!(scheduler.current_fill_count().bytes, 0);
    assert_eq!(scheduler.num_of_entries(), 0);
    assert_eq!(
        scheduler
            .telecommands_to_release()
            .collect::<alloc::vec::Vec<_>>()
            .len(),
        0
    );
    assert_eq!(scheduler.current_time(), &unix_time);
    scheduler.release_telecommands(|_, _| {
        panic!("should not be called");
    });
}
// Verifies the releaser is an FnMut: captured state can be mutated inside the closure.
#[test]
fn test_mutable_closure() {
    let unix_time = UnixTime::new(0, 0);
    let mut scheduler = CcsdsScheduler::new(
        unix_time,
        Limits::new(Some(100), Some(1024)),
        Duration::from_millis(5000),
    );
    let mut some_flag = false;
    // We should be able to manipulate the boolean inside the closure.
    scheduler.release_telecommands(|_, _| {
        some_flag = true;
    });
    // Nothing is scheduled, so the releaser must not have been invoked. This also reads
    // the flag, which previously was written but never used.
    assert!(!some_flag);
}
    // clear() removes all scheduled TCs and resets the fill counts.
    #[test]
    fn test_clear() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc = test_tc(&[1, 2, 3], u14::ZERO);
        let test_tc_raw = test_tc.to_vec();
        let reader = CcsdsPacketReader::new(&test_tc_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_raw.len());
        assert_eq!(scheduler.num_of_entries(), 1);
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        scheduler.clear();
        assert_eq!(scheduler.current_fill_count().packets, 0);
        assert_eq!(scheduler.current_fill_count().bytes, 0);
        assert_eq!(scheduler.num_of_entries(), 0);
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
    }

    // Single TC round trip: not released before its time, released with correct ID,
    // checksum and raw bytes afterwards.
    #[test]
    fn insert_and_release_one() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[1, 2, 3], u14::ZERO);
        let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_0);
        let test_tc_raw = test_tc_0.to_vec();
        let reader = CcsdsPacketReader::new(&test_tc_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let checksum = reader.checksum();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_raw.len());
        assert_eq!(scheduler.num_of_entries(), 1);
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        scheduler.release_telecommands(|_, _| {
            panic!("should not be called");
        });
        scheduler.update_time(UnixTime::new(3, 0));
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            1
        );
        scheduler.release_telecommands(|tc_id_scheduled, tc_raw| {
            assert_eq!(tc_id, tc_id_scheduled.base);
            assert_eq!(checksum, tc_id_scheduled.crc16);
            assert_eq!(tc_raw, test_tc_raw);
        });
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
    }

    // Two TCs with different release times are released one at a time as the clock
    // advances past each release time.
    #[test]
    fn insert_and_release_multi_0() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let tc_id_0 = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_0);
        let tc_id_1 = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_1);
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand(
                CcsdsSchedulePacketId::new(tc_id_1, reader_1.checksum()),
                &test_tc_1_raw,
                UnixTime::new(5, 0),
            )
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 2);
        assert_eq!(
            scheduler.current_fill_count().bytes,
            test_tc_0_raw.len() + test_tc_1_raw.len()
        );
        assert_eq!(scheduler.num_of_entries(), 2);
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        scheduler.release_telecommands(|_, _| {
            panic!("should not be called");
        });
        // Release first TC.
        scheduler.update_time(UnixTime::new(3, 0));
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            1
        );
        scheduler.release_telecommands(|tc_id_scheduled, tc_raw| {
            assert_eq!(tc_id_0, tc_id_scheduled.base);
            assert_eq!(reader_0.checksum(), tc_id_scheduled.crc16);
            assert_eq!(tc_raw, test_tc_0_raw);
        });
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_1_raw.len());
        assert_eq!(scheduler.num_of_entries(), 1);
        // Release second TC.
        scheduler.update_time(UnixTime::new(6, 0));
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            1
        );
        scheduler.release_telecommands(|tc_id_scheduled, tc_raw| {
            assert_eq!(tc_id_1, tc_id_scheduled.base);
            assert_eq!(reader_1.checksum(), tc_id_scheduled.crc16);
            assert_eq!(tc_raw, test_tc_1_raw);
        });
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        assert_eq!(scheduler.current_fill_count().packets, 0);
        assert_eq!(scheduler.current_fill_count().bytes, 0);
        assert_eq!(scheduler.num_of_entries(), 0);
    }

    // Two TCs whose release times have both passed are released in one call, in
    // release-time order.
    #[test]
    fn insert_and_release_multi_1() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let tc_id_0 = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_0);
        let tc_id_1 = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_1);
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand(
                CcsdsSchedulePacketId::new(tc_id_1, reader_1.checksum()),
                &test_tc_1_raw,
                UnixTime::new(5, 0),
            )
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 2);
        assert_eq!(
            scheduler.current_fill_count().bytes,
            test_tc_0_raw.len() + test_tc_1_raw.len()
        );
        assert_eq!(scheduler.num_of_entries(), 2);
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        scheduler.release_telecommands(|_, _| {
            panic!("should not be called");
        });
        // Advance past both release times; both TCs must be released in one call.
        scheduler.update_time(UnixTime::new(6, 0));
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            2
        );
        let mut index = 0;
        scheduler.release_telecommands(|tc_id_scheduled, tc_raw| {
            if index == 0 {
                assert_eq!(tc_id_0, tc_id_scheduled.base);
                assert_eq!(reader_0.checksum(), tc_id_scheduled.crc16);
                assert_eq!(tc_raw, test_tc_0_raw);
            } else {
                assert_eq!(tc_id_1, tc_id_scheduled.base);
                assert_eq!(reader_1.checksum(), tc_id_scheduled.crc16);
                assert_eq!(tc_raw, test_tc_1_raw);
            }
            index += 1;
        });
        assert_eq!(
            scheduler
                .telecommands_to_release()
                .collect::<alloc::vec::Vec<_>>()
                .len(),
            0
        );
        assert_eq!(scheduler.current_fill_count().packets, 0);
        assert_eq!(scheduler.current_fill_count().bytes, 0);
        assert_eq!(scheduler.num_of_entries(), 0);
    }

    // Both insertion APIs must reject TCs once the packet count limit is reached.
    #[test]
    fn test_packet_limit_reached() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(3), None),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_0_raw = test_tc_0.to_vec();
        let reader = CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_0);
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 3);
        assert_eq!(
            scheduler.insert_telecommand_with_reader(&reader, UnixTime::new(2, 0)),
            Err(ScheduleError::PacketLimitReached)
        );
        assert_eq!(
            scheduler.insert_telecommand(
                CcsdsSchedulePacketId::new(tc_id, reader.checksum()),
                &test_tc_0_raw,
                UnixTime::new(2, 0)
            ),
            Err(ScheduleError::PacketLimitReached)
        );
    }

    // Both insertion APIs must reject TCs once the byte limit (sized for exactly three
    // test packets) is reached.
    #[test]
    fn test_byte_limit_reached() {
        let unix_time = UnixTime::new(0, 0);
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(None, Some(test_tc_0.len_written() * 3)),
            Duration::from_millis(1000),
        );
        let test_tc_0_raw = test_tc_0.to_vec();
        let reader = CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc_0);
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        assert_eq!(scheduler.current_fill_count().packets, 3);
        assert_eq!(
            scheduler.insert_telecommand_with_reader(&reader, UnixTime::new(2, 0)),
            Err(ScheduleError::ByteLimitReached)
        );
        assert_eq!(
            scheduler.insert_telecommand(
                CcsdsSchedulePacketId::new(tc_id, reader.checksum()),
                &test_tc_0_raw,
                UnixTime::new(2, 0)
            ),
            Err(ScheduleError::ByteLimitReached)
        );
    }

    // A scheduled TC can be deleted via its schedule packet ID.
    #[test]
    fn test_deletion_by_id() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc = test_tc(&[1, 2, 3], u14::ZERO);
        let tc_id = CcsdsPacketIdAndPsc::new_from_ccsds_packet(&test_tc);
        let test_tc_raw = test_tc.to_vec();
        let reader = CcsdsPacketReader::new(&test_tc_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let checksum = reader.checksum();
        let id = CcsdsSchedulePacketId::new(tc_id, checksum);
        scheduler
            .insert_telecommand_with_reader(&reader, UnixTime::new(2, 0))
            .unwrap();
        scheduler.delete_by_id(&id);
        assert_eq!(scheduler.current_fill_count().packets, 0);
        assert_eq!(scheduler.current_fill_count().bytes, 0);
    }

    // Window [3, 6) only covers the middle TC (t=5); the others survive and are released.
    #[test]
    fn test_deletion_by_window_0() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let test_tc_2 = test_tc(&[1, 2, 3], u14::new(2));
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let test_tc_2_raw = test_tc_2.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_2 =
            CcsdsPacketReader::new(&test_tc_2_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_1, UnixTime::new(5, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_2, UnixTime::new(7, 0))
            .unwrap();
        let deleted = scheduler.delete_time_window(UnixTime::new(3, 0), UnixTime::new(6, 0));
        assert!(deleted);
        assert_eq!(scheduler.current_fill_count().packets, 2);
        assert_eq!(
            scheduler.current_fill_count().bytes,
            test_tc_0_raw.len() + test_tc_2_raw.len()
        );
        scheduler.update_time(UnixTime::new(10, 0));
        let mut index = 0;
        scheduler.release_telecommands(|_id, packet| {
            if index == 0 {
                assert_eq!(packet, test_tc_0_raw);
            } else {
                assert_eq!(packet, test_tc_2_raw);
            }
            index += 1;
        });
        assert_eq!(scheduler.current_fill_count().packets, 0);
        assert_eq!(scheduler.current_fill_count().bytes, 0);
    }

    // Window start is inclusive, end is exclusive: [2, 7) removes the TCs at t=2 and t=5
    // but keeps the one at t=7.
    #[test]
    fn test_deletion_by_window_1() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let test_tc_2 = test_tc(&[1, 2, 3], u14::new(2));
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let test_tc_2_raw = test_tc_2.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_2 =
            CcsdsPacketReader::new(&test_tc_2_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_1, UnixTime::new(5, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_2, UnixTime::new(7, 0))
            .unwrap();
        // This only deletes the first 2 TCs.
        let deleted = scheduler.delete_time_window(UnixTime::new(2, 0), UnixTime::new(7, 0));
        assert!(deleted);
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_2_raw.len());
        scheduler.update_time(UnixTime::new(10, 0));
        scheduler.release_telecommands(|_id, packet| {
            assert_eq!(packet, test_tc_2_raw);
        });
    }

    // delete_starting_at removes everything scheduled at or after the given time.
    #[test]
    fn test_deletion_from_start() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let test_tc_2 = test_tc(&[1, 2, 3], u14::new(2));
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let test_tc_2_raw = test_tc_2.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_2 =
            CcsdsPacketReader::new(&test_tc_2_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_1, UnixTime::new(5, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_2, UnixTime::new(7, 0))
            .unwrap();
        // This deletes the last 2 TCs (scheduled at t=5 and t=7); only the t=2 TC remains.
        let deleted = scheduler.delete_starting_at(UnixTime::new(5, 0));
        assert!(deleted);
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_0_raw.len());
        scheduler.update_time(UnixTime::new(10, 0));
        scheduler.release_telecommands(|_id, packet| {
            assert_eq!(packet, test_tc_0_raw);
        });
    }

    // delete_before removes everything strictly before the given time.
    #[test]
    fn test_deletion_until_end() {
        let unix_time = UnixTime::new(0, 0);
        let mut scheduler = CcsdsScheduler::new(
            unix_time,
            Limits::new(Some(100), Some(1024)),
            Duration::from_millis(1000),
        );
        let test_tc_0 = test_tc(&[42], u14::ZERO);
        let test_tc_1 = test_tc(&[1, 2, 3], u14::new(1));
        let test_tc_2 = test_tc(&[1, 2, 3], u14::new(2));
        let test_tc_0_raw = test_tc_0.to_vec();
        let test_tc_1_raw = test_tc_1.to_vec();
        let test_tc_2_raw = test_tc_2.to_vec();
        let reader_0 =
            CcsdsPacketReader::new(&test_tc_0_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_1 =
            CcsdsPacketReader::new(&test_tc_1_raw, Some(ChecksumType::WithCrc16)).unwrap();
        let reader_2 =
            CcsdsPacketReader::new(&test_tc_2_raw, Some(ChecksumType::WithCrc16)).unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_0, UnixTime::new(2, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_1, UnixTime::new(5, 0))
            .unwrap();
        scheduler
            .insert_telecommand_with_reader(&reader_2, UnixTime::new(7, 0))
            .unwrap();
        // This only deletes the first 2 TCs.
        let deleted = scheduler.delete_before(UnixTime::new(7, 0));
        assert!(deleted);
        assert_eq!(scheduler.current_fill_count().packets, 1);
        assert_eq!(scheduler.current_fill_count().bytes, test_tc_2_raw.len());
        scheduler.update_time(UnixTime::new(10, 0));
        scheduler.release_telecommands(|_id, packet| {
            assert_eq!(packet, test_tc_2_raw);
        });
    }
}
-448
View File
@@ -1,448 +0,0 @@
use crate::{
ComponentId,
mode::{ModeAndSubmode, ModeReply, ModeRequest, ModeRequestSender},
mode_tree::{ModeStoreProvider, ModeStoreVec},
queue::{GenericSendError, GenericTargetedMessagingError},
request::{GenericMessage, RequestId},
};
use core::fmt::Debug;
// Context of an active mode command: the commanded target mode and the request ID the
// command belongs to.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ActiveModeCommandContext {
    pub target_mode: ModeAndSubmode,
    pub active_request_id: RequestId,
}

// Result of a device manager helper operation.
#[derive(Debug, Default, PartialEq, Eq)]
pub enum DevManagerHelperResult {
    #[default]
    Idle,
    Busy,
    // Mode commanding finished; carries the context of the completed command.
    ModeCommandingDone(ActiveModeCommandContext),
}

// Errors of the device manager helper.
#[derive(Debug)]
pub enum DevManagerHelperError {
    ChildNotInStore,
}
// User hook which allows customizing how mode commands are forwarded to child components.
pub trait DevManagerUserHook: Debug {
    // Send a mode command to a single child identified by `target_id`. Implementations
    // also update the reply-awaition state inside `children_mode_store`.
    fn send_mode_cmd_to_child(
        &self,
        request_id: RequestId,
        target_id: ComponentId,
        mode: ModeAndSubmode,
        forced: bool,
        children_mode_store: &mut ModeStoreVec,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError>;

    // Send mode commands to all children based on the mode commanded to the parent.
    fn send_mode_cmds_to_children(
        &self,
        request_id: RequestId,
        commanded_parent_mode: ModeAndSubmode,
        forced: bool,
        children_mode_store: &mut ModeStoreVec,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError>;
}
// Default hook which forwards the commanded mode transparently to the children.
#[derive(Debug, Default)]
pub struct TransparentDevManagerHook {}

impl DevManagerUserHook for TransparentDevManagerHook {
    // Forwards the commanded parent mode unchanged to every child and marks each child
    // as awaiting a reply.
    fn send_mode_cmds_to_children(
        &self,
        request_id: RequestId,
        commanded_parent_mode: ModeAndSubmode,
        forced: bool,
        children_mode_store: &mut ModeStoreVec,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError> {
        for child in children_mode_store {
            mode_req_sender.send_mode_request(
                request_id,
                child.id(),
                ModeRequest::SetMode {
                    mode_and_submode: commanded_parent_mode,
                    forced,
                },
            )?;
            child.awaiting_reply = true;
        }
        Ok(())
    }

    // Forwards the commanded mode to a single child, marking it as awaiting a reply
    // before the request is sent.
    fn send_mode_cmd_to_child(
        &self,
        request_id: RequestId,
        target_id: ComponentId,
        mode: ModeAndSubmode,
        forced: bool,
        children_mode_store: &mut ModeStoreVec,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError> {
        let mut_val = children_mode_store
            .get_mut(target_id)
            .ok_or(GenericSendError::TargetDoesNotExist(target_id))?;
        mut_val.awaiting_reply = true;
        mode_req_sender.send_mode_request(
            request_id,
            target_id,
            ModeRequest::SetMode {
                mode_and_submode: mode,
                forced,
            },
        )?;
        Ok(())
    }
}
// Commanding state of the device manager helper.
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum DevManagerCommandingState {
    #[default]
    Idle,
    // A mode command is active and replies from children are still pending.
    AwaitingReplies(ActiveModeCommandContext),
}

impl DevManagerCommandingState {
    // Build the awaiting-replies state for a newly issued mode command.
    fn new_active_cmd(mode_and_submode: ModeAndSubmode, active_request_id: RequestId) -> Self {
        DevManagerCommandingState::AwaitingReplies(ActiveModeCommandContext {
            target_mode: mode_and_submode,
            active_request_id,
        })
    }
}
/// A generic helper for manager components which manage child components in a mode tree.
///
/// Mode commands are usually forwarded to all children components transparently.
/// For example, this could be used in an Assembly component which manages multiple redundant
/// child components. It can also be used inside a manager component which only manages one device.
#[derive(Debug, Default)]
pub struct DevManagerCommandingHelper<UserHook: DevManagerUserHook> {
    /// The IDs, modes and reply awaition status of all children are tracked in this data
    /// structure.
    pub children_mode_store: ModeStoreVec,
    /// User hook customizing how mode commands are forwarded to children.
    pub user_hook: UserHook,
    /// Current commanding state (idle or awaiting replies for an active command).
    pub state: DevManagerCommandingState,
}
impl<UserHook: DevManagerUserHook> DevManagerCommandingHelper<UserHook> {
    /// Creates a new helper with an empty child mode store and the
    /// [DevManagerCommandingState::Idle] state.
    pub fn new(user_hook: UserHook) -> Self {
        Self {
            children_mode_store: Default::default(),
            user_hook,
            state: Default::default(),
        }
    }

    /// Sends a mode command to a single child via the user hook and transitions into the
    /// [DevManagerCommandingState::AwaitingReplies] state.
    ///
    /// The state transition now only happens after the hook reported a successful send, so
    /// a send failure no longer leaves the helper stuck awaiting replies that will never come.
    pub fn send_mode_cmd_to_one_child(
        &mut self,
        request_id: RequestId,
        target_id: ComponentId,
        mode_and_submode: ModeAndSubmode,
        forced: bool,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError> {
        self.user_hook.send_mode_cmd_to_child(
            request_id,
            target_id,
            mode_and_submode,
            forced,
            &mut self.children_mode_store,
            mode_req_sender,
        )?;
        self.state = DevManagerCommandingState::new_active_cmd(mode_and_submode, request_id);
        Ok(())
    }

    /// Sends a mode command to all children via the user hook and transitions into the
    /// [DevManagerCommandingState::AwaitingReplies] state.
    ///
    /// Like [Self::send_mode_cmd_to_one_child], the state transition only happens after the
    /// hook reported a successful send.
    pub fn send_mode_cmd_to_all_children(
        &mut self,
        request_id: RequestId,
        mode_and_submode: ModeAndSubmode,
        forced: bool,
        mode_req_sender: &impl ModeRequestSender,
    ) -> Result<(), GenericSendError> {
        self.user_hook.send_mode_cmds_to_children(
            request_id,
            mode_and_submode,
            forced,
            &mut self.children_mode_store,
            mode_req_sender,
        )?;
        self.state = DevManagerCommandingState::new_active_cmd(mode_and_submode, request_id);
        Ok(())
    }

    /// Target mode of the currently active mode command, or [None] when idle.
    pub fn target_mode(&self) -> Option<ModeAndSubmode> {
        match self.state {
            DevManagerCommandingState::Idle => None,
            DevManagerCommandingState::AwaitingReplies(context) => Some(context.target_mode),
        }
    }

    /// Current commanding state.
    pub fn state(&self) -> DevManagerCommandingState {
        self.state
    }

    /// Sends a mode announcement request to all children, optionally the recursive variant.
    pub fn send_announce_mode_cmd_to_children(
        &self,
        request_id: RequestId,
        mode_req_sender: &impl ModeRequestSender,
        recursive: bool,
    ) -> Result<(), GenericTargetedMessagingError> {
        let request = if recursive {
            ModeRequest::AnnounceModeRecursive
        } else {
            ModeRequest::AnnounceMode
        };
        for child in &self.children_mode_store {
            mode_req_sender.send_mode_request(request_id, child.id(), request)?;
        }
        Ok(())
    }

    /// Registers a child component with its initial mode in the mode store.
    pub fn add_mode_child(&mut self, target_id: ComponentId, mode: ModeAndSubmode) {
        self.children_mode_store.add_component(target_id, mode);
    }

    /// Helper method which counts the number of children which have a certain mode.
    pub fn count_number_of_children_with_mode(&self, mode_and_submode: ModeAndSubmode) -> usize {
        (&self.children_mode_store)
            .into_iter()
            .filter(|child| child.mode_and_submode() == mode_and_submode)
            .count()
    }

    /// Handles a mode reply from a child component.
    ///
    /// Returns [DevManagerHelperResult::Idle] if no mode commanding is in progress,
    /// [DevManagerHelperResult::Busy] while replies are still outstanding and
    /// [DevManagerHelperResult::ModeCommandingDone] once all children have replied.
    ///
    /// # Errors
    ///
    /// [DevManagerHelperError::ChildNotInStore] if the reply sender is not a tracked child.
    pub fn handle_mode_reply(
        &mut self,
        mode_reply: &GenericMessage<ModeReply>,
    ) -> Result<DevManagerHelperResult, DevManagerHelperError> {
        // Early return for the idle state: replies arriving outside of an active command
        // (e.g. stale replies belonging to older requests) do not interfere with the
        // completion handling of the mode commanding. This is important for forced mode
        // commands. NOTE(review): the reply's request ID itself is not checked against the
        // active context here — confirm whether the mode store handles that internally.
        let context = match self.state {
            DevManagerCommandingState::Idle => return Ok(DevManagerHelperResult::Idle),
            DevManagerCommandingState::AwaitingReplies(active_mode_command_context) => {
                active_mode_command_context
            }
        };
        if !self
            .children_mode_store
            .has_component(mode_reply.sender_id())
        {
            return Err(DevManagerHelperError::ChildNotInStore);
        }
        let mut generic_mode_reply_handler = |mode_and_submode: Option<ModeAndSubmode>| {
            // We already returned early for the idle state, so reply awaiting is always
            // handled here (previously this was re-checked redundantly).
            let still_awaiting_replies = self.children_mode_store.mode_reply_handler(
                mode_reply.sender_id(),
                mode_and_submode,
                true,
            );
            // It is okay to unwrap: when reply awaiting is handled, the mode store always
            // returns some valid value.
            if !still_awaiting_replies.unwrap() {
                self.state = DevManagerCommandingState::Idle;
                return Ok(DevManagerHelperResult::ModeCommandingDone(context));
            }
            Ok(DevManagerHelperResult::Busy)
        };
        match mode_reply.message {
            ModeReply::ModeInfo(mode_and_submode) | ModeReply::ModeReply(mode_and_submode) => {
                generic_mode_reply_handler(Some(mode_and_submode))
            }
            ModeReply::CantReachMode(_result_u16) => generic_mode_reply_handler(None),
            ModeReply::WrongMode {
                expected: _,
                reached,
            } => generic_mode_reply_handler(Some(reached)),
        }
    }
}
#[cfg(test)]
mod tests {
    use crate::{
        mode::{UNKNOWN_MODE, tests::ModeReqSenderMock},
        request::MessageMetadata,
    };

    use super::*;

    pub enum ExampleId {
        Id1 = 1,
        Id2 = 2,
    }

    pub enum ExampleMode {
        Mode1 = 1,
        Mode2 = 2,
    }

    #[test]
    fn test_basic() {
        let assy_helper = DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
        assert_eq!(assy_helper.state(), DevManagerCommandingState::Idle);
    }

    #[test]
    fn test_mode_announce() {
        let mut assy_helper = DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
        let mode_req_sender = ModeReqSenderMock::default();
        assy_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
        assy_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
        assy_helper
            .send_announce_mode_cmd_to_children(1, &mode_req_sender, false)
            .unwrap();
        assert_eq!(mode_req_sender.requests.borrow().len(), 2);
        let mut req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(req.request, ModeRequest::AnnounceMode);
        req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(req.request, ModeRequest::AnnounceMode);
    }

    #[test]
    fn test_mode_announce_recursive() {
        let mut assy_helper = DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
        let mode_req_sender = ModeReqSenderMock::default();
        assy_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
        assy_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
        assy_helper
            .send_announce_mode_cmd_to_children(1, &mode_req_sender, true)
            .unwrap();
        assert_eq!(mode_req_sender.requests.borrow().len(), 2);
        let mut req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(req.request, ModeRequest::AnnounceModeRecursive);
        req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(req.request, ModeRequest::AnnounceModeRecursive);
    }

    #[test]
    fn test_mode_commanding_one_child() {
        let mut dev_mgmt_helper =
            DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
        let mode_req_sender = ModeReqSenderMock::default();
        dev_mgmt_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
        let expected_mode = ModeAndSubmode::new(ExampleMode::Mode1 as u32, 0);
        dev_mgmt_helper
            .send_mode_cmd_to_one_child(
                1,
                ExampleId::Id1 as ComponentId,
                expected_mode,
                false,
                &mode_req_sender,
            )
            .unwrap();
        assert_eq!(mode_req_sender.requests.borrow().len(), 1);
        let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(
            req.request,
            ModeRequest::SetMode {
                mode_and_submode: expected_mode,
                forced: false
            }
        );
        // `matches!` returns a bool whose result was previously discarded, so the check
        // could never fail. Wrap it in `assert!` to make it effective.
        assert!(matches!(
            dev_mgmt_helper.state(),
            DevManagerCommandingState::AwaitingReplies { .. }
        ));
        if let DevManagerCommandingState::AwaitingReplies(ctx) = dev_mgmt_helper.state() {
            assert_eq!(ctx.target_mode, expected_mode);
            assert_eq!(ctx.active_request_id, 1);
        }
        let reply = GenericMessage::new(
            MessageMetadata::new(1, ExampleId::Id1 as ComponentId),
            ModeReply::ModeReply(expected_mode),
        );
        // Fail loudly if the reply did not complete the mode commanding instead of silently
        // skipping the assertions.
        if let DevManagerHelperResult::ModeCommandingDone(ActiveModeCommandContext {
            target_mode,
            active_request_id,
        }) = dev_mgmt_helper.handle_mode_reply(&reply).unwrap()
        {
            assert_eq!(target_mode, expected_mode);
            assert_eq!(active_request_id, 1);
        } else {
            panic!("mode commanding was not completed by the reply");
        }
        assert_eq!(dev_mgmt_helper.state(), DevManagerCommandingState::Idle);
    }

    #[test]
    fn test_mode_commanding_multi_child() {
        let mut dev_mgmt_helper =
            DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
        let mode_req_sender = ModeReqSenderMock::default();
        dev_mgmt_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
        dev_mgmt_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
        let expected_mode = ModeAndSubmode::new(ExampleMode::Mode2 as u32, 0);
        dev_mgmt_helper
            .send_mode_cmd_to_all_children(1, expected_mode, false, &mode_req_sender)
            .unwrap();
        assert_eq!(mode_req_sender.requests.borrow().len(), 2);
        let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(
            req.request,
            ModeRequest::SetMode {
                mode_and_submode: expected_mode,
                forced: false
            }
        );
        let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
        assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
        assert_eq!(req.request_id, 1);
        assert_eq!(
            req.request,
            ModeRequest::SetMode {
                mode_and_submode: expected_mode,
                forced: false
            }
        );
        // See test_mode_commanding_one_child: the matches! result must be asserted.
        assert!(matches!(
            dev_mgmt_helper.state(),
            DevManagerCommandingState::AwaitingReplies { .. }
        ));
        if let DevManagerCommandingState::AwaitingReplies(ctx) = dev_mgmt_helper.state() {
            assert_eq!(ctx.target_mode, expected_mode);
            assert_eq!(ctx.active_request_id, 1);
        }
        let reply = GenericMessage::new(
            MessageMetadata::new(1, ExampleId::Id1 as ComponentId),
            ModeReply::ModeReply(expected_mode),
        );
        assert_eq!(
            dev_mgmt_helper.handle_mode_reply(&reply).unwrap(),
            DevManagerHelperResult::Busy
        );
        let reply = GenericMessage::new(
            MessageMetadata::new(1, ExampleId::Id2 as ComponentId),
            ModeReply::ModeReply(expected_mode),
        );
        if let DevManagerHelperResult::ModeCommandingDone(ActiveModeCommandContext {
            target_mode,
            active_request_id,
        }) = dev_mgmt_helper.handle_mode_reply(&reply).unwrap()
        {
            assert_eq!(target_mode, expected_mode);
            assert_eq!(active_request_id, 1);
        } else {
            panic!("mode commanding was not completed by the second reply");
        }
        assert_eq!(dev_mgmt_helper.state(), DevManagerCommandingState::Idle);
    }
}
View File
-40
View File
@@ -1,40 +0,0 @@
use crate::ComponentId;

/// Factor applied to the base collection interval of a housekeeping dataset.
pub type CollectionIntervalFactor = u32;
/// Unique Identifier for a certain housekeeping dataset.
pub type UniqueId = u32;
/// A housekeeping request addressed to a specific housekeeping dataset.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct HkRequest {
    // Identifies the housekeeping dataset this request applies to.
    pub unique_id: UniqueId,
    // The concrete request type, see [HkRequestVariant].
    pub variant: HkRequestVariant,
}

impl HkRequest {
    /// Creates a new housekeeping request for the given dataset ID.
    pub fn new(unique_id: UniqueId, variant: HkRequestVariant) -> Self {
        Self { unique_id, variant }
    }
}
/// Variants of housekeeping requests.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum HkRequestVariant {
    /// Request a single housekeeping report immediately.
    OneShot,
    /// Enable periodic generation of housekeeping reports.
    EnablePeriodic,
    /// Disable periodic generation of housekeeping reports.
    DisablePeriodic,
    /// Modify the collection interval of periodic reports by the given factor.
    ModifyCollectionInterval(CollectionIntervalFactor),
}
/// A housekeeping request routed to a specific target component.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TargetedHkRequest {
    // Component the request is routed to.
    pub target_id: ComponentId,
    // The actual housekeeping request, see [HkRequestVariant].
    pub hk_request: HkRequestVariant,
}

impl TargetedHkRequest {
    /// Creates a new targeted housekeeping request.
    pub fn new(target_id: ComponentId, hk_request: HkRequestVariant) -> Self {
        Self {
            target_id,
            hk_request,
        }
    }
}
@@ -50,7 +50,7 @@
//! show how the event management modules can be integrated into a more complex software.
use crate::{
ComponentId,
events::{Event, EventId, GroupId},
legacy::events::{Event, EventId, GroupId},
queue::GenericSendError,
};
use core::marker::PhantomData;
@@ -96,6 +96,7 @@ impl<EventInstance: Event> EventMessage<EventInstance> {
pub trait EventSender<EventInstance: Event> {
type Error;
/// Destination component ID.
fn target_id(&self) -> ComponentId;
fn send(&self, message: EventMessage<EventInstance>) -> Result<(), Self::Error>;
@@ -317,7 +318,7 @@ pub mod alloc_mod {
use alloc::vec::Vec;
use hashbrown::HashMap;
use crate::events::EventErasedAlloc;
use crate::legacy::events::EventErasedAlloc;
use super::*;
@@ -460,7 +461,7 @@ pub mod alloc_mod {
#[cfg(feature = "std")]
pub mod std_mod {
use crate::{
events::{EventErasedAlloc, EventErasedHeapless},
legacy::events::{EventErasedAlloc, EventErasedHeapless},
queue::GenericReceiveError,
};
@@ -577,7 +578,7 @@ mod tests {
use arbitrary_int::u14;
use super::*;
use crate::events::{EventErasedAlloc, Severity};
use crate::legacy::events::{EventErasedAlloc, Severity};
use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1};
use std::sync::mpsc;
@@ -50,7 +50,7 @@
//! The [PUS event](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/pus/event.rs)
//! module and the generic [events module](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/events.rs)
//! show how the event management modules can be integrated into a more complex software.
use crate::events_legacy::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::legacy::events_legacy::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::params::Params;
use crate::queue::GenericSendError;
use core::fmt::Debug;
@@ -588,8 +588,8 @@ pub mod std_mod {
#[cfg(test)]
mod tests {
use super::*;
use crate::event_man_legacy::EventManager;
use crate::events_legacy::{EventU32, GenericEvent, Severity};
use crate::legacy::event_man_legacy::EventManager;
use crate::legacy::events_legacy::{EventU32, GenericEvent, Severity};
use crate::params::{ParamsHeapless, ParamsRaw};
use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1};
use std::format;
+4
View File
@@ -0,0 +1,4 @@
pub mod event_man;
pub mod events;
pub mod pus;
@@ -267,217 +267,4 @@ mod alloc_mod {
}
#[cfg(test)]
mod tests {
use super::*;
use crate::ComponentId;
use crate::events_legacy::{EventU32, Severity};
use crate::pus::test_util::TEST_COMPONENT_ID_0;
use crate::pus::tests::CommonTmInfo;
use crate::pus::{ChannelWithId, EcssTmSender, EcssTmtcError, PusTmVariant};
use spacepackets::ByteConversionError;
use spacepackets::ecss::PusError;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::vec::Vec;
const EXAMPLE_APID: u11 = u11::new(0xee);
const EXAMPLE_GROUP_ID: u16 = 2;
const EXAMPLE_EVENT_ID_0: u16 = 1;
#[allow(dead_code)]
const EXAMPLE_EVENT_ID_1: u16 = 2;
#[derive(Debug, Eq, PartialEq, Clone)]
struct TmInfo {
pub sender_id: ComponentId,
pub common: CommonTmInfo,
pub event: EventU32,
pub aux_data: Vec<u8>,
}
#[derive(Default, Clone)]
struct TestSender {
pub service_queue: RefCell<VecDeque<TmInfo>>,
}
impl ChannelWithId for TestSender {
fn id(&self) -> ComponentId {
0
}
}
impl EcssTmSender for TestSender {
fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm {
PusTmVariant::InStore(_) => {
panic!("TestSender: unexpected call with address");
}
PusTmVariant::Direct(tm) => {
assert!(!tm.source_data().is_empty());
let src_data = tm.source_data();
assert!(src_data.len() >= 4);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
let mut aux_data = Vec::new();
if src_data.len() > 4 {
aux_data.extend_from_slice(&src_data[4..]);
}
self.service_queue.borrow_mut().push_back(TmInfo {
sender_id,
common: CommonTmInfo::new_from_tm(&tm),
event,
aux_data,
});
Ok(())
}
}
}
}
fn severity_to_subservice(severity: Severity) -> MessageSubtypeId {
match severity {
Severity::Info => MessageSubtypeId::TmInfoReport,
Severity::Low => MessageSubtypeId::TmLowSeverityReport,
Severity::Medium => MessageSubtypeId::TmMediumSeverityReport,
Severity::High => MessageSubtypeId::TmHighSeverityReport,
}
}
fn report_basic_event(
reporter: &mut EventReporter,
sender: &mut TestSender,
time_stamp: &[u8],
event: EventU32,
severity: Severity,
aux_data: Option<&[u8]>,
) {
match severity {
Severity::Info => {
reporter
.event_info(sender, time_stamp, event, aux_data)
.expect("Error reporting info event");
}
Severity::Low => {
reporter
.event_low_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting low event");
}
Severity::Medium => {
reporter
.event_medium_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting medium event");
}
Severity::High => {
reporter
.event_high_severity(sender, time_stamp, event, aux_data)
.expect("Error reporting high event");
}
}
}
fn basic_event_test(
max_event_aux_data_buf: usize,
severity: Severity,
error_data: Option<&[u8]>,
) {
let mut sender = TestSender::default();
let mut reporter = EventReporter::new(
TEST_COMPONENT_ID_0.id(),
EXAMPLE_APID,
0,
max_event_aux_data_buf,
);
let time_stamp_empty: [u8; 7] = [0; 7];
let mut error_copy = Vec::new();
if let Some(err_data) = error_data {
error_copy.extend_from_slice(err_data);
}
let event = EventU32::new_checked(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
report_basic_event(
&mut reporter,
&mut sender,
&time_stamp_empty,
event,
severity,
error_data,
);
let mut service_queue = sender.service_queue.borrow_mut();
assert_eq!(service_queue.len(), 1);
let tm_info = service_queue.pop_front().unwrap();
assert_eq!(
tm_info.common.subservice,
severity_to_subservice(severity) as u8
);
assert_eq!(tm_info.common.dest_id, 0);
assert_eq!(tm_info.common.timestamp, time_stamp_empty);
assert_eq!(tm_info.common.msg_counter, 0);
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
assert_eq!(tm_info.event, event);
assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID_0.id());
assert_eq!(tm_info.aux_data, error_copy);
}
#[test]
fn basic_info_event_generation() {
basic_event_test(4, Severity::Info, None);
}
#[test]
fn basic_low_severity_event() {
basic_event_test(4, Severity::Low, None);
}
#[test]
fn basic_medium_severity_event() {
basic_event_test(4, Severity::Medium, None);
}
#[test]
fn basic_high_severity_event() {
basic_event_test(4, Severity::High, None);
}
#[test]
fn event_with_info_string() {
let info_string = "Test Information";
basic_event_test(32, Severity::Info, Some(info_string.as_bytes()));
}
#[test]
fn low_severity_with_raw_err_data() {
let raw_err_param: i32 = -1;
let raw_err = raw_err_param.to_be_bytes();
basic_event_test(8, Severity::Low, Some(&raw_err))
}
fn check_buf_too_small(
reporter: &mut EventReporter,
sender: &mut TestSender,
expected_found_len: usize,
) {
let time_stamp_empty: [u8; 7] = [0; 7];
let event = EventU32::new_checked(Severity::Info, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
.expect("Error creating example event");
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
assert!(err.is_err());
let err = err.unwrap_err();
if let EcssTmtcError::Pus(PusError::ByteConversion(
ByteConversionError::ToSliceTooSmall { found, expected },
)) = err
{
assert_eq!(expected, 4);
assert_eq!(found, expected_found_len);
} else {
panic!("Unexpected error {:?}", err);
}
}
#[test]
fn insufficient_buffer() {
let mut sender = TestSender::default();
for i in 0..3 {
let mut reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
check_buf_too_small(&mut reporter, &mut sender, i);
}
}
}
mod tests {}
+1
View File
@@ -0,0 +1 @@
pub mod event;
+2 -10
View File
@@ -9,8 +9,6 @@
//!
//! The core modules of this crate include
//!
//! - The [event manager][event_man] module which provides a publish and
//! and subscribe to route events.
//! - The [pus] module which provides special support for projects using
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
@@ -23,24 +21,18 @@ extern crate downcast_rs;
extern crate std;
pub mod action;
#[cfg(feature = "alloc")]
pub mod dev_mgmt;
pub mod ccsds;
pub mod encoding;
pub mod event_man;
pub mod event_man_legacy;
pub mod events;
pub mod events_legacy;
#[cfg(feature = "std")]
pub mod executable;
pub mod hal;
pub mod health;
pub mod hk;
pub mod legacy;
pub mod mode;
#[cfg(feature = "std")]
pub mod mode_tree;
pub mod params;
pub mod pool;
pub mod power;
pub mod pus;
pub mod queue;
pub mod request;
-1
View File
@@ -1112,7 +1112,6 @@ mod tests {
let mut buf = [0; 1];
praw.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
buf[0] = 200;
}
#[test]
+10 -49
View File
@@ -155,73 +155,34 @@ impl Display for StoreIdError {
#[cfg(feature = "std")]
impl Error for StoreIdError {}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum PoolError {
/// Requested data block is too large
#[error("data to store with size {0} is too large")]
DataTooLarge(usize),
/// The store is full. Contains the index of the full subpool
#[error("store does not have any capacity")]
StoreFull(u16),
/// The store can not hold any data.
#[error("store does not have any capacity")]
NoCapacity,
/// Store ID is invalid. This also includes partial errors where only the subpool is invalid
#[error("invalid store ID: {0}, address: {1:?}")]
InvalidStoreId(StoreIdError, Option<PoolAddr>),
/// Valid subpool and packet index, but no data is stored at the given address
#[error("no data exists at address {0:?}")]
DataDoesNotExist(PoolAddr),
ByteConversionError(spacepackets::ByteConversionError),
#[error("byte conversion error: {0}")]
ByteConversion(#[from] ByteConversionError),
#[error("lock error")]
LockError,
/// Internal or configuration errors
#[error("lock error")]
InternalError(u32),
}
impl Display for PoolError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PoolError::DataTooLarge(size) => {
write!(f, "data to store with size {size} is too large")
}
PoolError::NoCapacity => {
write!(f, "store does not have any capacity")
}
PoolError::StoreFull(u16) => {
write!(f, "store is too full. index for full subpool: {u16}")
}
PoolError::InvalidStoreId(id_e, addr) => {
write!(f, "invalid store ID: {id_e}, address: {addr:?}")
}
PoolError::DataDoesNotExist(addr) => {
write!(f, "no data exists at address {addr:?}")
}
PoolError::InternalError(e) => {
write!(f, "internal error: {e}")
}
PoolError::ByteConversionError(e) => {
write!(f, "store error: {e}")
}
PoolError::LockError => {
write!(f, "lock error")
}
}
}
}
impl From<ByteConversionError> for PoolError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversionError(value)
}
}
#[cfg(feature = "std")]
impl Error for PoolError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
if let PoolError::InvalidStoreId(e, _) = self {
return Some(e);
}
None
}
}
/// Generic trait for pool providers which provide memory pools for variable sized packet data.
///
/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
-312
View File
@@ -1,312 +0,0 @@
use core::time::Duration;
use derive_new::new;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "std")]
#[allow(unused_imports)]
pub use std_mod::*;
use crate::request::MessageMetadata;
/// Generic state of a power switch, including unknown and faulty states.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SwitchState {
    /// Switch is known to be off.
    Off = 0,
    /// Switch is known to be on.
    On = 1,
    /// Switch state has not been determined.
    Unknown = 2,
    /// Switch is in a faulty state.
    Faulty = 3,
}
/// Binary subset of [SwitchState]: a switch which is definitely on or off.
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum SwitchStateBinary {
    /// Switch is off.
    Off = 0,
    /// Switch is on.
    On = 1,
}
impl TryFrom<SwitchState> for SwitchStateBinary {
    type Error = ();

    /// Fails for [SwitchState::Unknown] and [SwitchState::Faulty], which have no binary
    /// representation.
    fn try_from(value: SwitchState) -> Result<Self, Self::Error> {
        if value == SwitchState::Off {
            return Ok(Self::Off);
        }
        if value == SwitchState::On {
            return Ok(Self::On);
        }
        Err(())
    }
}
impl<T: Into<u64>> From<T> for SwitchStateBinary {
    /// Zero maps to [Self::Off]; any non-zero value maps to [Self::On].
    fn from(value: T) -> Self {
        match value.into() {
            0 => SwitchStateBinary::Off,
            _ => SwitchStateBinary::On,
        }
    }
}
impl From<SwitchStateBinary> for SwitchState {
    /// Lossless widening conversion into the full switch state.
    fn from(value: SwitchStateBinary) -> Self {
        if value == SwitchStateBinary::On {
            return Self::On;
        }
        Self::Off
    }
}
/// Identifier type for a power switch.
pub type SwitchId = u16;
/// Generic trait for a device capable of turning on and off switches.
pub trait PowerSwitcherCommandSender<SwitchType: Into<u16>> {
    type Error: core::fmt::Debug;

    /// Sends a command to turn the given switch on.
    fn send_switch_on_cmd(
        &self,
        requestor_info: MessageMetadata,
        switch_id: SwitchType,
    ) -> Result<(), Self::Error>;

    /// Sends a command to turn the given switch off.
    fn send_switch_off_cmd(
        &self,
        requestor_info: MessageMetadata,
        switch_id: SwitchType,
    ) -> Result<(), Self::Error>;
}
/// Trait for querying the state of power switches.
pub trait PowerSwitchInfo<SwitchType> {
    type Error: core::fmt::Debug;

    /// Retrieve the switch state
    fn switch_state(&self, switch_id: SwitchType) -> Result<SwitchState, Self::Error>;

    /// Convenience helper: true if the switch state is exactly [SwitchState::On].
    fn is_switch_on(&self, switch_id: SwitchType) -> Result<bool, Self::Error> {
        Ok(self.switch_state(switch_id)? == SwitchState::On)
    }

    /// The maximum delay it will take to change a switch.
    ///
    /// This may take into account the time to send a command, wait for it to be executed, and
    /// see the switch changed.
    fn switch_delay_ms(&self) -> Duration;
}
/// Request to drive a switch to a target binary state.
#[derive(new)]
pub struct SwitchRequest {
    // ID of the switch to command.
    switch_id: SwitchId,
    // Desired end state of the switch.
    target_state: SwitchStateBinary,
}

impl SwitchRequest {
    /// ID of the switch to command.
    pub fn switch_id(&self) -> SwitchId {
        self.switch_id
    }

    /// Desired end state of the switch.
    pub fn target_state(&self) -> SwitchStateBinary {
        self.target_state
    }
}
#[cfg(feature = "std")]
pub mod std_mod {
    use std::sync::mpsc;

    use crate::{
        queue::GenericSendError,
        request::{GenericMessage, MessageMetadata},
    };

    use super::*;

    /// Unbounded mpsc sender for switch requests.
    pub type MpscSwitchCmdSender = mpsc::Sender<GenericMessage<SwitchRequest>>;
    /// Bounded (synchronous) mpsc sender for switch requests.
    pub type MpscSwitchCmdSenderBounded = mpsc::SyncSender<GenericMessage<SwitchRequest>>;

    impl<SwitchType: Into<u16>> PowerSwitcherCommandSender<SwitchType> for MpscSwitchCmdSender {
        type Error = GenericSendError;

        /// Sends a switch ON request. A dropped receiver maps to
        /// [GenericSendError::RxDisconnected].
        fn send_switch_on_cmd(
            &self,
            requestor_info: MessageMetadata,
            switch_id: SwitchType,
        ) -> Result<(), Self::Error> {
            self.send(GenericMessage::new(
                requestor_info,
                SwitchRequest::new(switch_id.into(), SwitchStateBinary::On),
            ))
            .map_err(|_| GenericSendError::RxDisconnected)
        }

        /// Sends a switch OFF request. A dropped receiver maps to
        /// [GenericSendError::RxDisconnected].
        fn send_switch_off_cmd(
            &self,
            requestor_info: MessageMetadata,
            switch_id: SwitchType,
        ) -> Result<(), Self::Error> {
            self.send(GenericMessage::new(
                requestor_info,
                SwitchRequest::new(switch_id.into(), SwitchStateBinary::Off),
            ))
            .map_err(|_| GenericSendError::RxDisconnected)
        }
    }

    impl<SwitchType: Into<u16>> PowerSwitcherCommandSender<SwitchType> for MpscSwitchCmdSenderBounded {
        type Error = GenericSendError;

        /// Non-blocking send of a switch ON request. A full queue maps to
        /// [GenericSendError::QueueFull], a dropped receiver to
        /// [GenericSendError::RxDisconnected].
        fn send_switch_on_cmd(
            &self,
            requestor_info: MessageMetadata,
            switch_id: SwitchType,
        ) -> Result<(), Self::Error> {
            self.try_send(GenericMessage::new(
                requestor_info,
                SwitchRequest::new(switch_id.into(), SwitchStateBinary::On),
            ))
            .map_err(|e| match e {
                mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
                mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
            })
        }

        /// Non-blocking send of a switch OFF request. A full queue maps to
        /// [GenericSendError::QueueFull], a dropped receiver to
        /// [GenericSendError::RxDisconnected].
        fn send_switch_off_cmd(
            &self,
            requestor_info: MessageMetadata,
            switch_id: SwitchType,
        ) -> Result<(), Self::Error> {
            self.try_send(GenericMessage::new(
                requestor_info,
                SwitchRequest::new(switch_id.into(), SwitchStateBinary::Off),
            ))
            .map_err(|e| match e {
                mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
                mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
            })
        }
    }
}
#[cfg(test)]
mod tests {
    use std::sync::mpsc::{self, TryRecvError};

    use crate::{ComponentId, queue::GenericSendError, request::GenericMessage};

    use super::*;

    const TEST_REQ_ID: u32 = 2;
    const TEST_SENDER_ID: ComponentId = 5;
    const TEST_SWITCH_ID: u16 = 0x1ff;

    /// Checks the metadata and switch ID shared by all switch requests in these tests.
    fn common_checks(request: &GenericMessage<SwitchRequest>) {
        assert_eq!(request.requestor_info.sender_id(), TEST_SENDER_ID);
        assert_eq!(request.requestor_info.request_id(), TEST_REQ_ID);
        assert_eq!(request.message.switch_id(), TEST_SWITCH_ID);
    }

    #[test]
    fn test_command_switch_sending_mpsc_regular_on_cmd() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::channel::<GenericMessage<SwitchRequest>>();
        switch_cmd_tx
            .send_switch_on_cmd(
                MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
                TEST_SWITCH_ID,
            )
            .expect("sending switch cmd failed");
        let request = switch_cmd_rx
            .recv()
            .expect("receiving switch request failed");
        common_checks(&request);
        assert_eq!(request.message.target_state(), SwitchStateBinary::On);
    }

    #[test]
    fn test_command_switch_sending_mpsc_regular_off_cmd() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::channel::<GenericMessage<SwitchRequest>>();
        switch_cmd_tx
            .send_switch_off_cmd(
                MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
                TEST_SWITCH_ID,
            )
            .expect("sending switch cmd failed");
        let request = switch_cmd_rx
            .recv()
            .expect("receiving switch request failed");
        common_checks(&request);
        assert_eq!(request.message.target_state(), SwitchStateBinary::Off);
    }

    #[test]
    fn test_command_switch_sending_mpsc_regular_rx_disconnected() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::channel::<GenericMessage<SwitchRequest>>();
        drop(switch_cmd_rx);
        let result = switch_cmd_tx.send_switch_off_cmd(
            MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
            TEST_SWITCH_ID,
        );
        assert!(result.is_err());
        // Previously the matches! result was discarded, so the error variant was never
        // actually verified. Wrap in assert! to make the check effective.
        assert!(matches!(
            result.unwrap_err(),
            GenericSendError::RxDisconnected
        ));
    }

    #[test]
    fn test_command_switch_sending_mpsc_sync_on_cmd() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::sync_channel::<GenericMessage<SwitchRequest>>(3);
        switch_cmd_tx
            .send_switch_on_cmd(
                MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
                TEST_SWITCH_ID,
            )
            .expect("sending switch cmd failed");
        let request = switch_cmd_rx
            .recv()
            .expect("receiving switch request failed");
        common_checks(&request);
        assert_eq!(request.message.target_state(), SwitchStateBinary::On);
    }

    #[test]
    fn test_command_switch_sending_mpsc_sync_off_cmd() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::sync_channel::<GenericMessage<SwitchRequest>>(3);
        switch_cmd_tx
            .send_switch_off_cmd(
                MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
                TEST_SWITCH_ID,
            )
            .expect("sending switch cmd failed");
        let request = switch_cmd_rx
            .recv()
            .expect("receiving switch request failed");
        common_checks(&request);
        assert_eq!(request.message.target_state(), SwitchStateBinary::Off);
    }

    #[test]
    fn test_command_switch_sending_mpsc_sync_rx_disconnected() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::sync_channel::<GenericMessage<SwitchRequest>>(1);
        drop(switch_cmd_rx);
        let result = switch_cmd_tx.send_switch_off_cmd(
            MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
            TEST_SWITCH_ID,
        );
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            GenericSendError::RxDisconnected
        ));
    }

    #[test]
    fn test_command_switch_sending_mpsc_sync_queue_full() {
        let (switch_cmd_tx, switch_cmd_rx) = mpsc::sync_channel::<GenericMessage<SwitchRequest>>(1);
        let mut result = switch_cmd_tx.send_switch_off_cmd(
            MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
            TEST_SWITCH_ID,
        );
        assert!(result.is_ok());
        result = switch_cmd_tx.send_switch_off_cmd(
            MessageMetadata::new(TEST_REQ_ID, TEST_SENDER_ID),
            TEST_SWITCH_ID,
        );
        assert!(result.is_err());
        assert!(matches!(
            result.unwrap_err(),
            GenericSendError::QueueFull(None)
        ));
        // The first message is still queued, so the receiver must yield it, not Empty.
        assert!(switch_cmd_rx.try_recv().is_ok());
        assert!(matches!(switch_cmd_rx.try_recv(), Err(TryRecvError::Empty)));
    }
}
-464
View File
@@ -1,464 +0,0 @@
use crate::events_legacy::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events_legacy::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
#[cfg(feature = "alloc")]
use crate::pus::EcssTmSender;
use crate::pus::EcssTmtcError;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "heapless")]
pub use heapless_mod::*;
/// This trait allows the PUS event manager implementation to stay generic over various types
/// of backend containers.
///
/// These backend containers keep track on whether a particular event is enabled or disabled for
/// reporting and also expose a simple API to enable or disable the event reporting.
///
/// For example, a straight forward implementation for host systems could use a
/// [hash set](https://docs.rs/hashbrown/latest/hashbrown/struct.HashSet.html)
/// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
pub trait PusEventReportingMapProvider<Event: GenericEvent> {
type Error;
fn event_enabled(&self, event: &Event) -> bool;
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
}
#[cfg(feature = "heapless")]
pub mod heapless_mod {
    use super::*;
    use crate::events::LargestEventRaw;
    use core::marker::PhantomData;

    // TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
    // regular Event type again.
    /// Reporting map for no-allocation use cases, backed by a fixed-capacity
    /// index set which stores the raw IDs of all *disabled* events. Events are
    /// enabled (reported) by default, mirroring the semantics of the alloc-based
    /// default reporting map.
    #[derive(Default)]
    pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
        disabled: heapless::index_set::FnvIndexSet<LargestEventRaw, N>,
        phantom: PhantomData<Provider>,
    }

    impl<const N: usize, Provider: GenericEvent> PusEventReportingMapProvider<Provider>
        for HeaplessPusMgmtBackendProvider<N, Provider>
    {
        type Error = ();

        /// An event is enabled for reporting unless it was explicitly disabled.
        fn event_enabled(&self, event: &Provider) -> bool {
            // BUGFIX: the previous implementation returned `contains(..)` without negation,
            // which inverted the semantics: `disabled` holds the disabled events, so
            // membership means the event is NOT enabled.
            !self.disabled.contains(&event.raw_as_largest_type())
        }

        /// Enabling reporting removes the event from the disabled set.
        /// Returns whether the event was previously disabled.
        fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
            // BUGFIX: previously this INSERTED into the disabled set, which disabled the
            // event instead of enabling it.
            Ok(self.disabled.remove(&event.raw_as_largest_type()))
        }

        /// Disabling reporting inserts the event into the disabled set.
        /// Returns whether the event was previously enabled, and fails with `()` if the
        /// fixed-capacity set is full.
        fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
            // BUGFIX: previously this REMOVED from the disabled set, which enabled the
            // event instead of disabling it.
            self.disabled
                .insert(event.raw_as_largest_type())
                .map_err(|_| ())
        }
    }
}
/// Request to enable or disable TM generation for a specific event.
#[derive(Debug, PartialEq, Eq)]
pub enum EventRequest<Event: GenericEvent = EventU32> {
    Enable(Event),
    Disable(Event),
}
/// [EventRequest] bundled with the verification token of the requesting telecommand so that
/// the receiving event manager can complete the verification sequence for that telecommand.
#[derive(Debug)]
pub struct EventRequestWithToken<Event: GenericEvent = EventU32> {
    pub request: EventRequest<Event>,
    pub token: TcStateToken,
}
/// Error type for PUS event management operations.
#[derive(Debug)]
pub enum EventManError {
    EcssTmtcError(EcssTmtcError),
    // NOTE(review): variant name contains a typo ("Missmatch") but is public API, so renaming
    // would be a breaking change. The two fields presumably carry the expected and the actual
    // severity — not constructed anywhere in this file, TODO confirm against callers.
    SeverityMissmatch(Severity, Severity),
}

impl From<EcssTmtcError> for EventManError {
    fn from(v: EcssTmtcError) -> Self {
        Self::EcssTmtcError(v)
    }
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use core::marker::PhantomData;
use crate::{
events_legacy::EventU16,
params::{Params, WritableToBeBytes},
pus::event::{DummyEventHook, EventTmHook},
};
use super::*;
/// Default backend provider which uses a hash set as the event reporting status container
/// like mentioned in the example of the [PusEventReportingMapProvider] documentation.
///
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusEventReportingMap<Event: GenericEvent = EventU32> {
    // Set of events for which TM reporting was explicitly disabled.
    // Events are enabled (reported) by default.
    disabled: HashSet<Event>,
}

impl<Event: GenericEvent> Default for DefaultPusEventReportingMap<Event> {
    fn default() -> Self {
        Self {
            disabled: HashSet::default(),
        }
    }
}

impl<Event: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
    PusEventReportingMapProvider<Event> for DefaultPusEventReportingMap<Event>
{
    type Error = ();

    /// An event is enabled unless it is contained in the disabled set.
    fn event_enabled(&self, event: &Event) -> bool {
        !self.disabled.contains(event)
    }

    /// Returns true if the event was previously disabled.
    fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
        Ok(self.disabled.remove(event))
    }

    /// Returns true if the event was previously enabled.
    fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
        Ok(self.disabled.insert(*event))
    }
}
/// Result of generating an event TM from generic [Params].
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct EventGenerationResult {
    // True if the event was enabled for reporting, i.e. a TM was generated.
    pub event_was_enabled: bool,
    // False if the parameter variant could not be serialized into the TM user data
    // and was therefore dropped.
    pub params_were_propagated: bool,
}
/// Generic PUS event TM creator which consults a [PusEventReportingMapProvider] backend
/// before generating event telemetry through the contained [EventReporter].
pub struct PusEventTmCreatorWithMap<
    ReportingMap: PusEventReportingMapProvider<Event>,
    Event: GenericEvent,
    EventTmHookInstance: EventTmHook = DummyEventHook,
> {
    pub reporter: EventReporter<EventTmHookInstance>,
    // Tracks per-event enable/disable state; checked before every TM generation.
    reporting_map: ReportingMap,
    phantom: PhantomData<Event>,
}
impl<
    ReportingMap: PusEventReportingMapProvider<Event>,
    Event: GenericEvent,
    EventTmHookInstance: EventTmHook,
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHookInstance>
{
    /// Create a new creator from a TM [EventReporter] and a reporting map backend.
    pub fn new(reporter: EventReporter<EventTmHookInstance>, backend: ReportingMap) -> Self {
        Self {
            reporter,
            reporting_map: backend,
            phantom: PhantomData,
        }
    }

    /// Enable TM generation for the given event. The boolean return value is backend
    /// specific, see [PusEventReportingMapProvider::enable_event_reporting].
    pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
        self.reporting_map.enable_event_reporting(event)
    }

    /// Disable TM generation for the given event. The boolean return value is backend
    /// specific, see [PusEventReportingMapProvider::disable_event_reporting].
    pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
        self.reporting_map.disable_event_reporting(event)
    }

    /// Generate an event TM for the given event if it is enabled for reporting.
    ///
    /// Returns `Ok(true)` if the event TM was generated and sent, `Ok(false)` if reporting
    /// is disabled for the event.
    pub fn generate_pus_event_tm_generic(
        &self,
        sender: &(impl EcssTmSender + ?Sized),
        time_stamp: &[u8],
        event: Event,
        params: Option<&[u8]>,
    ) -> Result<bool, EventManError> {
        if !self.reporting_map.event_enabled(&event) {
            return Ok(false);
        }
        // Map the event severity to the corresponding event report generator call.
        match event.severity() {
            Severity::Info => self
                .reporter
                .event_info(sender, time_stamp, event, params)
                .map(|_| true)
                .map_err(|e| e.into()),
            Severity::Low => self
                .reporter
                .event_low_severity(sender, time_stamp, event, params)
                .map(|_| true)
                .map_err(|e| e.into()),
            Severity::Medium => self
                .reporter
                .event_medium_severity(sender, time_stamp, event, params)
                .map(|_| true)
                .map_err(|e| e.into()),
            Severity::High => self
                .reporter
                .event_high_severity(sender, time_stamp, event, params)
                .map(|_| true)
                .map_err(|e| e.into()),
        }
    }

    /// Like [Self::generate_pus_event_tm_generic], but accepts generic [Params].
    ///
    /// `small_data_buf` is a staging buffer used to serialize heapless parameter variants;
    /// it must be large enough for the largest expected heapless parameter, otherwise the
    /// internal slicing panics (pre-existing contract, unchanged here).
    /// Parameter variants which can not be serialized into a raw byte format are not
    /// propagated, which is signalled via [EventGenerationResult::params_were_propagated].
    pub fn generate_pus_event_tm_generic_with_generic_params(
        &self,
        sender: &(impl EcssTmSender + ?Sized),
        time_stamp: &[u8],
        event: Event,
        small_data_buf: &mut [u8],
        params: Option<&Params>,
    ) -> Result<EventGenerationResult, EventManError> {
        let mut result = EventGenerationResult {
            event_was_enabled: false,
            params_were_propagated: true,
        };
        if params.is_none() {
            result.event_was_enabled =
                self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?;
            return Ok(result);
        }
        let params = params.unwrap();
        result.event_was_enabled = match params {
            Params::Heapless(heapless_param) => {
                let written_len = heapless_param.written_len();
                heapless_param
                    .write_to_be_bytes(&mut small_data_buf[..written_len])
                    .map_err(EcssTmtcError::ByteConversion)?;
                // BUGFIX: only pass the written portion of the staging buffer. Previously the
                // full buffer was passed, which appended trailing garbage bytes after the
                // serialized parameter to the event TM user data.
                self.generate_pus_event_tm_generic(
                    sender,
                    time_stamp,
                    event,
                    Some(&small_data_buf[..written_len]),
                )?
            }
            Params::Vec(vec) => {
                self.generate_pus_event_tm_generic(sender, time_stamp, event, Some(vec))?
            }
            Params::String(string) => self.generate_pus_event_tm_generic(
                sender,
                time_stamp,
                event,
                Some(string.as_bytes()),
            )?,
            _ => {
                // Other variants (e.g. store parameters) have no raw representation here.
                result.params_were_propagated = false;
                self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?
            }
        };
        Ok(result)
    }
}
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHookInstance: EventTmHook>
    PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHookInstance>
{
    /// Convenience constructor which wires up the [DefaultPusEventReportingMap] backend.
    pub fn new_with_default_backend(reporter: EventReporter<EventTmHookInstance>) -> Self {
        // Delegate to the generic constructor instead of duplicating the field assembly.
        Self::new(reporter, DefaultPusEventReportingMap::default())
    }
}
impl<ReportingMap: PusEventReportingMapProvider<EventU32>>
    PusEventTmCreatorWithMap<ReportingMap, EventU32>
{
    /// Enable TM generation for a severity-typed [EventU32TypedSev] event.
    pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
        &mut self,
        event: &EventU32TypedSev<Severity>,
    ) -> Result<bool, ReportingMap::Error> {
        self.reporting_map.enable_event_reporting(event.as_ref())
    }

    /// Disable TM generation for a severity-typed [EventU32TypedSev] event.
    pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
        &mut self,
        event: &EventU32TypedSev<Severity>,
    ) -> Result<bool, ReportingMap::Error> {
        self.reporting_map.disable_event_reporting(event.as_ref())
    }

    /// Severity-typed variant of [Self::generate_pus_event_tm_generic].
    pub fn generate_pus_event_tm<Severity: HasSeverity>(
        &self,
        sender: &(impl EcssTmSender + ?Sized),
        time_stamp: &[u8],
        event: EventU32TypedSev<Severity>,
        aux_data: Option<&[u8]>,
    ) -> Result<bool, EventManError> {
        self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
    }
}
/// Default PUS event TM creator for [EventU16] events with the hash-set based reporting map.
pub type DefaultPusEventU16TmCreator<EventTmHook = DummyEventHook> =
    PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU16>, EventU16, EventTmHook>;
/// Default PUS event TM creator for [EventU32] events with the hash-set based reporting map.
pub type DefaultPusEventU32TmCreator<EventTmHook = DummyEventHook> =
    PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU32>, EventU32, EventTmHook>;
}
#[cfg(test)]
mod tests {
    use alloc::string::{String, ToString};
    use alloc::vec;
    use arbitrary_int::{u11, u21};
    use spacepackets::ecss::PusPacket;
    use spacepackets::ecss::event::MessageSubtypeId;
    use spacepackets::ecss::tm::PusTmReader;

    use super::*;
    use crate::request::UniqueApidTargetId;
    use crate::{events_legacy::SeverityInfo, tmtc::PacketAsVec};
    use std::sync::mpsc::{self, TryRecvError};

    const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
    const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
    const EMPTY_STAMP: [u8; 7] = [0; 7];
    const TEST_APID: u11 = u11::new(0x02);
    const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, u21::new(0x05));

    // Creator built via the convenience constructor.
    fn create_basic_man_1() -> DefaultPusEventU32TmCreator {
        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
        PusEventTmCreatorWithMap::new_with_default_backend(reporter)
    }

    // Creator built via the generic constructor with an explicit backend.
    fn create_basic_man_2() -> DefaultPusEventU32TmCreator {
        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
        let backend = DefaultPusEventReportingMap::default();
        PusEventTmCreatorWithMap::new(reporter, backend)
    }

    #[test]
    fn test_basic() {
        let event_man = create_basic_man_1();
        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let event_sent = event_man
            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
            .expect("Sending info event failed");

        assert!(event_sent);
        // Will not check packet here, correctness of packet was tested somewhere else
        event_rx.try_recv().expect("Receiving event TM failed");
    }

    #[test]
    fn test_disable_event() {
        let mut event_man = create_basic_man_2();
        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
        assert!(res.is_ok());
        assert!(res.unwrap());
        let mut event_sent = event_man
            .generate_pus_event_tm_generic(&event_tx, &EMPTY_STAMP, LOW_SEV_EVENT, None)
            .expect("Sending low severity event failed");
        // Disabled event must not produce a TM.
        assert!(!event_sent);
        let res = event_rx.try_recv();
        assert!(res.is_err());
        assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
        // Check that only the low severity event was disabled
        event_sent = event_man
            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
            .expect("Sending info event failed");
        assert!(event_sent);
        event_rx.try_recv().expect("No info event received");
    }

    #[test]
    fn test_reenable_event() {
        let mut event_man = create_basic_man_1();
        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
        assert!(res.is_ok());
        assert!(res.unwrap());
        res = event_man.enable_tm_for_event_with_sev(&INFO_EVENT);
        assert!(res.is_ok());
        assert!(res.unwrap());
        // After disable + enable, TM generation must work again.
        let event_sent = event_man
            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
            .expect("Sending info event failed");
        assert!(event_sent);
        event_rx.try_recv().expect("No info event received");
    }

    #[test]
    fn test_event_with_generic_string_param() {
        let event_man = create_basic_man_1();
        let mut small_data_buf = [0; 128];
        let param_data = "hello world";
        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let res = event_man.generate_pus_event_tm_generic_with_generic_params(
            &event_tx,
            &EMPTY_STAMP,
            INFO_EVENT.into(),
            &mut small_data_buf,
            Some(&param_data.to_string().into()),
        );
        assert!(res.is_ok());
        let res = res.unwrap();
        assert!(res.event_was_enabled);
        assert!(res.params_were_propagated);
        let event_tm = event_rx.try_recv().expect("no event received");
        let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
        assert_eq!(tm.service_type_id(), 5);
        assert_eq!(
            tm.message_subtype_id(),
            MessageSubtypeId::TmInfoReport as u8
        );
        // User data layout: 4 bytes raw event ID followed by the parameter bytes.
        assert_eq!(tm.user_data().len(), 4 + param_data.len());
        let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
        assert_eq!(u32_event, INFO_EVENT.raw());
        let string_data = String::from_utf8_lossy(&tm.user_data()[4..]);
        assert_eq!(string_data, param_data);
    }

    #[test]
    fn test_event_with_generic_vec_param() {
        let event_man = create_basic_man_1();
        let mut small_data_buf = [0; 128];
        let param_data = vec![1, 2, 3, 4];
        let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
        let res = event_man.generate_pus_event_tm_generic_with_generic_params(
            &event_tx,
            &EMPTY_STAMP,
            INFO_EVENT.into(),
            &mut small_data_buf,
            Some(&param_data.clone().into()),
        );
        assert!(res.is_ok());
        let res = res.unwrap();
        assert!(res.event_was_enabled);
        assert!(res.params_were_propagated);
        let event_tm = event_rx.try_recv().expect("no event received");
        let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
        assert_eq!(tm.service_type_id(), 5);
        assert_eq!(
            tm.message_subtype_id(),
            MessageSubtypeId::TmInfoReport as u8
        );
        // User data layout: 4 bytes raw event ID followed by the parameter bytes.
        assert_eq!(tm.user_data().len(), 4 + param_data.len());
        let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
        assert_eq!(u32_event, INFO_EVENT.raw());
        let vec_data = tm.user_data()[4..].to_vec();
        assert_eq!(vec_data, param_data);
    }

    #[test]
    fn test_event_with_generic_store_param_not_propagated() {
        // TODO: Test this.
    }

    #[test]
    fn test_event_with_generic_heapless_param() {
        // TODO: Test this.
    }
}
-355
View File
@@ -1,355 +0,0 @@
use crate::events_legacy::EventU32;
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::TcStateToken;
use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
use crate::queue::GenericSendError;
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::event::MessageSubtypeId;
use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider;
use super::{
CacheAndReadRawEcssTc, EcssTcReceiver, EcssTmSender, GenericConversionError,
GenericRoutingError, HandlingStatus, PusServiceHelper,
};
/// PUS service 5 (event reporting) telecommand handler.
///
/// Processes enable/disable event generation telecommands and forwards the resulting
/// [EventRequestWithToken] messages to an event management backend via a channel.
pub struct PusEventServiceHandler<
    TcReceiver: EcssTcReceiver,
    TmSender: EcssTmSender,
    TcInMemConverter: CacheAndReadRawEcssTc,
    VerificationReporter: VerificationReportingProvider,
> {
    pub service_helper:
        PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
    // Channel used to forward enable/disable requests to the event manager.
    event_request_tx: Sender<EventRequestWithToken>,
}
impl<
    TcReceiver: EcssTcReceiver,
    TmSender: EcssTmSender,
    TcInMemConverter: CacheAndReadRawEcssTc,
    VerificationReporter: VerificationReportingProvider,
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
    /// Create a new service 5 handler from a generic PUS service helper and the channel
    /// used to forward event requests.
    pub fn new(
        service_helper: PusServiceHelper<
            TcReceiver,
            TmSender,
            TcInMemConverter,
            VerificationReporter,
        >,
        event_request_tx: Sender<EventRequestWithToken>,
    ) -> Self {
        Self {
            service_helper,
            event_request_tx,
        }
    }

    /// Poll the TC receiver and handle at most one service 5 telecommand.
    ///
    /// `error_callback` is invoked for partial errors (e.g. failed verification reports)
    /// which do not abort packet handling. `time_stamp` is used for the generated
    /// verification reports.
    pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
        &mut self,
        mut error_callback: ErrorCb,
        time_stamp: &[u8],
    ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
        let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
        if possible_packet.is_none() {
            return Ok(HandlingStatus::Empty.into());
        }
        let ecss_tc_and_token = possible_packet.unwrap();
        self.service_helper
            .tc_in_mem_converter_mut()
            .cache(&ecss_tc_and_token.tc_in_memory)?;
        let tc = self.service_helper.tc_in_mem_converter().convert()?;
        let subservice = tc.message_subtype_id();
        let srv = MessageSubtypeId::try_from(subservice);
        // Subservice IDs unknown to this implementation are handed back to the caller.
        if srv.is_err() {
            return Ok(DirectPusPacketHandlerResult::CustomSubservice(
                tc.message_subtype_id(),
                ecss_tc_and_token.token,
            ));
        }
        // Shared logic for enable and disable requests: parse the 4-byte event ID, send the
        // verification start report and forward the request to the event manager.
        let mut handle_enable_disable_request =
            |enable: bool| -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
                if tc.user_data().len() < 4 {
                    return Err(GenericConversionError::NotEnoughAppData {
                        expected: 4,
                        found: tc.user_data().len(),
                    }
                    .into());
                }
                let user_data = tc.user_data();
                let event_u32 =
                    EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
                let mut token: TcStateToken = ecss_tc_and_token.token.into();
                // A failed start report is only a partial error: the request is still routed,
                // but with the pre-start token.
                match self.service_helper.common.verif_reporter.start_success(
                    &self.service_helper.common.tm_sender,
                    ecss_tc_and_token.token,
                    time_stamp,
                ) {
                    Ok(start_token) => {
                        token = start_token.into();
                    }
                    Err(e) => {
                        error_callback(&PartialPusHandlingError::Verification(e));
                    }
                }
                let event_req_with_token = if enable {
                    EventRequestWithToken {
                        request: EventRequest::Enable(event_u32),
                        token,
                    }
                } else {
                    EventRequestWithToken {
                        request: EventRequest::Disable(event_u32),
                        token,
                    }
                };
                self.event_request_tx
                    .send(event_req_with_token)
                    .map_err(|_| {
                        PusPacketHandlingError::RequestRouting(GenericRoutingError::Send(
                            GenericSendError::RxDisconnected,
                        ))
                    })?;
                Ok(HandlingStatus::HandledOne.into())
            };
        match srv.unwrap() {
            // TM report subservices are invalid in a telecommand.
            MessageSubtypeId::TmInfoReport
            | MessageSubtypeId::TmLowSeverityReport
            | MessageSubtypeId::TmMediumSeverityReport
            | MessageSubtypeId::TmHighSeverityReport => {
                return Err(PusPacketHandlingError::RequestConversion(
                    GenericConversionError::WrongService(tc.message_subtype_id()),
                ));
            }
            MessageSubtypeId::TcEnableEventGeneration => {
                handle_enable_disable_request(true)?;
            }
            MessageSubtypeId::TcDisableEventGeneration => {
                handle_enable_disable_request(false)?;
            }
            MessageSubtypeId::TcReportDisabledList | MessageSubtypeId::TmDisabledEventsReport => {
                return Ok(DirectPusPacketHandlerResult::SubserviceNotImplemented(
                    subservice,
                    ecss_tc_and_token.token,
                ));
            }
        }
        Ok(HandlingStatus::HandledOne.into())
    }
}
#[cfg(test)]
mod tests {
    use arbitrary_int::traits::Integer as _;
    use arbitrary_int::u14;
    use delegate::delegate;
    use spacepackets::ecss::event::MessageSubtypeId;
    use spacepackets::ecss::{CreatorConfig, MessageTypeId};
    use spacepackets::time::{TimeWriter, cds};
    use spacepackets::util::UnsignedEnum;
    use spacepackets::{
        SpHeader,
        ecss::{
            tc::{PusTcCreator, PusTcSecondaryHeader},
            tm::PusTmReader,
        },
    };
    use std::sync::mpsc::{self, Sender};

    use crate::pus::event_man::EventRequest;
    use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID};
    use crate::pus::verification::{
        RequestId, VerificationReporter, VerificationReportingProvider,
    };
    use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
    use crate::tmtc::PacketSenderWithSharedPool;
    use crate::{
        events_legacy::EventU32,
        pus::{
            DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, PusPacketHandlingError,
            event_man::EventRequestWithToken,
            tests::PusServiceHandlerWithSharedStoreCommon,
            verification::{TcStateAccepted, VerificationToken},
        },
    };

    use super::PusEventServiceHandler;

    const TEST_EVENT_0: EventU32 = EventU32::new(crate::events_legacy::Severity::Info, 5, 25);

    // Test fixture: service 5 handler wired to the shared-pool test harness.
    struct Pus5HandlerWithStoreTester {
        common: PusServiceHandlerWithSharedStoreCommon,
        handler: PusEventServiceHandler<
            MpscTcReceiver,
            PacketSenderWithSharedPool,
            EcssTcInSharedPoolCacher,
            VerificationReporter,
        >,
    }

    impl Pus5HandlerWithStoreTester {
        pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
            let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0);
            Self {
                common,
                handler: PusEventServiceHandler::new(srv_handler, event_request_tx),
            }
        }
    }

    impl PusTestHarness for Pus5HandlerWithStoreTester {
        // Runs the acceptance step so tests start from an accepted TC token.
        fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
            let init_token = self
                .handler
                .service_helper
                .verif_reporter_mut()
                .start_verification(tc);
            self.handler
                .service_helper
                .verif_reporter()
                .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
                .expect("acceptance success failure")
        }

        fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
            self.common
                .send_tc(self.handler.service_helper.id(), token, tc);
        }

        delegate! {
            to self.common {
                fn read_next_tm(&mut self) -> PusTmReader<'_>;
                fn check_no_tm_available(&self) -> bool;
                fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
            }
        }
    }

    impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
        fn handle_one_tc(
            &mut self,
        ) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
            let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
            self.handler.poll_and_handle_next_tc(|_| {}, &time_stamp)
        }
    }

    // Common test logic: send an enable/disable TC, check the acceptance (1) and start (3)
    // verification reports and verify the forwarded event request.
    fn event_test(
        test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler),
        subservice: MessageSubtypeId,
        expected_event_req: EventRequest,
        event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
    ) {
        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
        let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(5, subservice as u8));
        let mut app_data = [0; 4];
        TEST_EVENT_0
            .write_to_be_bytes(&mut app_data)
            .expect("writing test event failed");
        let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
        let token = test_harness.start_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let request_id = token.request_id();
        test_harness.handle_one_tc().unwrap();
        test_harness.check_next_verification_tm(1, request_id);
        test_harness.check_next_verification_tm(3, request_id);
        // Completion TM is not generated for us.
        assert!(test_harness.check_no_tm_available());
        let event_request = event_req_receiver
            .try_recv()
            .expect("no event request received");
        assert_eq!(expected_event_req, event_request.request);
    }

    #[test]
    fn test_enabling_event_reporting() {
        let (event_request_tx, event_request_rx) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
        event_test(
            &mut test_harness,
            MessageSubtypeId::TcEnableEventGeneration,
            EventRequest::Enable(TEST_EVENT_0),
            event_request_rx,
        );
    }

    #[test]
    fn test_disabling_event_reporting() {
        let (event_request_tx, event_request_rx) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
        event_test(
            &mut test_harness,
            MessageSubtypeId::TcDisableEventGeneration,
            EventRequest::Disable(TEST_EVENT_0),
            event_request_rx,
        );
    }

    #[test]
    fn test_empty_tc_queue() {
        let (event_request_tx, _) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
        let result = test_harness.handle_one_tc();
        assert!(result.is_ok());
        let result = result.unwrap();
        assert!(
            matches!(
                result,
                DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty)
            ),
            "unexpected result type {result:?}"
        )
    }

    #[test]
    fn test_sending_custom_subservice() {
        let (event_request_tx, _) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
        // 200 is not a known service 5 subservice ID.
        let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(5, 200));
        let ping_tc =
            PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
        let token = test_harness.start_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let result = test_harness.handle_one_tc();
        assert!(result.is_ok());
        let result = result.unwrap();
        if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
            assert_eq!(subservice, 200);
        } else {
            panic!("unexpected result type {result:?}")
        }
    }

    #[test]
    fn test_sending_invalid_app_data() {
        let (event_request_tx, _) = mpsc::channel();
        let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
        let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(
            5,
            MessageSubtypeId::TcEnableEventGeneration as u8,
        ));
        // Only 3 bytes of app data instead of the required 4-byte event ID.
        let ping_tc =
            PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], CreatorConfig::default());
        let token = test_harness.start_verification(&ping_tc);
        test_harness.send_tc(&token, &ping_tc);
        let result = test_harness.handle_one_tc();
        assert!(result.is_err());
        let result = result.unwrap_err();
        if let PusPacketHandlingError::RequestConversion(
            GenericConversionError::NotEnoughAppData { expected, found },
        ) = result
        {
            assert_eq!(expected, 4);
            assert_eq!(found, 3);
        } else {
            panic!("unexpected result type {result:?}")
        }
    }
}
-4
View File
@@ -25,10 +25,6 @@ use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::{ByteConversionError, SpHeader};
pub mod action;
pub mod event;
pub mod event_man;
#[cfg(feature = "std")]
pub mod event_srv;
pub mod mode;
pub mod scheduler;
#[cfg(feature = "std")]
+86 -123
View File
@@ -3,7 +3,7 @@
//! The core data structure of this module is the [PusScheduler]. This structure can be used
//! to perform the scheduling of telecommands like specified in the ECSS standard.
use arbitrary_int::{u11, u14};
use core::fmt::{Debug, Display, Formatter};
use core::fmt::Debug;
use core::time::Duration;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@@ -12,8 +12,6 @@ use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand, PusT
use spacepackets::ecss::{PusError, PusPacket, WritablePusPacket};
use spacepackets::time::{CcsdsTimeProvider, TimeReader, TimeWriter, TimestampError, UnixTime};
use spacepackets::{ByteConversionError, CcsdsPacket};
#[cfg(feature = "std")]
use std::error::Error;
use crate::pool::{PoolError, PoolProvider};
#[cfg(feature = "alloc")]
@@ -144,107 +142,39 @@ impl<TimeProvider: CcsdsTimeProvider + Clone> TimeWindow<TimeProvider> {
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ScheduleError {
PusError(PusError),
#[error("pus error: {0}")]
PusError(#[from] PusError),
/// The release time is within the time-margin added on top of the current time.
/// The first parameter is the current time, the second one the time margin, and the third one
/// the release time.
#[error("release time in margin")]
ReleaseTimeInTimeMargin {
current_time: UnixTime,
time_margin: Duration,
release_time: UnixTime,
},
/// Nested time-tagged commands are not allowed.
#[error("nested scheduled tc")]
NestedScheduledTc,
StoreError(PoolError),
#[error("store error")]
Pool(#[from] PoolError),
#[error("tc data empty")]
TcDataEmpty,
TimestampError(TimestampError),
#[error("timestamp error: {0}")]
TimestampError(#[from] TimestampError),
#[error("wrong subservice number {0}")]
WrongSubservice(u8),
#[error("wrong service number {0}")]
WrongService(u8),
ByteConversionError(ByteConversionError),
}
impl Display for ScheduleError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ScheduleError::PusError(e) => {
write!(f, "Pus Error: {e}")
}
ScheduleError::ReleaseTimeInTimeMargin {
current_time,
time_margin,
release_time,
} => {
write!(
f,
"time margin too short, current time: {current_time:?}, time margin: {time_margin:?}, release time: {release_time:?}"
)
}
ScheduleError::NestedScheduledTc => {
write!(f, "nested scheduling is not allowed")
}
ScheduleError::StoreError(e) => {
write!(f, "pus scheduling: {e}")
}
ScheduleError::TcDataEmpty => {
write!(f, "empty TC data field")
}
ScheduleError::TimestampError(e) => {
write!(f, "pus scheduling: {e}")
}
ScheduleError::WrongService(srv) => {
write!(f, "pus scheduling: wrong service number {srv}")
}
ScheduleError::WrongSubservice(subsrv) => {
write!(f, "pus scheduling: wrong subservice number {subsrv}")
}
ScheduleError::ByteConversionError(e) => {
write!(f, "pus scheduling: {e}")
}
}
}
}
impl From<PusError> for ScheduleError {
fn from(e: PusError) -> Self {
Self::PusError(e)
}
}
impl From<PoolError> for ScheduleError {
fn from(e: PoolError) -> Self {
Self::StoreError(e)
}
}
impl From<TimestampError> for ScheduleError {
fn from(e: TimestampError) -> Self {
Self::TimestampError(e)
}
}
impl From<ByteConversionError> for ScheduleError {
fn from(e: ByteConversionError) -> Self {
Self::ByteConversionError(e)
}
}
#[cfg(feature = "std")]
impl Error for ScheduleError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
ScheduleError::PusError(e) => Some(e),
ScheduleError::StoreError(e) => Some(e),
ScheduleError::TimestampError(e) => Some(e),
ScheduleError::ByteConversionError(e) => Some(e),
_ => None,
}
}
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
/// Generic trait for scheduler objects which are able to schedule ECSS PUS C packets.
pub trait PusSchedulerProvider {
pub trait PusScheduler {
type TimeProvider: CcsdsTimeProvider + TimeReader;
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError>;
@@ -405,7 +335,7 @@ pub mod alloc_mod {
///
/// Currently, sub-schedules and groups are not supported.
#[derive(Debug)]
pub struct PusScheduler {
pub struct PusSchedulerAlloc {
// TODO: Use MonotonicTime from tai-time crate instead of UnixTime and cache leap seconds.
// TODO: Introduce optional limit of commands stored in the TC map. If a limit is set,
// there will be a check for each insertion whether the map is full, making the memory
@@ -415,7 +345,8 @@ pub mod alloc_mod {
time_margin: Duration,
enabled: bool,
}
impl PusScheduler {
impl PusSchedulerAlloc {
/// Create a new PUS scheduler.
///
/// # Arguments
@@ -427,7 +358,7 @@ pub mod alloc_mod {
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
/// should be large enough to accomodate the largest expected TC packets.
pub fn new(init_current_time: UnixTime, time_margin: Duration) -> Self {
PusScheduler {
PusSchedulerAlloc {
tc_map: Default::default(),
current_time: init_current_time,
time_margin,
@@ -449,10 +380,12 @@ pub mod alloc_mod {
num_entries
}
#[inline]
pub fn update_time(&mut self, current_time: UnixTime) {
self.current_time = current_time;
}
#[inline]
pub fn current_time(&self) -> &UnixTime {
&self.current_time
}
@@ -798,7 +731,7 @@ pub mod alloc_mod {
}
}
impl PusSchedulerProvider for PusScheduler {
impl PusScheduler for PusSchedulerAlloc {
type TimeProvider = cds::CdsTime;
/// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
@@ -982,7 +915,8 @@ mod tests {
#[test]
fn test_enable_api() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
assert!(scheduler.is_enabled());
scheduler.disable();
assert!(!scheduler.is_enabled());
@@ -996,7 +930,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::new(0), &[]);
@@ -1038,7 +973,8 @@ mod tests {
#[test]
fn insert_multi_with_same_time() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
scheduler
.insert_unwrapped_and_stored_tc(
@@ -1097,7 +1033,8 @@ mod tests {
#[test]
fn test_time_update() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let time = UnixTime::new(1, 2_000_000);
scheduler.update_time(time);
assert_eq!(scheduler.current_time(), &time);
@@ -1151,7 +1088,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
@@ -1219,7 +1157,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
@@ -1279,7 +1218,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
scheduler.disable();
@@ -1344,7 +1284,8 @@ mod tests {
#[test]
fn insert_unwrapped_tc() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1394,7 +1335,8 @@ mod tests {
#[test]
fn insert_wrapped_tc() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1446,7 +1388,8 @@ mod tests {
#[test]
fn insert_wrong_service() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1471,7 +1414,8 @@ mod tests {
#[test]
fn insert_wrong_subservice() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1496,7 +1440,8 @@ mod tests {
#[test]
fn insert_wrapped_tc_faulty_app_data() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
false,
@@ -1513,7 +1458,8 @@ mod tests {
#[test]
fn insert_doubly_wrapped_time_tagged_cmd() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
false,
@@ -1531,7 +1477,7 @@ mod tests {
#[test]
fn test_ctor_from_current() {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
let scheduler = PusSchedulerAlloc::new_with_current_init_time(Duration::from_secs(5))
.expect("creation from current time failed");
let current_time = scheduler.current_time;
assert!(current_time.as_secs() > 0);
@@ -1539,7 +1485,8 @@ mod tests {
#[test]
fn test_update_from_current() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
assert_eq!(scheduler.current_time.as_secs(), 0);
scheduler
.update_time_from_now()
@@ -1549,7 +1496,8 @@ mod tests {
#[test]
fn release_time_within_time_margin() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1582,7 +1530,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
@@ -1619,7 +1568,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
@@ -1645,7 +1595,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
@@ -1666,7 +1617,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
@@ -1687,7 +1639,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
@@ -1729,7 +1682,8 @@ mod tests {
#[test]
fn insert_full_store_test() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(1, 64)],
@@ -1745,7 +1699,7 @@ mod tests {
assert!(insert_res.is_err());
let err = insert_res.unwrap_err();
match err {
ScheduleError::StoreError(e) => match e {
ScheduleError::Pool(e) => match e {
PoolError::StoreFull(_) => {}
_ => panic!("unexpected store error {e}"),
},
@@ -1755,7 +1709,7 @@ mod tests {
fn insert_command_with_release_time(
pool: &mut StaticMemoryPool,
scheduler: &mut PusScheduler,
scheduler: &mut PusSchedulerAlloc,
seq_count: u14,
release_secs: u64,
) -> TcInfo {
@@ -1774,7 +1728,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
@@ -1806,7 +1761,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
@@ -1841,7 +1797,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
@@ -1876,7 +1833,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
@@ -1917,7 +1875,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
@@ -1946,7 +1905,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
@@ -1973,7 +1933,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_1_to_delete =
@@ -2001,7 +1962,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_out_of_range_0 =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
@@ -2039,7 +2001,8 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
+7 -7
View File
@@ -1,4 +1,4 @@
use super::scheduler::PusSchedulerProvider;
use super::scheduler::PusScheduler;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
CacheAndReadRawEcssTc, DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, EcssTcReceiver,
@@ -15,7 +15,7 @@ use std::sync::mpsc;
/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
/// packets. This handler is able to handle the most important PUS requests for a scheduling
/// service which provides the [PusSchedulerProvider].
/// service which provides the [PusScheduler].
///
/// Please note that this class does not do the regular periodic handling like releasing any
/// telecommands inside the scheduler. The user can retrieve the wrapped scheduler via the
@@ -26,11 +26,11 @@ pub struct PusSchedServiceHandler<
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
PusScheduler: PusSchedulerProvider,
PusSchedulerInstance: PusScheduler,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
scheduler: PusScheduler,
scheduler: PusSchedulerInstance,
}
impl<
@@ -38,7 +38,7 @@ impl<
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusSchedulerProvider,
Scheduler: PusScheduler,
> PusSchedServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter, Scheduler>
{
pub fn new(
@@ -254,7 +254,7 @@ mod tests {
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::pus::{
EcssTcInSharedPoolCacher,
scheduler::{self, PusSchedulerProvider, TcInfo},
scheduler::{self, PusScheduler, TcInfo},
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{RequestId, TcStateAccepted, VerificationToken},
};
@@ -349,7 +349,7 @@ mod tests {
inserted_tcs: VecDeque<TcInfo>,
}
impl PusSchedulerProvider for TestScheduler {
impl PusScheduler for TestScheduler {
type TimeProvider = cds::CdsTime;
fn reset(
+2
View File
@@ -1,3 +1,4 @@
/*
use core::cell::Cell;
use num_enum::TryFromPrimitive;
use satrs::dev_mgmt::{
@@ -1608,3 +1609,4 @@ fn command_safe_mode() {
expected_req_id_not_ctrl,
);
}
*/
-164
View File
@@ -1,164 +0,0 @@
use arbitrary_int::{u11, u21};
use satrs::event_man_legacy::{
EventManagerWithMpsc, EventMessage, EventMessageU32, EventRoutingError, EventSendProvider,
EventU32SenderMpsc,
};
use satrs::events_legacy::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
use satrs::params::U32Pair;
use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};
use satrs::pus::event_man::{DefaultPusEventReportingMap, EventReporter, PusEventTmCreatorWithMap};
use satrs::request::UniqueApidTargetId;
use satrs::tmtc::PacketAsVec;
use spacepackets::ecss::PusError;
use spacepackets::ecss::tm::PusTmReader;
use std::sync::mpsc::{self, SendError, TryRecvError};
use std::thread;
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u11 = u11::new(0x02);
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, u21::new(0x05));
#[derive(Debug, Clone)]
pub enum CustomTmSenderError {
SendError(SendError<Vec<u8>>),
PusError(PusError),
}
#[test]
fn test_threaded_usage() {
let (event_tx, event_rx) = mpsc::sync_channel(100);
let mut event_man = EventManagerWithMpsc::new(event_rx);
let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel();
let pus_event_man_send_provider = EventU32SenderMpsc::new(1, pus_event_man_tx);
event_man.subscribe_all(pus_event_man_send_provider.target_id());
event_man.add_sender(pus_event_man_send_provider);
let (event_packet_tx, event_packet_rx) = mpsc::channel::<PacketAsVec>();
let reporter = EventReporter::new(TEST_ID.raw(), u11::new(0x02), 0, 128);
let pus_event_man =
PusEventTmCreatorWithMap::new(reporter, DefaultPusEventReportingMap::default());
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
panic!("received routing error for event {event_msg:?}: {error:?}");
};
// PUS + Generic event manager thread
let jh0 = thread::spawn(move || {
let mut event_cnt = 0;
let mut params_array: [u8; 128] = [0; 128];
loop {
event_man.try_event_handling(error_handler);
match pus_event_man_rx.try_recv() {
Ok(event_msg) => {
let gen_event = |aux_data| {
pus_event_man.generate_pus_event_tm_generic(
&event_packet_tx,
&EMPTY_STAMP,
event_msg.event(),
aux_data,
)
};
let res = if let Some(aux_data) = event_msg.params() {
match aux_data {
Params::Heapless(heapless) => match heapless {
ParamsHeapless::Raw(raw) => {
raw.write_to_be_bytes(&mut params_array)
.expect("Writing raw parameter failed");
gen_event(Some(&params_array[0..raw.written_len()]))
}
ParamsHeapless::EcssEnum(e) => {
e.write_to_be_bytes(&mut params_array)
.expect("Writing ECSS enum failed");
gen_event(Some(&params_array[0..e.written_len()]))
}
},
Params::Vec(vec) => gen_event(Some(vec.as_slice())),
Params::String(str) => gen_event(Some(str.as_bytes())),
Params::Store(_) => gen_event(None),
_ => panic!("unsupported parameter type"),
}
} else {
gen_event(None)
};
event_cnt += 1;
assert!(res.is_ok());
assert!(res.unwrap());
if event_cnt == 2 {
break;
}
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event receiver disconnected!")
}
}
}
}
});
// Event sender and TM checker thread
let jh1 = thread::spawn(move || {
event_tx
.send(EventMessage::new(TEST_ID.id(), INFO_EVENT.into()))
.expect("Sending info event failed");
loop {
match event_packet_rx.try_recv() {
// Event TM received successfully
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.service_type_id(), 5);
assert_eq!(tm.message_subtype_id(), 1);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 4);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
assert_eq!(event, INFO_EVENT);
break;
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event sender disconnected!")
}
}
}
}
event_tx
.send(EventMessage::new_with_params(
TEST_ID.id(),
LOW_SEV_EVENT,
&Params::Heapless((2_u32, 3_u32).into()),
))
.expect("Sending low severity event failed");
loop {
match event_packet_rx.try_recv() {
// Event TM received successfully
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.service_type_id(), 5);
assert_eq!(tm.message_subtype_id(), 2);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 12);
let event =
EventU32::from(u32::from_be_bytes(src_data[0..4].try_into().unwrap()));
assert_eq!(event, LOW_SEV_EVENT);
let u32_pair: U32Pair =
src_data[4..].try_into().expect("Creating U32Pair failed");
assert_eq!(u32_pair.0, 2);
assert_eq!(u32_pair.1, 3);
break;
}
Err(e) => {
if let TryRecvError::Disconnected = e {
panic!("Event sender disconnected!")
}
}
}
}
});
jh0.join().expect("Joining manager thread failed");
jh1.join().expect("Joining creator thread failed");
}