Compare commits
4 Commits
satrs-v0.2 ... 00bcc455a4

| Author | SHA1 | Date |
|---|---|---|
| | 00bcc455a4 | |
| | 16ab198d12 | |
| | fd950c5a94 | |
| | b45a219c6d | |
.github/workflows/ci.yml (vendored, 64 changed lines)

@@ -1,64 +0,0 @@
name: ci
on: [push, pull_request]

jobs:
  check:
    name: Check build
    strategy:
      matrix:
        os: [ubuntu-latest, macos-latest, windows-latest]
    runs-on: ${{ matrix.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo check --release

  test:
    name: Run Tests
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Install nextest
        uses: taiki-e/install-action@nextest
      - run: cargo nextest run --all-features
      - run: cargo test --doc

  cross-check:
    name: Check Cross-Compilation
    runs-on: ubuntu-latest
    strategy:
      matrix:
        target:
          - armv7-unknown-linux-gnueabihf
          - thumbv7em-none-eabihf
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
        with:
          targets: "armv7-unknown-linux-gnueabihf, thumbv7em-none-eabihf"
      - run: cargo check -p satrs --release --target=${{matrix.target}} --no-default-features

  fmt:
    name: Check formatting
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo fmt --all -- --check

  docs:
    name: Check Documentation Build
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@nightly
      - run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'

  clippy:
    name: Clippy
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - run: cargo clippy -- -D warnings
.gitignore (vendored, 2 changed lines)

@@ -1,8 +1,6 @@
target/

output.log
/Cargo.lock
output.log

output.log
@@ -1,4 +1,4 @@
<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
<p align="center"> <img src="misc/satrs-logo.png" width="40%"> </p>

[](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
[](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)

@@ -40,7 +40,7 @@ This project currently contains following crates:

* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
  Components to build a mission information base from the on-board software directly.
* [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco):
  Example of a simple example using low-level sat-rs components on a bare-metal system
  Example of a simple example on-board software using sat-rs components on a bare-metal system
  with constrained resources.

Each project has its own `CHANGELOG.md`.
automation/Jenkinsfile (vendored, 1 changed line)

@@ -33,7 +33,6 @@ pipeline {
    stage('Test') {
      steps {
        sh 'cargo nextest r --all-features'
        sh 'cargo test --doc'
      }
    }
    stage('Check with all features') {
@@ -47,7 +47,7 @@ def main():
    parser.add_argument(
        "-p",
        "--package",
        choices=["satrs", "satrs-minisim", "satrs-example"],
        choices=["satrs", "satrs-minisim"],
        default="satrs",
        help="Choose project to generate coverage for",
    )
@ -166,7 +166,7 @@ Subsystem<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:
<y:Geometry height="30.0" width="125.0" x="1151.9280499999995" y="281.84403125000006"/>
<y:Fill color="#CCFFFF" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="14" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="20.296875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="58.837890625" x="33.0810546875" xml:space="preserve" y="4.8515625">TM Sink<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="14" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="20.296875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="76.255859375" x="24.3720703125" xml:space="preserve" y="4.8515625">TM Funnel<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
</data>
@ -260,7 +260,7 @@ Mode Tree<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:
<y:Geometry height="57.265600000000006" width="631.1152" x="810.8847999999999" y="411.39428125"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="261.8125" x="166.89412267941418" xml:space="preserve" y="3.144146301369915">satrs-minisim
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="261.8125" x="166.89412267941418" xml:space="preserve" y="3.144146301369915">satrs-satellite
Simulator based on asynchronix<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="-0.028136269449041573" nodeRatioY="-0.08493150684931505" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
@ -272,7 +272,7 @@ Simulator based on asynchronix<y:LabelModel><y:SmartNodeLabelModel distance="4.0
<y:Geometry height="50.0" width="631.1152000000002" x="810.8847999999998" y="476.2958625000002"/>
<y:Fill hasColor="false" transparent="false"/>
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="374.8359375" x="110.3824039294143" xml:space="preserve" y="0.12842465753431043">pytmtc
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Dialog" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="41.25" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="374.8359375" x="110.3824039294143" xml:space="preserve" y="0.12842465753431043">satrs-tmtc
Command-line interface based TMTC handling<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="-0.028136269449041573" nodeRatioY="-0.08493150684931505" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
<y:Shape type="rectangle"/>
</y:ShapeNode>
File diff suppressed because it is too large.
Binary file not shown (previous size: 49 KiB).
@@ -17,7 +17,7 @@ it is still centered around small packets. `sat-rs` provides support for these E
standards and also attempts to fill the gap to the internet protocol by providing the following
components.

1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/std/udp_server/index.html).
1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/host/udp_server/index.html).
   UDP is already packet based which makes it an excellent fit for exchanging space packets.
2. [TCP TMTC Server Components](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/index.html).
   TCP is a stream based protocol, so the library provides building blocks to parse telemetry

@@ -39,12 +39,8 @@ task might be to store all arriving telemetry persistently. This is especially i
space systems which do not have permanent contact like low-earth-orbit (LEO) satellites.

The most important task of a TC source is to deliver the telecommands to the correct recipients.
For component oriented software using message passing, this usually includes staged demultiplexing
components to determine where a command needs to be sent.

Using a generic concept of a TC source and a TM sink as part of the software design increases
the flexibility of the TMTC infrastructure: newly added TM generators and TC receivers only have to
forward their generated or received packets to those handler objects.
For modern component oriented software using message passing, this usually includes staged
demultiplexing components to determine where a command needs to be sent.
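The TC source / TM sink pattern described above can be illustrated with a minimal, hand-written sketch using plain standard-library channels. This example is not taken from the repository; the `Packet` alias and the component names are placeholders for illustration, not sat-rs types:

```rust
use std::sync::mpsc;

// Placeholder packet type; a real system would exchange CCSDS space packets here.
type Packet = Vec<u8>;

fn main() {
    // Every TC receiver forwards received packets to a single TC source handler ...
    let (tc_tx, tc_source_rx) = mpsc::channel::<Packet>();
    // ... and every TM generator forwards generated packets to a single TM sink.
    let (tm_tx, tm_sink_rx) = mpsc::channel::<Packet>();

    // A newly added TC receiver (for example a UDP server task) only needs a
    // clone of the TC sender handle.
    let udp_server_tx = tc_tx.clone();
    udp_server_tx.send(vec![0x17, 0x01]).unwrap();

    // The TC source delivers telecommands to the correct recipients. Staged
    // demultiplexing would inspect APIDs or target IDs at this point.
    while let Ok(tc) = tc_source_rx.try_recv() {
        println!("TC source received a telecommand with {} bytes", tc.len());
        // A recipient handles the command and forwards its telemetry to the TM sink.
        tm_tx.send(tc).unwrap();
    }

    // The TM sink is the single place where telemetry is stored or downlinked.
    while let Ok(tm) = tm_sink_rx.try_recv() {
        println!("TM sink would now store or downlink {} bytes", tm.len());
    }
}
```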
# Low-level protocols and the bridge to the communication subsystem

@@ -1,24 +1,16 @@
# Events

Events are an important mechanism used for remote systems to monitor unexpected
or expected anomalies and events occurring on these systems.

One common use case for events on remote systems is to offer a light-weight publish-subscribe
mechanism and IPC mechanism for software and hardware events which are also packaged as telemetry
(TM) or can trigger a system response. They can also be tied to
Events can be an extremely important mechanism used for remote systems to monitor unexpected
or expected anomalies and events occurring on these systems. They are oftentimes tied to
Fault Detection, Isolation and Recovery (FDIR) operations, which need to happen autonomously.

The PUS Service 5 standardizes how the ground interface for events might look like, but does not
specify how other software components might react to those events. There is the PUS Service 19,
which might be used for that purpose, but the event components recommended by this framework do not
rely on the presence of this service.
Events can also be used as a convenient Inter-Process Communication (IPC) mechanism, which is
also observable for the Ground segment. The PUS Service 5 standardizes how the ground interface
for events might look like, but does not specify how other software components might react
to those events. There is the PUS Service 19, which might be used for that purpose, but the
event components recommended by this framework do not really need this service.

The following image shows how the flow of events could look in a system where components
can generate events, and where other system components might be interested in those events:

For the concrete implementation of your own event management and/or event routing system, you
can have a look at the event management documentation inside the
[API documentation](https://docs.rs/satrs/latest/satrs/event_man/index.html) where you can also
find references to all examples.
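To make the event flow described in this chapter more tangible, here is a small, hand-written sketch of a publish-subscribe event distribution using standard-library channels. It is illustrative only: the `Event` struct and the component names are assumptions for this sketch, not the `satrs::event_man` API referenced above:

```rust
use std::sync::mpsc;

// Hypothetical event type: a group ID plus a unique ID, similar in spirit to
// the PUS Service 5 event definitions mentioned above.
#[derive(Clone, Copy, Debug)]
struct Event {
    group_id: u16,
    unique_id: u16,
}

fn main() {
    // Components report events into a single channel owned by an event manager task.
    let (event_tx, event_rx) = mpsc::channel::<Event>();

    // Subscribers which want to react to events (for example a PUS Service 5 TM
    // generator and an FDIR handler) each get their own receiving end.
    let (tm_tx, tm_rx) = mpsc::channel::<Event>();
    let (fdir_tx, fdir_rx) = mpsc::channel::<Event>();

    // Some component detects an anomaly and publishes an event.
    event_tx.send(Event { group_id: 1, unique_id: 42 }).unwrap();

    // The event manager fans incoming events out to all subscribers.
    while let Ok(event) = event_rx.try_recv() {
        tm_tx.send(event).unwrap();
        fdir_tx.send(event).unwrap();
    }

    while let Ok(event) = tm_rx.try_recv() {
        println!("TM generator would wrap {:?} into a PUS Service 5 packet", event);
    }
    while let Ok(event) = fdir_rx.try_recv() {
        println!("FDIR handler would evaluate {:?}", event);
    }
}
```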
@@ -1,11 +1,11 @@
# Modes

Modes are an extremely useful concept to model complex systems. They allow simplified
system reasoning for both system operators and OBSW developers. They also provide a way to alter
the behaviour of a component and also provide observability of a system. A few examples of how to
model the mode of different components within a space system with modes will be given.
Modes are an extremely useful concept for complex systems in general. They also allow simplified
system reasoning for both system operators and OBSW developers. They model the behaviour of a
component and also provide observability of a system. A few examples of how to model
different components of a space system with modes will be given.

## Physical device component with modes
## Modelling a physical device with modes

The following simple mode scheme with the following three modes

@@ -13,8 +13,7 @@ The following simple mode scheme with the following three mode
- `ON`
- `NORMAL`

can be applied to a large number of simpler device controllers of a remote system, for example
sensors.
can be applied to a large number of simpler devices of a remote system, for example sensors.

1. `OFF` means that a device is physically switched off, and the corresponding software component
   does not poll the device regularly.

@@ -32,7 +31,7 @@ for the majority of devices:
2. `NORMAL` or `ON` to `OFF`: Any important shutdown configuration or handling must be performed
   before powering off the device.

## Controller components with modes
## Modelling a controller with modes

Controller components are not modelling physical devices, but a mode scheme is still the best
way to model most of these components.
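A minimal sketch of the OFF/ON/NORMAL scheme described above, written for illustration only (the enum and the helper function below are assumptions for this sketch, not sat-rs types):

```rust
/// Simple mode scheme for a device handler, as described in the chapter above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Mode {
    /// Device is physically switched off and is not polled.
    Off,
    /// Device is powered and can be commanded, but is not polled periodically.
    On,
    /// Device is powered and polled periodically.
    Normal,
}

/// Returns true if the transition requires special handling, for example
/// power-on initialization or a clean shutdown configuration before power-off.
fn transition_needs_special_handling(from: Mode, to: Mode) -> bool {
    matches!(
        (from, to),
        (Mode::Off, Mode::On | Mode::Normal) | (Mode::On | Mode::Normal, Mode::Off)
    )
}

fn main() {
    assert!(transition_needs_special_handling(Mode::Off, Mode::Normal));
    assert!(!transition_needs_special_handling(Mode::On, Mode::Normal));
    println!("mode transition checks passed");
}
```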
satrs-example-stm32f3-disco/Cargo.lock (generated, 60 changed lines)

@ -22,9 +22,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "autocfg"
|
||||
version = "1.2.0"
|
||||
version = "1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80"
|
||||
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
|
||||
|
||||
[[package]]
|
||||
name = "bare-metal"
|
||||
@ -88,13 +88,19 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.37"
|
||||
version = "0.4.35"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e"
|
||||
checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
|
||||
dependencies = [
|
||||
"num-traits",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "cobs"
|
||||
version = "0.2.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
|
||||
|
||||
[[package]]
|
||||
name = "cobs"
|
||||
version = "0.2.3"
|
||||
@ -183,7 +189,7 @@ dependencies = [
|
||||
"ident_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -194,7 +200,7 @@ checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
|
||||
dependencies = [
|
||||
"darling_core",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -227,7 +233,7 @@ dependencies = [
|
||||
"proc-macro-error",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -259,7 +265,7 @@ checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -325,7 +331,7 @@ dependencies = [
|
||||
"darling",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -553,7 +559,7 @@ checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -574,9 +580,9 @@ checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
|
||||
|
||||
[[package]]
|
||||
name = "pin-project-lite"
|
||||
version = "0.2.14"
|
||||
version = "0.2.13"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02"
|
||||
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
@ -628,9 +634,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "rtcc"
|
||||
version = "0.3.2"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "95973c3a0274adc4f3c5b70d2b5b85618d6de9559a6737d3293ecae9a2fc0839"
|
||||
checksum = "f4fbd0d5bed2b76e27a7ef872568b34072c1af94c277cd52c17a89d54673b3fe"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
]
|
||||
@ -674,7 +680,7 @@ dependencies = [
|
||||
"proc-macro-error",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -723,8 +729,10 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "satrs"
|
||||
version = "0.2.0-rc.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c8cb19cba46a45047ff0879ebfbf9d6ae1c5b2e0e38b2e08760b10a441d4dae6"
|
||||
dependencies = [
|
||||
"cobs",
|
||||
"cobs 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
|
||||
"crc",
|
||||
"delegate",
|
||||
"num-traits",
|
||||
@ -739,7 +747,7 @@ dependencies = [
|
||||
name = "satrs-example-stm32f3-disco"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"cobs",
|
||||
"cobs 0.2.3 (git+https://github.com/robamu/cobs.rs.git?branch=all_features)",
|
||||
"cortex-m",
|
||||
"cortex-m-rt",
|
||||
"cortex-m-semihosting",
|
||||
@ -759,7 +767,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "satrs-shared"
|
||||
version = "0.1.3"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75a402ba556a7f5eef707035b45e64a3259b09674311e98697f3dd0508a1bf51"
|
||||
dependencies = [
|
||||
"spacepackets",
|
||||
]
|
||||
@ -799,12 +809,12 @@ checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
|
||||
|
||||
[[package]]
|
||||
name = "spacepackets"
|
||||
version = "0.11.0-rc.2"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c2cfd5f9a4c7f10714d21f9bc61f2d176cb7ae092cdd687e7ade2d4e6f7d7125"
|
||||
checksum = "28246ae2451af240c3e3ff3c51363c7b6ad565ca6aa9bad23b8c725687c485e1"
|
||||
dependencies = [
|
||||
"chrono",
|
||||
"crc",
|
||||
"defmt",
|
||||
"delegate",
|
||||
"num-traits",
|
||||
"num_enum",
|
||||
@ -899,9 +909,9 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "syn"
|
||||
version = "2.0.58"
|
||||
version = "2.0.53"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687"
|
||||
checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
@ -925,7 +935,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@ -991,5 +1001,5 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"syn 2.0.58",
|
||||
"syn 2.0.53",
|
||||
]
|
||||
|
@@ -46,9 +46,8 @@ branch = "complete-dma-update-hal"
# path = "../stm32f3-discovery"

[dependencies.satrs]
path = "../satrs"
version = "0.2.0-rc.0"
default-features = false
features = ["defmt"]

[dev-dependencies]
defmt-test = "0.3"
@@ -103,12 +103,3 @@ After that, you can for example send a ping to the MCU using the following comma
```sh
./main.py -p /ping
```

You can configure the blinky frequency using

```sh
./main.py -p /change_blink_freq
```

All these commands will package a PUS telecommand which will be sent to the MCU using the COBS
format as the packet framing format.
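For context on the framing mentioned in the README excerpt above, the following hand-written sketch shows how a raw telecommand could be framed with the `cobs` crate, which the firmware already depends on according to the Cargo.lock changes further up. The buffer sizes, the example bytes and the trailing zero delimiter convention are assumptions for this sketch rather than code from the repository:

```rust
// Illustrative sketch only: frame an already serialized PUS TC with COBS before
// sending it over the serial link.
fn frame_tc(raw_tc: &[u8], frame_buf: &mut [u8]) -> usize {
    // COBS encoding removes all zero bytes from the payload, so a single zero
    // byte can then be used as an unambiguous frame delimiter.
    let encoded_len = cobs::encode(raw_tc, frame_buf);
    frame_buf[encoded_len] = 0;
    encoded_len + 1
}

fn main() {
    // Made-up example bytes standing in for a serialized PUS telecommand.
    let raw_tc = [0x18, 0x02, 0xc0, 0x00, 0x00, 0x04, 0x11, 0x01, 0x00, 0x00];
    let mut frame_buf = [0u8; 32];
    let frame_len = frame_tc(&raw_tc, &mut frame_buf);
    println!("framed TC: {:x?}", &frame_buf[..frame_len]);
}
```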
@@ -94,7 +94,6 @@ class SatRsConfigHook(HookBase):
def create_cmd_definition_tree() -> CmdTreeNode:
    root_node = CmdTreeNode.root_node()
    root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
    root_node.add_child(CmdTreeNode("change_blink_freq", "Change blink frequency"))
    return root_node


@@ -216,25 +215,6 @@ class TcHandler(TcHandlerBase):
        if cmd_path == "/ping":
            q.add_log_cmd("Sending PUS ping telecommand")
            q.add_pus_tc(PusTelecommand(service=17, subservice=1))
        if cmd_path == "/change_blink_freq":
            self.create_change_blink_freq_command(q)

    def create_change_blink_freq_command(self, q: DefaultPusQueueHelper):
        q.add_log_cmd("Changing blink frequency")
        while True:
            blink_freq = int(
                input(
                    "Please specify new blink frequency in ms. Valid Range [2..10000]: "
                )
            )
            if blink_freq < 2 or blink_freq > 10000:
                print(
                    "Invalid blink frequency. Please specify a value between 2 and 10000."
                )
                continue
            break
        app_data = struct.pack("!I", blink_freq)
        q.add_pus_tc(PusTelecommand(service=8, subservice=1, app_data=app_data))


def main():
@ -1,13 +1,5 @@
|
||||
#![no_std]
|
||||
#![no_main]
|
||||
use satrs::pus::verification::{
|
||||
FailParams, TcStateAccepted, VerificationReportCreator, VerificationToken,
|
||||
};
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
|
||||
use satrs::spacepackets::ecss::EcssEnumU16;
|
||||
use satrs::spacepackets::CcsdsPacket;
|
||||
use satrs::spacepackets::{ByteConversionError, SpHeader};
|
||||
// global logger + panicking-behavior + memory layout
|
||||
use satrs_example_stm32f3_disco as _;
|
||||
|
||||
@ -15,17 +7,21 @@ use rtic::app;
|
||||
|
||||
use heapless::{mpmc::Q8, Vec};
|
||||
#[allow(unused_imports)]
|
||||
use rtic_monotonics::systick::fugit::{MillisDurationU32, TimerInstantU32};
|
||||
use rtic_monotonics::systick::fugit::TimerInstantU32;
|
||||
use rtic_monotonics::systick::ExtU32;
|
||||
use satrs::seq_count::SequenceCountProviderCore;
|
||||
use satrs::spacepackets::{ecss::PusPacket, ecss::WritablePusPacket};
|
||||
use satrs::{
|
||||
pool::StoreError,
|
||||
pus::{EcssChannel, EcssTmSenderCore, EcssTmtcError, PusTmWrapper},
|
||||
spacepackets::{ecss::PusPacket, ecss::WritablePusPacket},
|
||||
};
|
||||
use stm32f3xx_hal::dma::dma1;
|
||||
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
|
||||
use stm32f3xx_hal::pac::USART2;
|
||||
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
|
||||
|
||||
const UART_BAUD: u32 = 115200;
|
||||
const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
|
||||
const BLINK_FREQ_MS: u32 = 1000;
|
||||
const TX_HANDLER_FREQ_MS: u32 = 20;
|
||||
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
|
||||
const MAX_TC_LEN: usize = 128;
|
||||
@ -58,6 +54,7 @@ type TcPacket = Vec<u8, MAX_TC_LEN>;
|
||||
|
||||
static TM_REQUESTS: Q8<TmPacket> = Q8::new();
|
||||
|
||||
use core::cell::RefCell;
|
||||
use core::sync::atomic::{AtomicU16, Ordering};
|
||||
|
||||
pub struct SeqCountProviderAtomicRef {
|
||||
@ -96,45 +93,56 @@ pub struct TxIdle {
|
||||
dma_channel: dma1::C7,
|
||||
}
|
||||
|
||||
#[derive(Debug, defmt::Format)]
|
||||
pub enum TmSendError {
|
||||
ByteConversion(ByteConversionError),
|
||||
Queue,
|
||||
pub struct TmSender {
|
||||
vec: Option<RefCell<Vec<u8, MAX_TM_LEN>>>,
|
||||
}
|
||||
|
||||
impl From<ByteConversionError> for TmSendError {
|
||||
fn from(value: ByteConversionError) -> Self {
|
||||
Self::ByteConversion(value)
|
||||
}
|
||||
}
|
||||
|
||||
fn send_tm(tm_creator: PusTmCreator) -> Result<(), TmSendError> {
|
||||
if tm_creator.len_written() > MAX_TM_LEN {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
expected: tm_creator.len_written(),
|
||||
found: MAX_TM_LEN,
|
||||
impl TmSender {
|
||||
pub fn new(tm_packet: TmPacket) -> Self {
|
||||
Self {
|
||||
vec: Some(RefCell::new(tm_packet)),
|
||||
}
|
||||
.into());
|
||||
}
|
||||
let mut tm_vec = TmPacket::new();
|
||||
tm_vec
|
||||
.resize(tm_creator.len_written(), 0)
|
||||
.expect("vec resize failed");
|
||||
tm_creator.write_to_bytes(tm_vec.as_mut_slice())?;
|
||||
defmt::info!(
|
||||
"Sending TM[{},{}] with size {}",
|
||||
tm_creator.service(),
|
||||
tm_creator.subservice(),
|
||||
tm_creator.len_written()
|
||||
);
|
||||
TM_REQUESTS
|
||||
.enqueue(tm_vec)
|
||||
.map_err(|_| TmSendError::Queue)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_tm_send_error(error: TmSendError) {
|
||||
defmt::warn!("sending tm failed with error {}", error);
|
||||
impl EcssChannel for TmSender {
|
||||
fn id(&self) -> satrs::ChannelId {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
impl EcssTmSenderCore for TmSender {
|
||||
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
|
||||
let vec = self.vec.as_ref();
|
||||
if vec.is_none() {
|
||||
panic!("send_tm should only be called once");
|
||||
}
|
||||
let vec_ref = vec.unwrap();
|
||||
let mut vec = vec_ref.borrow_mut();
|
||||
match tm {
|
||||
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
|
||||
PusTmWrapper::Direct(tm) => {
|
||||
if tm.len_written() > MAX_TM_LEN {
|
||||
return Err(EcssTmtcError::Store(StoreError::DataTooLarge(
|
||||
tm.len_written(),
|
||||
)));
|
||||
}
|
||||
vec.resize(tm.len_written(), 0).expect("vec resize failed");
|
||||
tm.write_to_bytes(vec.as_mut_slice())?;
|
||||
defmt::info!(
|
||||
"Sending TM[{},{}] with size {}",
|
||||
tm.service(),
|
||||
tm.subservice(),
|
||||
tm.len_written()
|
||||
);
|
||||
drop(vec);
|
||||
TM_REQUESTS
|
||||
.enqueue(vec_ref.take())
|
||||
.map_err(|_| EcssTmtcError::Store(StoreError::StoreFull(0)))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub enum UartTxState {
|
||||
@ -149,106 +157,18 @@ pub struct UartTxShared {
|
||||
state: UartTxState,
|
||||
}
|
||||
|
||||
pub struct RequestWithToken {
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
request: Request,
|
||||
}
|
||||
|
||||
#[derive(Debug, defmt::Format)]
|
||||
pub enum Request {
|
||||
Ping,
|
||||
ChangeBlinkFrequency(u32),
|
||||
}
|
||||
|
||||
#[derive(Debug, defmt::Format)]
|
||||
pub enum RequestError {
|
||||
InvalidApid = 1,
|
||||
InvalidService = 2,
|
||||
InvalidSubservice = 3,
|
||||
NotEnoughAppData = 4,
|
||||
}
|
||||
|
||||
pub fn convert_pus_tc_to_request(
|
||||
tc: &PusTcReader,
|
||||
verif_reporter: &mut VerificationReportCreator,
|
||||
src_data_buf: &mut [u8],
|
||||
timestamp: &[u8],
|
||||
) -> Result<RequestWithToken, RequestError> {
|
||||
defmt::info!(
|
||||
"Found PUS TC [{},{}] with length {}",
|
||||
tc.service(),
|
||||
tc.subservice(),
|
||||
tc.len_packed()
|
||||
);
|
||||
|
||||
let token = verif_reporter.add_tc(tc);
|
||||
if tc.apid() != PUS_APID {
|
||||
defmt::warn!("Received tc with unknown APID {}", tc.apid());
|
||||
let result = send_tm(
|
||||
verif_reporter
|
||||
.acceptance_failure(
|
||||
src_data_buf,
|
||||
token,
|
||||
SEQ_COUNT_PROVIDER.get_and_increment(),
|
||||
0,
|
||||
FailParams::new(timestamp, &EcssEnumU16::new(0), &[]),
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
if let Err(e) = result {
|
||||
handle_tm_send_error(e);
|
||||
}
|
||||
return Err(RequestError::InvalidApid);
|
||||
}
|
||||
let (tm_creator, accepted_token) = verif_reporter
|
||||
.acceptance_success(
|
||||
src_data_buf,
|
||||
token,
|
||||
SEQ_COUNT_PROVIDER.get_and_increment(),
|
||||
0,
|
||||
timestamp,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
if let Err(e) = send_tm(tm_creator) {
|
||||
handle_tm_send_error(e);
|
||||
}
|
||||
|
||||
if tc.service() == 17 && tc.subservice() == 1 {
|
||||
if tc.subservice() == 1 {
|
||||
return Ok(RequestWithToken {
|
||||
request: Request::Ping,
|
||||
token: accepted_token,
|
||||
});
|
||||
} else {
|
||||
return Err(RequestError::InvalidSubservice);
|
||||
}
|
||||
} else if tc.service() == 8 {
|
||||
if tc.subservice() == 1 {
|
||||
if tc.user_data().len() < 4 {
|
||||
return Err(RequestError::NotEnoughAppData);
|
||||
}
|
||||
let new_freq_ms = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
|
||||
return Ok(RequestWithToken {
|
||||
request: Request::ChangeBlinkFrequency(new_freq_ms),
|
||||
token: accepted_token,
|
||||
});
|
||||
} else {
|
||||
return Err(RequestError::InvalidSubservice);
|
||||
}
|
||||
} else {
|
||||
return Err(RequestError::InvalidService);
|
||||
}
|
||||
}
|
||||
|
||||
#[app(device = stm32f3xx_hal::pac, peripherals = true)]
|
||||
mod app {
|
||||
use super::*;
|
||||
use core::slice::Iter;
|
||||
use rtic_monotonics::systick::Systick;
|
||||
use rtic_monotonics::Monotonic;
|
||||
use satrs::pus::verification::{TcStateStarted, VerificationReportCreator};
|
||||
use satrs::spacepackets::{ecss::tc::PusTcReader, time::cds::P_FIELD_BASE};
|
||||
use satrs::pus::verification::FailParams;
|
||||
use satrs::pus::verification::VerificationReporterCore;
|
||||
use satrs::spacepackets::{
|
||||
ecss::tc::PusTcReader, ecss::tm::PusTmCreator, ecss::tm::PusTmSecondaryHeader,
|
||||
ecss::EcssEnumU16, time::cds::P_FIELD_BASE, CcsdsPacket, SpHeader,
|
||||
};
|
||||
#[allow(unused_imports)]
|
||||
use stm32f3_discovery::leds::Direction;
|
||||
use stm32f3_discovery::leds::Leds;
|
||||
@ -261,16 +181,15 @@ mod app {
|
||||
|
||||
#[shared]
|
||||
struct Shared {
|
||||
blink_freq: MillisDurationU32,
|
||||
tx_shared: UartTxShared,
|
||||
rx_transfer: Option<RxDmaTransferType>,
|
||||
}
|
||||
|
||||
#[local]
|
||||
struct Local {
|
||||
verif_reporter: VerificationReportCreator,
|
||||
leds: Leds,
|
||||
last_dir: Direction,
|
||||
verif_reporter: VerificationReporterCore,
|
||||
curr_dir: Iter<'static, Direction>,
|
||||
}
|
||||
|
||||
@ -296,6 +215,8 @@ mod app {
|
||||
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
|
||||
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
|
||||
|
||||
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
|
||||
|
||||
let leds = Leds::new(
|
||||
gpioe.pe8,
|
||||
gpioe.pe9,
|
||||
@ -344,12 +265,8 @@ mod app {
|
||||
defmt::info!("Spawning tasks");
|
||||
blink::spawn().unwrap();
|
||||
serial_tx_handler::spawn().unwrap();
|
||||
|
||||
let verif_reporter = VerificationReportCreator::new(PUS_APID).unwrap();
|
||||
|
||||
(
|
||||
Shared {
|
||||
blink_freq: MillisDurationU32::from_ticks(DEFAULT_BLINK_FREQ_MS),
|
||||
tx_shared: UartTxShared {
|
||||
last_completed: None,
|
||||
state: UartTxState::Idle(Some(TxIdle {
|
||||
@ -360,16 +277,17 @@ mod app {
|
||||
rx_transfer: Some(rx_transfer),
|
||||
},
|
||||
Local {
|
||||
verif_reporter,
|
||||
//timer: mono_timer,
|
||||
leds,
|
||||
last_dir: Direction::North,
|
||||
curr_dir: Direction::iter(),
|
||||
verif_reporter,
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[task(local = [leds, curr_dir, last_dir], shared=[blink_freq])]
|
||||
async fn blink(mut cx: blink::Context) {
|
||||
#[task(local = [leds, curr_dir, last_dir])]
|
||||
async fn blink(cx: blink::Context) {
|
||||
let blink::LocalResources {
|
||||
leds,
|
||||
curr_dir,
|
||||
@ -393,8 +311,7 @@ mod app {
|
||||
toggle_leds(curr_dir.next().unwrap());
|
||||
}
|
||||
}
|
||||
let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
|
||||
Systick::delay(current_blink_freq).await;
|
||||
Systick::delay(BLINK_FREQ_MS.millis()).await;
|
||||
}
|
||||
}
|
||||
|
||||
@ -469,18 +386,18 @@ mod app {
|
||||
|
||||
#[task(
|
||||
local = [
|
||||
verif_reporter,
|
||||
stamp_buf: [u8; 7] = [0; 7],
|
||||
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
|
||||
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
|
||||
timestamp: [u8; 7] = [0; 7],
|
||||
verif_reporter
|
||||
],
|
||||
shared = [blink_freq]
|
||||
)]
|
||||
async fn serial_rx_handler(
|
||||
mut cx: serial_rx_handler::Context,
|
||||
cx: serial_rx_handler::Context,
|
||||
received_packet: Vec<u8, MAX_TC_LEN>,
|
||||
) {
|
||||
cx.local.timestamp[0] = P_FIELD_BASE;
|
||||
defmt::info!("running rx handler");
|
||||
cx.local.stamp_buf[0] = P_FIELD_BASE;
|
||||
defmt::info!("Received packet with {} bytes", received_packet.len());
|
||||
let decode_buf = cx.local.decode_buf;
|
||||
let packet = received_packet.as_slice();
|
||||
@ -500,49 +417,18 @@ mod app {
|
||||
Ok(len) => {
|
||||
defmt::info!("Decoded packet length: {}", len);
|
||||
let pus_tc = PusTcReader::new(decode_buf);
|
||||
let verif_reporter = cx.local.verif_reporter;
|
||||
match pus_tc {
|
||||
Ok((tc, _tc_len)) => {
|
||||
match convert_pus_tc_to_request(
|
||||
&tc,
|
||||
cx.local.verif_reporter,
|
||||
cx.local.src_data_buf,
|
||||
cx.local.timestamp,
|
||||
) {
|
||||
Ok(request_with_token) => {
|
||||
let started_token = handle_start_verification(
|
||||
request_with_token.token,
|
||||
cx.local.verif_reporter,
|
||||
cx.local.src_data_buf,
|
||||
cx.local.timestamp,
|
||||
);
|
||||
|
||||
match request_with_token.request {
|
||||
Request::Ping => {
|
||||
handle_ping_request(cx.local.timestamp);
|
||||
}
|
||||
Request::ChangeBlinkFrequency(new_freq_ms) => {
|
||||
defmt::info!("Received blink frequency change request with new frequncy {}", new_freq_ms);
|
||||
cx.shared.blink_freq.lock(|blink_freq| {
|
||||
*blink_freq =
|
||||
MillisDurationU32::from_ticks(new_freq_ms);
|
||||
});
|
||||
}
|
||||
}
|
||||
handle_completion_verification(
|
||||
started_token,
|
||||
cx.local.verif_reporter,
|
||||
cx.local.src_data_buf,
|
||||
cx.local.timestamp,
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
// TODO: Error handling: Send verification failure based on request error.
|
||||
defmt::warn!("request error {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
defmt::warn!("Error unpacking PUS TC: {}", e);
|
||||
Ok((tc, tc_len)) => handle_tc(
|
||||
tc,
|
||||
tc_len,
|
||||
verif_reporter,
|
||||
cx.local.src_data_buf,
|
||||
cx.local.stamp_buf,
|
||||
),
|
||||
Err(_e) => {
|
||||
// TODO: Print error after API rework.
|
||||
defmt::warn!("Error unpacking PUS TC");
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -552,64 +438,104 @@ mod app {
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_ping_request(timestamp: &[u8]) {
|
||||
defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
|
||||
let sp_header =
|
||||
SpHeader::new_for_unseg_tc(PUS_APID, SEQ_COUNT_PROVIDER.get_and_increment(), 0);
|
||||
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, timestamp);
|
||||
let ping_reply = PusTmCreator::new(sp_header, sec_header, &[], true);
|
||||
let mut tm_packet = TmPacket::new();
|
||||
tm_packet
|
||||
.resize(ping_reply.len_written(), 0)
|
||||
.expect("vec resize failed");
|
||||
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
|
||||
if TM_REQUESTS.enqueue(tm_packet).is_err() {
|
||||
defmt::warn!("TC queue full");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_start_verification(
|
||||
accepted_token: VerificationToken<TcStateAccepted>,
|
||||
verif_reporter: &mut VerificationReportCreator,
|
||||
src_data_buf: &mut [u8],
|
||||
timestamp: &[u8],
|
||||
) -> VerificationToken<TcStateStarted> {
|
||||
let (tm_creator, started_token) = verif_reporter
|
||||
.start_success(
|
||||
src_data_buf,
|
||||
accepted_token,
|
||||
SEQ_COUNT_PROVIDER.get(),
|
||||
0,
|
||||
&timestamp,
|
||||
)
|
||||
.unwrap();
|
||||
let result = send_tm(tm_creator);
|
||||
if let Err(e) = result {
|
||||
handle_tm_send_error(e);
|
||||
}
|
||||
started_token
|
||||
}
|
||||
|
||||
fn handle_completion_verification(
|
||||
started_token: VerificationToken<TcStateStarted>,
|
||||
verif_reporter: &mut VerificationReportCreator,
|
||||
src_data_buf: &mut [u8],
|
||||
timestamp: &[u8],
|
||||
fn handle_tc(
|
||||
tc: PusTcReader,
|
||||
tc_len: usize,
|
||||
verif_reporter: &mut VerificationReporterCore,
|
||||
src_data_buf: &mut [u8; MAX_TM_LEN],
|
||||
stamp_buf: &[u8; 7],
|
||||
) {
|
||||
let result = send_tm(
|
||||
verif_reporter
|
||||
.completion_success(
|
||||
defmt::info!(
|
||||
"Found PUS TC [{},{}] with length {}",
|
||||
tc.service(),
|
||||
tc.subservice(),
|
||||
tc_len
|
||||
);
|
||||
|
||||
let token = verif_reporter.add_tc(&tc);
|
||||
if tc.apid() != PUS_APID {
|
||||
defmt::warn!("Received tc with unknown APID {}", tc.apid());
|
||||
let sendable = verif_reporter
|
||||
.acceptance_failure(
|
||||
src_data_buf,
|
||||
started_token,
|
||||
token,
|
||||
SEQ_COUNT_PROVIDER.get(),
|
||||
0,
|
||||
timestamp,
|
||||
FailParams::new(stamp_buf, &EcssEnumU16::new(0), &[]),
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
if let Err(e) = result {
|
||||
handle_tm_send_error(e);
|
||||
.unwrap();
|
||||
let sender = TmSender::new(TmPacket::new());
|
||||
if let Err(_e) = verif_reporter.send_acceptance_failure(sendable, &sender) {
|
||||
defmt::warn!("Sending acceptance failure failed");
|
||||
};
|
||||
return;
|
||||
}
|
||||
let sendable = verif_reporter
|
||||
.acceptance_success(src_data_buf, token, SEQ_COUNT_PROVIDER.get(), 0, stamp_buf)
|
||||
.unwrap();
|
||||
|
||||
let sender = TmSender::new(TmPacket::new());
|
||||
let accepted_token = match verif_reporter.send_acceptance_success(sendable, &sender) {
|
||||
Ok(token) => token,
|
||||
Err(_e) => {
|
||||
// TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
|
||||
defmt::warn!("Sending acceptance success failed");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
if tc.service() == 17 {
|
||||
if tc.subservice() == 1 {
|
||||
let sendable = verif_reporter
|
||||
.start_success(
|
||||
src_data_buf,
|
||||
accepted_token,
|
||||
SEQ_COUNT_PROVIDER.get(),
|
||||
0,
|
||||
stamp_buf,
|
||||
)
|
||||
.unwrap();
|
||||
// let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
|
||||
let sender = TmSender::new(TmPacket::new());
|
||||
let started_token = match verif_reporter.send_start_success(sendable, &sender) {
|
||||
Ok(token) => token,
|
||||
Err(_e) => {
|
||||
// TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
|
||||
defmt::warn!("Sending acceptance success failed");
|
||||
return;
|
||||
}
|
||||
};
|
||||
defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
|
||||
let mut sp_header =
|
||||
SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
|
||||
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
|
||||
let ping_reply = PusTmCreator::new(&mut sp_header, sec_header, &[], true);
|
||||
let mut tm_packet = TmPacket::new();
|
||||
tm_packet
|
||||
.resize(ping_reply.len_written(), 0)
|
||||
.expect("vec resize failed");
|
||||
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
|
||||
if TM_REQUESTS.enqueue(tm_packet).is_err() {
|
||||
defmt::warn!("TC queue full");
|
||||
return;
|
||||
}
|
||||
SEQ_COUNT_PROVIDER.increment();
|
||||
let sendable = verif_reporter
|
||||
.completion_success(
|
||||
src_data_buf,
|
||||
started_token,
|
||||
SEQ_COUNT_PROVIDER.get(),
|
||||
0,
|
||||
stamp_buf,
|
||||
)
|
||||
.unwrap();
|
||||
let sender = TmSender::new(TmPacket::new());
|
||||
if let Err(_e) = verif_reporter.send_step_or_completion_success(sendable, &sender) {
|
||||
defmt::warn!("Sending completion success failed");
|
||||
}
|
||||
} else {
|
||||
// TODO: Invalid subservice
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -17,15 +17,11 @@ zerocopy = "0.6"
csv = "1"
num_enum = "0.7"
thiserror = "1"
lazy_static = "1"
strum = { version = "0.26", features = ["derive"] }
derive-new = "0.5"
serde = { version = "1", features = ["derive"] }
serde_json = "1"

[dependencies.satrs]
# version = "0.2.0-rc.0"
path = "../satrs"
features = ["test_util"]

[dependencies.satrs-mib]
version = "0.1.1"

@@ -34,6 +30,3 @@ path = "../satrs-mib"

[features]
dyn_tmtc = []
default = ["dyn_tmtc"]

[dev-dependencies]
env_logger = "0.11"
@ -1,143 +0,0 @@
|
||||
import datetime
|
||||
import struct
|
||||
import logging
|
||||
|
||||
from spacepackets.ccsds import CdsShortTimestamp
|
||||
from spacepackets.ecss import PusTelecommand
|
||||
from tmtccmd.config import CmdTreeNode
|
||||
from tmtccmd.pus.tc.s200_fsfw_mode import Mode
|
||||
from tmtccmd.tmtc import DefaultPusQueueHelper
|
||||
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
|
||||
from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
|
||||
|
||||
from common import AcsId, Apid
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_set_mode_cmd(
|
||||
apid: int, unique_id: int, mode: int, submode: int
|
||||
) -> PusTelecommand:
|
||||
app_data = bytearray()
|
||||
app_data.extend(struct.pack("!I", unique_id))
|
||||
app_data.extend(struct.pack("!I", mode))
|
||||
app_data.extend(struct.pack("!H", submode))
|
||||
return PusTelecommand(
|
||||
service=200,
|
||||
subservice=ModeSubservice.TC_MODE_COMMAND,
|
||||
apid=apid,
|
||||
app_data=app_data,
|
||||
)
|
||||
|
||||
|
||||
def create_cmd_definition_tree() -> CmdTreeNode:
|
||||
|
||||
root_node = CmdTreeNode.root_node()
|
||||
|
||||
hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
|
||||
hk_node.add_child(CmdTreeNode("one_shot_hk", "Request One Shot HK set"))
|
||||
hk_node.add_child(
|
||||
CmdTreeNode("enable", "Enable periodic housekeeping data generation")
|
||||
)
|
||||
hk_node.add_child(
|
||||
CmdTreeNode("disable", "Disable periodic housekeeping data generation")
|
||||
)
|
||||
|
||||
mode_node = CmdTreeNode("mode", "Mode Node", hide_children_for_print=True)
|
||||
set_mode_node = CmdTreeNode(
|
||||
"set_mode", "Set Node", hide_children_which_are_leaves=True
|
||||
)
|
||||
set_mode_node.add_child(CmdTreeNode("off", "Set OFF Mode"))
|
||||
set_mode_node.add_child(CmdTreeNode("on", "Set ON Mode"))
|
||||
set_mode_node.add_child(CmdTreeNode("normal", "Set NORMAL Mode"))
|
||||
mode_node.add_child(set_mode_node)
|
||||
mode_node.add_child(CmdTreeNode("read_mode", "Read Mode"))
|
||||
|
||||
test_node = CmdTreeNode("test", "Test Node")
|
||||
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
|
||||
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
|
||||
root_node.add_child(test_node)
|
||||
|
||||
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
|
||||
scheduler_node.add_child(
|
||||
CmdTreeNode(
|
||||
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
|
||||
)
|
||||
)
|
||||
root_node.add_child(scheduler_node)
|
||||
|
||||
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
|
||||
mgm_node = CmdTreeNode("mgms", "MGM devices node")
|
||||
mgm_node.add_child(mode_node)
|
||||
mgm_node.add_child(hk_node)
|
||||
|
||||
acs_node.add_child(mgm_node)
|
||||
root_node.add_child(acs_node)
|
||||
|
||||
return root_node
|
||||
|
||||
|
||||
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
|
||||
# It should always be at least the root path "/", so we split of the empty portion left of it.
|
||||
cmd_path_list = cmd_path.split("/")[1:]
|
||||
if len(cmd_path_list) == 0:
|
||||
_LOGGER.warning("empty command path")
|
||||
return
|
||||
if cmd_path_list[0] == "test":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "ping":
|
||||
q.add_log_cmd("Sending PUS ping telecommand")
|
||||
return q.add_pus_tc(
|
||||
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
|
||||
)
|
||||
elif cmd_path_list[1] == "trigger_event":
|
||||
q.add_log_cmd("Triggering test event")
|
||||
return q.add_pus_tc(
|
||||
PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
|
||||
)
|
||||
if cmd_path_list[0] == "scheduler":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
|
||||
q.add_log_cmd("Sending PUS scheduled TC telecommand")
|
||||
crt_time = CdsShortTimestamp.from_now()
|
||||
time_stamp = crt_time + datetime.timedelta(seconds=10)
|
||||
time_stamp = time_stamp.pack()
|
||||
return q.add_pus_tc(
|
||||
create_time_tagged_cmd(
|
||||
time_stamp,
|
||||
PusTelecommand(service=17, subservice=1),
|
||||
apid=Apid.SCHED,
|
||||
)
|
||||
)
|
||||
if cmd_path_list[0] == "acs":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "mgms":
|
||||
assert len(cmd_path_list) >= 3
|
||||
if cmd_path_list[2] == "hk":
|
||||
if cmd_path_list[3] == "one_shot_hk":
|
||||
q.add_log_cmd("Sending HK one shot request")
|
||||
# TODO: Fix
|
||||
# q.add_pus_tc(
|
||||
# create_request_one_hk_command(
|
||||
# make_addressable_id(Apid.ACS, AcsId.MGM_SET)
|
||||
# )
|
||||
# )
|
||||
if cmd_path_list[2] == "mode":
|
||||
if cmd_path_list[3] == "set_mode":
|
||||
handle_set_mode_cmd(
|
||||
q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
|
||||
)
|
||||
|
||||
|
||||
def handle_set_mode_cmd(
|
||||
q: DefaultPusQueueHelper, target_str: str, mode_str: str, apid: int, unique_id: int
|
||||
):
|
||||
if mode_str == "off":
|
||||
q.add_log_cmd(f"Sending Mode OFF to {target_str}")
|
||||
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.OFF, 0))
|
||||
elif mode_str == "on":
|
||||
q.add_log_cmd(f"Sending Mode ON to {target_str}")
|
||||
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.ON, 0))
|
||||
elif mode_str == "normal":
|
||||
q.add_log_cmd(f"Sending Mode NORMAL to {target_str}")
|
||||
q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.NORMAL, 0))
|
@ -4,13 +4,11 @@ import dataclasses
|
||||
import enum
|
||||
import struct
|
||||
|
||||
from spacepackets.ecss.tc import PacketId, PacketType
|
||||
|
||||
class Apid(enum.IntEnum):
|
||||
SCHED = 1
|
||||
GENERIC_PUS = 2
|
||||
ACS = 3
|
||||
CFDP = 4
|
||||
TMTC = 5
|
||||
EXAMPLE_PUS_APID = 0x02
|
||||
EXAMPLE_PUS_PACKET_ID_TM = PacketId(PacketType.TM, True, EXAMPLE_PUS_APID)
|
||||
TM_PACKET_IDS = [EXAMPLE_PUS_PACKET_ID_TM]
|
||||
|
||||
|
||||
class EventSeverity(enum.IntEnum):
|
||||
@ -38,8 +36,8 @@ class EventU32:
|
||||
)
|
||||
|
||||
|
||||
class AcsId(enum.IntEnum):
|
||||
MGM_0 = 0
|
||||
class RequestTargetId(enum.IntEnum):
|
||||
ACS = 1
|
||||
|
||||
|
||||
class AcsHkIds(enum.IntEnum):
|
@ -3,11 +3,10 @@
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
from typing import Any, Optional
|
||||
from typing import Optional
|
||||
from prompt_toolkit.history import History
|
||||
from prompt_toolkit.history import FileHistory
|
||||
|
||||
from spacepackets.ccsds import PacketId, PacketType
|
||||
import tmtccmd
|
||||
from spacepackets.ecss import PusTelemetry, PusVerificator
|
||||
from spacepackets.ecss.pus_17_test import Service17Tm
|
||||
@ -17,7 +16,7 @@ from spacepackets.ccsds.time import CdsShortTimestamp
|
||||
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
|
||||
from tmtccmd.core.base import BackendRequest
|
||||
from tmtccmd.pus import VerificationWrapper
|
||||
from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase
|
||||
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
|
||||
from tmtccmd.com import ComInterface
|
||||
from tmtccmd.config import (
|
||||
CmdTreeNode,
|
||||
@ -47,7 +46,7 @@ from tmtccmd.util.obj_id import ObjectIdDictT
|
||||
|
||||
|
||||
import pus_tc
|
||||
from common import Apid, EventU32
|
||||
from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32
|
||||
|
||||
_LOGGER = logging.getLogger()
|
||||
|
||||
@ -63,13 +62,10 @@ class SatRsConfigHook(HookBase):
|
||||
)
|
||||
|
||||
assert self.cfg_path is not None
|
||||
packet_id_list = []
|
||||
for apid in Apid:
|
||||
packet_id_list.append(PacketId(PacketType.TM, True, apid))
|
||||
cfg = create_com_interface_cfg_default(
|
||||
com_if_key=com_if_key,
|
||||
json_cfg_path=self.cfg_path,
|
||||
space_packet_ids=packet_id_list,
|
||||
space_packet_ids=TM_PACKET_IDS,
|
||||
)
|
||||
assert cfg is not None
|
||||
return create_com_interface_default(cfg)
|
||||
@ -89,23 +85,21 @@ class SatRsConfigHook(HookBase):
|
||||
return get_core_object_ids()
|
||||
|
||||
|
||||
class PusHandler(GenericApidHandlerBase):
|
||||
class PusHandler(SpecificApidHandlerBase):
|
||||
def __init__(
|
||||
self,
|
||||
file_logger: logging.Logger,
|
||||
verif_wrapper: VerificationWrapper,
|
||||
raw_logger: RawTmtcTimedLogWrapper,
|
||||
):
|
||||
super().__init__(None)
|
||||
super().__init__(EXAMPLE_PUS_APID, None)
|
||||
self.file_logger = file_logger
|
||||
self.raw_logger = raw_logger
|
||||
self.verif_wrapper = verif_wrapper
|
||||
|
||||
def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
|
||||
def handle_tm(self, packet: bytes, _user_args: any):
|
||||
try:
|
||||
pus_tm = PusTelemetry.unpack(
|
||||
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
|
||||
)
|
||||
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
|
||||
except ValueError as e:
|
||||
_LOGGER.warning("Could not generate PUS TM object from raw data")
|
||||
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
|
||||
@ -113,7 +107,7 @@ class PusHandler(GenericApidHandlerBase):
|
||||
service = pus_tm.service
|
||||
if service == 1:
|
||||
tm_packet = Service1Tm.unpack(
|
||||
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
|
||||
data=packet, params=UnpackParams(CdsShortTimestamp.empty(), 1, 2)
|
||||
)
|
||||
res = self.verif_wrapper.add_tm(tm_packet)
|
||||
if res is None:
|
||||
@ -130,9 +124,7 @@ class PusHandler(GenericApidHandlerBase):
|
||||
elif service == 3:
|
||||
_LOGGER.info("No handling for HK packets implemented")
|
||||
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
|
||||
pus_tm = PusTelemetry.unpack(
|
||||
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
|
||||
)
|
||||
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
|
||||
if pus_tm.subservice == 25:
|
||||
if len(pus_tm.source_data) < 8:
|
||||
raise ValueError("No addressable ID in HK packet")
|
||||
@ -140,18 +132,16 @@ class PusHandler(GenericApidHandlerBase):
|
||||
_LOGGER.info(json_str)
|
||||
elif service == 5:
|
||||
tm_packet = PusTelemetry.unpack(
|
||||
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
|
||||
packet, time_reader=CdsShortTimestamp.empty()
|
||||
)
|
||||
src_data = tm_packet.source_data
|
||||
event_u32 = EventU32.unpack(src_data)
|
||||
_LOGGER.info(
|
||||
f"Received event packet. Source APID: {Apid(tm_packet.apid)!r}, Event: {event_u32}"
|
||||
)
|
||||
_LOGGER.info(f"Received event packet. Event: {event_u32}")
|
||||
if event_u32.group_id == 0 and event_u32.unique_id == 0:
|
||||
_LOGGER.info("Received test event")
|
||||
elif service == 17:
|
||||
tm_packet = Service17Tm.unpack(
|
||||
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
|
||||
packet, time_reader=CdsShortTimestamp.empty()
|
||||
)
|
||||
if tm_packet.subservice == 2:
|
||||
self.file_logger.info("Received Ping Reply TM[17,2]")
|
||||
@ -168,7 +158,7 @@ class PusHandler(GenericApidHandlerBase):
|
||||
f"The service {service} is not implemented in Telemetry Factory"
|
||||
)
|
||||
tm_packet = PusTelemetry.unpack(
|
||||
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
|
||||
packet, time_reader=CdsShortTimestamp.empty()
|
||||
)
|
||||
self.raw_logger.log_tm(pus_tm)
|
||||
|
||||
@ -187,7 +177,7 @@ class TcHandler(TcHandlerBase):
|
||||
tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
|
||||
seq_cnt_provider=seq_count_provider,
|
||||
pus_verificator=self.verif_wrapper.pus_verificator,
|
||||
default_pus_apid=None,
|
||||
default_pus_apid=EXAMPLE_PUS_APID,
|
||||
)
|
||||
|
||||
def send_cb(self, send_params: SendCbParams):
|
||||
@ -203,15 +193,15 @@ class TcHandler(TcHandlerBase):
|
||||
_LOGGER.info(log_entry.log_str)
|
||||
|
||||
def queue_finished_cb(self, info: ProcedureWrapper):
|
||||
if info.proc_type == TcProcedureType.TREE_COMMANDING:
|
||||
def_proc = info.to_tree_commanding_procedure()
|
||||
if info.proc_type == TcProcedureType.DEFAULT:
|
||||
def_proc = info.to_def_procedure()
|
||||
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
|
||||
|
||||
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
|
||||
q = self.queue_helper
|
||||
q.queue_wrapper = wrapper.queue_wrapper
|
||||
if info.proc_type == TcProcedureType.TREE_COMMANDING:
|
||||
def_proc = info.to_tree_commanding_procedure()
|
||||
if info.proc_type == TcProcedureType.DEFAULT:
|
||||
def_proc = info.to_def_procedure()
|
||||
assert def_proc.cmd_path is not None
|
||||
pus_tc.pack_pus_telecommands(q, def_proc.cmd_path)
|
||||
|
||||
@ -231,6 +221,7 @@ def main():
|
||||
post_args_wrapper.set_params_without_prompts(proc_wrapper)
|
||||
else:
|
||||
post_args_wrapper.set_params_with_prompts(proc_wrapper)
|
||||
params.apid = EXAMPLE_PUS_APID
|
||||
setup_args = SetupWrapper(
|
||||
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
|
||||
)
|
||||
@ -242,9 +233,8 @@ def main():
|
||||
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
|
||||
# Create primary TM handler and add it to the CCSDS Packet Handler
|
||||
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
|
||||
ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
|
||||
# TODO: We could add the CFDP handler for the CFDP APID at a later stage.
|
||||
# ccsds_handler.add_apid_handler(tm_handler)
|
||||
ccsds_handler = CcsdsTmHandler(generic_handler=None)
|
||||
ccsds_handler.add_apid_handler(tm_handler)
|
||||
|
||||
# Create TC handler
|
||||
seq_count_provider = PusFileSeqCountProvider()
|
||||
@ -262,7 +252,6 @@ def main():
|
||||
while True:
|
||||
state = tmtc_backend.periodic_op(None)
|
||||
if state.request == BackendRequest.TERMINATION_NO_ERROR:
|
||||
tmtc_backend.close_com_if()
|
||||
sys.exit(0)
|
||||
elif state.request == BackendRequest.DELAY_IDLE:
|
||||
_LOGGER.info("TMTC Client in IDLE mode")
|
||||
@ -277,7 +266,6 @@ def main():
|
||||
elif state.request == BackendRequest.CALL_NEXT:
|
||||
pass
|
||||
except KeyboardInterrupt:
|
||||
tmtc_backend.close_com_if()
|
||||
sys.exit(0)
|
||||
|
||||
|
satrs-example/satrs-tmtc/pus_tc.py (new file, 85 lines)

@ -0,0 +1,85 @@
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
from spacepackets.ccsds import CdsShortTimestamp
|
||||
from spacepackets.ecss import PusTelecommand
|
||||
from tmtccmd.config import CmdTreeNode
|
||||
from tmtccmd.tmtc import DefaultPusQueueHelper
|
||||
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
|
||||
from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command
|
||||
|
||||
from common import (
|
||||
EXAMPLE_PUS_APID,
|
||||
make_addressable_id,
|
||||
RequestTargetId,
|
||||
AcsHkIds,
|
||||
)
|
||||
|
||||
_LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def create_cmd_definition_tree() -> CmdTreeNode:
|
||||
|
||||
root_node = CmdTreeNode.root_node()
|
||||
|
||||
test_node = CmdTreeNode("test", "Test Node")
|
||||
test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
|
||||
test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
|
||||
root_node.add_child(test_node)
|
||||
|
||||
scheduler_node = CmdTreeNode("scheduler", "Scheduler Node")
|
||||
scheduler_node.add_child(
|
||||
CmdTreeNode(
|
||||
"schedule_ping_10_secs_ahead", "Schedule Ping to execute in 10 seconds"
|
||||
)
|
||||
)
|
||||
root_node.add_child(scheduler_node)
|
||||
|
||||
acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
|
||||
mgm_node = CmdTreeNode("mgms", "MGM devices node")
|
||||
mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK"))
|
||||
acs_node.add_child(mgm_node)
|
||||
root_node.add_child(acs_node)
|
||||
|
||||
return root_node
|
||||
|
||||
|
||||
def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
|
||||
# It should always be at least the root path "/", so we split off the empty portion left of it.
|
||||
cmd_path_list = cmd_path.split("/")[1:]
|
||||
if len(cmd_path_list) == 0:
|
||||
_LOGGER.warning("empty command path")
|
||||
return
|
||||
if cmd_path_list[0] == "test":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "ping":
|
||||
q.add_log_cmd("Sending PUS ping telecommand")
|
||||
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
|
||||
elif cmd_path_list[1] == "trigger_event":
|
||||
q.add_log_cmd("Triggering test event")
|
||||
return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
|
||||
if cmd_path_list[0] == "scheduler":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
|
||||
q.add_log_cmd("Sending PUS scheduled TC telecommand")
|
||||
crt_time = CdsShortTimestamp.from_now()
|
||||
time_stamp = crt_time + datetime.timedelta(seconds=10)
|
||||
time_stamp = time_stamp.pack()
|
||||
return q.add_pus_tc(
|
||||
create_time_tagged_cmd(
|
||||
time_stamp,
|
||||
PusTelecommand(service=17, subservice=1),
|
||||
apid=EXAMPLE_PUS_APID,
|
||||
)
|
||||
)
|
||||
if cmd_path_list[0] == "acs":
|
||||
assert len(cmd_path_list) >= 2
|
||||
if cmd_path_list[1] == "mgm":
|
||||
assert len(cmd_path_list) >= 3
|
||||
if cmd_path_list[2] == "one_shot_hk":
|
||||
q.add_log_cmd("Sending HK one shot request")
|
||||
q.add_pus_tc(
|
||||
create_request_one_hk_command(
|
||||
make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
|
||||
)
|
||||
)
|
@ -1,2 +1,2 @@
|
||||
tmtccmd == 8.0.0rc2
|
||||
tmtccmd == 8.0.0rc1
|
||||
# -e git+https://github.com/robamu-org/tmtccmd@97e5e51101a08b21472b3ddecc2063359f7e307a#egg=tmtccmd
|
118
satrs-example/src/acs.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use std::sync::mpsc::{self, TryRecvError};
|
||||
|
||||
use log::{info, warn};
|
||||
use satrs::pus::verification::VerificationReportingProvider;
|
||||
use satrs::pus::{EcssTmSender, PusTmWrapper};
|
||||
use satrs::request::TargetAndApidId;
|
||||
use satrs::spacepackets::ecss::hk::Subservice as HkSubservice;
|
||||
use satrs::{
|
||||
hk::HkRequest,
|
||||
spacepackets::{
|
||||
ecss::tm::{PusTmCreator, PusTmSecondaryHeader},
|
||||
time::cds::{CdsTime, DaysLen16Bits},
|
||||
SequenceFlags, SpHeader,
|
||||
},
|
||||
};
|
||||
use satrs_example::config::{RequestTargetId, PUS_APID};
|
||||
|
||||
use crate::{
|
||||
hk::{AcsHkIds, HkUniqueId},
|
||||
requests::{Request, RequestWithToken},
|
||||
update_time,
|
||||
};
|
||||
|
||||
pub struct AcsTask<VerificationReporter: VerificationReportingProvider> {
|
||||
timestamp: [u8; 7],
|
||||
time_provider: CdsTime<DaysLen16Bits>,
|
||||
verif_reporter: VerificationReporter,
|
||||
tm_sender: Box<dyn EcssTmSender>,
|
||||
request_rx: mpsc::Receiver<RequestWithToken>,
|
||||
}
|
||||
|
||||
impl<VerificationReporter: VerificationReportingProvider> AcsTask<VerificationReporter> {
|
||||
pub fn new(
|
||||
tm_sender: impl EcssTmSender,
|
||||
request_rx: mpsc::Receiver<RequestWithToken>,
|
||||
verif_reporter: VerificationReporter,
|
||||
) -> Self {
|
||||
Self {
|
||||
timestamp: [0; 7],
|
||||
time_provider: CdsTime::new_with_u16_days(0, 0),
|
||||
verif_reporter,
|
||||
tm_sender: Box::new(tm_sender),
|
||||
request_rx,
|
||||
}
|
||||
}
|
||||
|
||||
fn handle_hk_request(&mut self, target_id: u32, unique_id: u32) {
|
||||
assert_eq!(target_id, RequestTargetId::AcsSubsystem as u32);
|
||||
if unique_id == AcsHkIds::TestMgmSet as u32 {
|
||||
let mut sp_header = SpHeader::tm(PUS_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTmSecondaryHeader::new_simple(
|
||||
3,
|
||||
HkSubservice::TmHkPacket as u8,
|
||||
&self.timestamp,
|
||||
);
|
||||
let mut buf: [u8; 8] = [0; 8];
|
||||
let hk_id = HkUniqueId::new(target_id, unique_id);
|
||||
hk_id.write_to_be_bytes(&mut buf).unwrap();
|
||||
let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &buf, true);
|
||||
self.tm_sender
|
||||
.send_tm(PusTmWrapper::Direct(pus_tm))
|
||||
.expect("Sending HK TM failed");
|
||||
}
|
||||
// TODO: Verification failure for invalid unique IDs.
|
||||
}
|
||||
|
||||
pub fn try_reading_one_request(&mut self) -> bool {
|
||||
match self.request_rx.try_recv() {
|
||||
Ok(request) => {
|
||||
info!(
|
||||
"ACS thread: Received HK request {:?}",
|
||||
request.targeted_request
|
||||
);
|
||||
let target_and_apid_id = TargetAndApidId::from(request.targeted_request.target_id);
|
||||
match request.targeted_request.request {
|
||||
Request::Hk(hk_req) => match hk_req {
|
||||
HkRequest::OneShot(unique_id) => {
|
||||
self.handle_hk_request(target_and_apid_id.target(), unique_id)
|
||||
}
|
||||
HkRequest::Enable(_) => {}
|
||||
HkRequest::Disable(_) => {}
|
||||
HkRequest::ModifyCollectionInterval(_, _) => {}
|
||||
},
|
||||
Request::Mode(_mode_req) => {
|
||||
warn!("mode request handling not implemented yet")
|
||||
}
|
||||
Request::Action(_action_req) => {
|
||||
warn!("action request handling not implemented yet")
|
||||
}
|
||||
}
|
||||
let started_token = self
|
||||
.verif_reporter
|
||||
.start_success(request.token, &self.timestamp)
|
||||
.expect("Sending start success failed");
|
||||
self.verif_reporter
|
||||
.completion_success(started_token, &self.timestamp)
|
||||
.expect("Sending completion success failed");
|
||||
true
|
||||
}
|
||||
Err(e) => match e {
|
||||
TryRecvError::Empty => false,
|
||||
TryRecvError::Disconnected => {
|
||||
warn!("ACS thread: Message Queue TX disconnected!");
|
||||
false
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
loop {
|
||||
if !self.try_reading_one_request() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,282 +0,0 @@
|
||||
use derive_new::new;
|
||||
use satrs::hk::{HkRequest, HkRequestVariant};
|
||||
use satrs::queue::{GenericSendError, GenericTargetedMessagingError};
|
||||
use satrs::spacepackets::ecss::hk;
|
||||
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
|
||||
use satrs::spacepackets::SpHeader;
|
||||
use satrs_example::{DeviceMode, TimeStampHelper};
|
||||
use std::sync::mpsc::{self};
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use satrs::mode::{
|
||||
ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler,
|
||||
};
|
||||
use satrs::pus::{EcssTmSender, PusTmVariant};
|
||||
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
|
||||
use satrs_example::config::components::PUS_MODE_SERVICE;
|
||||
|
||||
use crate::pus::hk::{HkReply, HkReplyVariant};
|
||||
use crate::requests::CompositeRequest;
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
|
||||
// This is the selected resolution for the STM LIS3MDL device at the 4 Gauss sensitivity setting.
|
||||
const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;
|
||||
|
||||
pub trait SpiInterface {
|
||||
type Error;
|
||||
fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct SpiDummyInterface {
|
||||
pub dummy_val_0: i16,
|
||||
pub dummy_val_1: i16,
|
||||
pub dummy_val_2: i16,
|
||||
}
|
||||
|
||||
impl SpiInterface for SpiDummyInterface {
|
||||
type Error = ();
|
||||
|
||||
fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> {
|
||||
rx[0..2].copy_from_slice(&self.dummy_val_0.to_be_bytes());
|
||||
rx[2..4].copy_from_slice(&self.dummy_val_1.to_be_bytes());
|
||||
rx[4..6].copy_from_slice(&self.dummy_val_2.to_be_bytes());
|
||||
Ok(())
|
||||
}
|
||||
}
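The `SpiInterface` trait above is the seam that keeps the MGM handler independent of the physical bus: the dummy implementation can be swapped for real hardware or a test double without touching the handler. As a minimal sketch under that assumption (the `SpiReplayInterface` type and its canned frame are hypothetical, not part of the example code):

```rust
/// Hypothetical test double which replays a fixed response frame on every transfer.
struct SpiReplayInterface {
    response: [u8; 12],
}

impl SpiInterface for SpiReplayInterface {
    type Error = ();

    fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> {
        // Copy as much of the canned response as fits into the receive buffer.
        let len = rx.len().min(self.response.len());
        rx[..len].copy_from_slice(&self.response[..len]);
        Ok(())
    }
}
```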
|
||||
|
||||
#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)]
|
||||
pub struct MgmData {
|
||||
pub valid: bool,
|
||||
pub x: f32,
|
||||
pub y: f32,
|
||||
pub z: f32,
|
||||
}
|
||||
|
||||
pub struct MpscModeLeafInterface {
|
||||
pub request_rx: mpsc::Receiver<GenericMessage<ModeRequest>>,
|
||||
pub reply_tx_to_pus: mpsc::Sender<GenericMessage<ModeReply>>,
|
||||
pub reply_tx_to_parent: mpsc::Sender<GenericMessage<ModeReply>>,
|
||||
}
|
||||
|
||||
/// Example MGM device handler strongly based on the LIS3MDL MEMS device.
|
||||
#[derive(new)]
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub struct MgmHandlerLis3Mdl<ComInterface: SpiInterface, TmSender: EcssTmSender> {
|
||||
id: UniqueApidTargetId,
|
||||
dev_str: &'static str,
|
||||
mode_interface: MpscModeLeafInterface,
|
||||
composite_request_receiver: mpsc::Receiver<GenericMessage<CompositeRequest>>,
|
||||
hk_reply_sender: mpsc::Sender<GenericMessage<HkReply>>,
|
||||
tm_sender: TmSender,
|
||||
com_interface: ComInterface,
|
||||
shared_mgm_set: Arc<Mutex<MgmData>>,
|
||||
#[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")]
|
||||
mode_and_submode: ModeAndSubmode,
|
||||
#[new(default)]
|
||||
tx_buf: [u8; 12],
|
||||
#[new(default)]
|
||||
rx_buf: [u8; 12],
|
||||
#[new(default)]
|
||||
tm_buf: [u8; 16],
|
||||
#[new(default)]
|
||||
stamp_helper: TimeStampHelper,
|
||||
}
|
||||
|
||||
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> MgmHandlerLis3Mdl<ComInterface, TmSender> {
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.stamp_helper.update_from_now();
|
||||
// Handle requests.
|
||||
self.handle_composite_requests();
|
||||
self.handle_mode_requests();
|
||||
if self.mode() == DeviceMode::Normal as u32 {
|
||||
log::trace!("polling LIS3MDL sensor {}", self.dev_str);
|
||||
// Communicate with the device.
|
||||
let result = self.com_interface.transfer(&self.tx_buf, &mut self.rx_buf);
|
||||
assert!(result.is_ok());
|
||||
// Actual data begins on the second byte, similar to how many SPI devices behave.
|
||||
let x_raw = i16::from_be_bytes(self.rx_buf[1..3].try_into().unwrap());
|
||||
let y_raw = i16::from_be_bytes(self.rx_buf[3..5].try_into().unwrap());
|
||||
let z_raw = i16::from_be_bytes(self.rx_buf[5..7].try_into().unwrap());
|
||||
// Simple scaling to retrieve the float value, assuming the 4 Gauss sensitivity resolution.
|
||||
let mut mgm_guard = self.shared_mgm_set.lock().unwrap();
|
||||
mgm_guard.x = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
|
||||
mgm_guard.y = y_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
|
||||
mgm_guard.z = z_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
|
||||
drop(mgm_guard);
|
||||
}
|
||||
}
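To make the scaling above concrete: a raw LSB value is first converted to Gauss via the 4 Gauss sensitivity (1/6842 Gauss per LSB) and then to microtesla (100 µT per Gauss). A quick sanity check with an assumed raw reading:

```rust
// An assumed raw reading of 6842 LSB corresponds to exactly 1 Gauss, i.e. 100 microtesla:
// 6842 * 100.0 * (1.0 / 6842.0) = 100.0
let x_raw: i16 = 6842;
let x_microtesla = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
assert!((x_microtesla - 100.0).abs() < 1e-3);
```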
|
||||
|
||||
pub fn handle_composite_requests(&mut self) {
|
||||
loop {
|
||||
match self.composite_request_receiver.try_recv() {
|
||||
Ok(ref msg) => match &msg.message {
|
||||
CompositeRequest::Hk(hk_request) => {
|
||||
self.handle_hk_request(&msg.requestor_info, hk_request)
|
||||
}
|
||||
// TODO: This object does not have actions (yet). Still send back a completion failure
|
||||
// reply.
|
||||
CompositeRequest::Action(_action_req) => {}
|
||||
},
|
||||
|
||||
Err(e) => {
|
||||
if e != mpsc::TryRecvError::Empty {
|
||||
log::warn!(
|
||||
"{}: failed to receive composite request: {:?}",
|
||||
self.dev_str,
|
||||
e
|
||||
);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) {
|
||||
match hk_request.variant {
|
||||
HkRequestVariant::OneShot => {
|
||||
self.hk_reply_sender
|
||||
.send(GenericMessage::new(
|
||||
*requestor_info,
|
||||
HkReply::new(hk_request.unique_id, HkReplyVariant::Ack),
|
||||
))
|
||||
.expect("failed to send HK reply");
|
||||
let sec_header = PusTmSecondaryHeader::new(
|
||||
3,
|
||||
hk::Subservice::TmHkPacket as u8,
|
||||
0,
|
||||
0,
|
||||
self.stamp_helper.stamp(),
|
||||
);
|
||||
let mgm_snapshot = *self.shared_mgm_set.lock().unwrap();
|
||||
// Use binary serialization here. We want the data to be tightly packed.
|
||||
self.tm_buf[0] = mgm_snapshot.valid as u8;
|
||||
self.tm_buf[1..5].copy_from_slice(&mgm_snapshot.x.to_be_bytes());
|
||||
self.tm_buf[5..9].copy_from_slice(&mgm_snapshot.y.to_be_bytes());
|
||||
self.tm_buf[9..13].copy_from_slice(&mgm_snapshot.z.to_be_bytes());
|
||||
let hk_tm = PusTmCreator::new(
|
||||
SpHeader::new_from_apid(self.id.apid),
|
||||
sec_header,
|
||||
&self.tm_buf[0..12],
|
||||
true,
|
||||
);
|
||||
self.tm_sender
|
||||
.send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
|
||||
.expect("failed to send HK TM");
|
||||
}
|
||||
HkRequestVariant::EnablePeriodic => todo!(),
|
||||
HkRequestVariant::DisablePeriodic => todo!(),
|
||||
HkRequestVariant::ModifyCollectionInterval(_) => todo!(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_mode_requests(&mut self) {
|
||||
loop {
|
||||
// TODO: Only allow one set mode request per cycle?
|
||||
match self.mode_interface.request_rx.try_recv() {
|
||||
Ok(msg) => {
|
||||
let result = self.handle_mode_request(msg);
|
||||
// TODO: Trigger event?
|
||||
if result.is_err() {
|
||||
log::warn!(
|
||||
"{}: mode request failed with error {:?}",
|
||||
self.dev_str,
|
||||
result.err().unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
if e != mpsc::TryRecvError::Empty {
|
||||
log::warn!("{}: failed to receive mode request: {:?}", self.dev_str, e);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeProvider
|
||||
for MgmHandlerLis3Mdl<ComInterface, TmSender>
|
||||
{
|
||||
fn mode_and_submode(&self) -> ModeAndSubmode {
|
||||
self.mode_and_submode
|
||||
}
|
||||
}
|
||||
|
||||
impl<ComInterface: SpiInterface, TmSender: EcssTmSender> ModeRequestHandler
|
||||
for MgmHandlerLis3Mdl<ComInterface, TmSender>
|
||||
{
|
||||
type Error = ModeError;
|
||||
fn start_transition(
|
||||
&mut self,
|
||||
requestor: MessageMetadata,
|
||||
mode_and_submode: ModeAndSubmode,
|
||||
) -> Result<(), satrs::mode::ModeError> {
|
||||
log::info!(
|
||||
"{}: transitioning to mode {:?}",
|
||||
self.dev_str,
|
||||
mode_and_submode
|
||||
);
|
||||
self.mode_and_submode = mode_and_submode;
|
||||
self.handle_mode_reached(Some(requestor))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
|
||||
log::info!(
|
||||
"{} announcing mode: {:?}",
|
||||
self.dev_str,
|
||||
self.mode_and_submode
|
||||
);
|
||||
}
|
||||
|
||||
fn handle_mode_reached(
|
||||
&mut self,
|
||||
requestor: Option<MessageMetadata>,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.announce_mode(requestor, false);
|
||||
if let Some(requestor) = requestor {
|
||||
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
|
||||
log::warn!(
|
||||
"can not send back mode reply to sender {}",
|
||||
requestor.sender_id()
|
||||
);
|
||||
} else {
|
||||
self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn send_mode_reply(
|
||||
&self,
|
||||
requestor: MessageMetadata,
|
||||
reply: ModeReply,
|
||||
) -> Result<(), Self::Error> {
|
||||
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
|
||||
log::warn!(
|
||||
"can not send back mode reply to sender {}",
|
||||
requestor.sender_id()
|
||||
);
|
||||
}
|
||||
self.mode_interface
|
||||
.reply_tx_to_pus
|
||||
.send(GenericMessage::new(requestor, reply))
|
||||
.map_err(|_| GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected))?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_mode_info(
|
||||
&mut self,
|
||||
_requestor_info: MessageMetadata,
|
||||
_info: ModeAndSubmode,
|
||||
) -> Result<(), Self::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1 +0,0 @@
|
||||
pub mod mgm;
|
@ -12,7 +12,8 @@ use std::time::Duration;
|
||||
fn main() {
|
||||
let mut buf = [0; 32];
|
||||
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
|
||||
let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
|
||||
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
|
||||
let tc_req_id = RequestId::new(&pus_tc);
|
||||
println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");
|
||||
|
44
satrs-example/src/ccsds.rs
Normal file
@ -0,0 +1,44 @@
|
||||
use satrs::pus::ReceivesEcssPusTc;
|
||||
use satrs::spacepackets::{CcsdsPacket, SpHeader};
|
||||
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
|
||||
use satrs_example::config::PUS_APID;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct CcsdsReceiver<
|
||||
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone,
|
||||
E,
|
||||
> {
|
||||
pub tc_source: TcSource,
|
||||
}
|
||||
|
||||
impl<
|
||||
TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
|
||||
E: 'static,
|
||||
> CcsdsPacketHandler for CcsdsReceiver<TcSource, E>
|
||||
{
|
||||
type Error = E;
|
||||
|
||||
fn valid_apids(&self) -> &'static [u16] {
|
||||
&[PUS_APID]
|
||||
}
|
||||
|
||||
fn handle_known_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
if sp_header.apid() == PUS_APID {
|
||||
return self.tc_source.pass_ccsds(sp_header, tc_raw);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_unknown_apid(
|
||||
&mut self,
|
||||
sp_header: &SpHeader,
|
||||
_tc_raw: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
println!("Unknown APID 0x{:x?} detected", sp_header.apid());
|
||||
Ok(())
|
||||
}
|
||||
}
|
@ -1,12 +1,7 @@
|
||||
use lazy_static::lazy_static;
|
||||
use satrs::{
|
||||
res_code::ResultU16,
|
||||
spacepackets::{PacketId, PacketType},
|
||||
};
|
||||
use satrs::res_code::ResultU16;
|
||||
use satrs_mib::res_code::ResultU16Info;
|
||||
use satrs_mib::resultcode;
|
||||
use std::{collections::HashSet, net::Ipv4Addr};
|
||||
use strum::IntoEnumIterator;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use num_enum::{IntoPrimitive, TryFromPrimitive};
|
||||
use satrs::{
|
||||
@ -14,6 +9,8 @@ use satrs::{
|
||||
pool::{StaticMemoryPool, StaticPoolConfig},
|
||||
};
|
||||
|
||||
pub const PUS_APID: u16 = 0x02;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
|
||||
#[repr(u8)]
|
||||
pub enum CustomPusServiceId {
|
||||
@ -32,30 +29,13 @@ pub const AOCS_APID: u16 = 1;
|
||||
pub enum GroupId {
|
||||
Tmtc = 0,
|
||||
Hk = 1,
|
||||
Mode = 2,
|
||||
}
|
||||
|
||||
pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
|
||||
pub const SERVER_PORT: u16 = 7301;
|
||||
|
||||
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(0, 0);
|
||||
|
||||
lazy_static! {
|
||||
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
|
||||
let mut set = HashSet::new();
|
||||
for id in components::Apid::iter() {
|
||||
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
|
||||
}
|
||||
set
|
||||
};
|
||||
pub static ref APID_VALIDATOR: HashSet<u16> = {
|
||||
let mut set = HashSet::new();
|
||||
for id in components::Apid::iter() {
|
||||
set.insert(id as u16);
|
||||
}
|
||||
set
|
||||
};
|
||||
}
|
||||
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
|
||||
EventU32TypedSev::<SeverityInfo>::const_new(0, 0);
|
||||
|
||||
pub mod tmtc_err {
|
||||
|
||||
@ -73,8 +53,6 @@ pub mod tmtc_err {
|
||||
pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 4);
|
||||
#[resultcode]
|
||||
pub const ROUTING_ERROR: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 5);
|
||||
#[resultcode(info = "Request timeout for targeted PUS request. P1: Request ID. P2: Target ID")]
|
||||
pub const REQUEST_TIMEOUT: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 6);
|
||||
|
||||
#[resultcode(
|
||||
info = "Not enough data inside the TC application data field. Optionally includes: \
|
||||
@ -114,70 +92,27 @@ pub mod hk_err {
|
||||
];
|
||||
}
|
||||
|
||||
pub mod mode_err {
|
||||
use super::*;
|
||||
|
||||
#[resultcode]
|
||||
pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0);
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
pub enum TmSenderId {
|
||||
PusVerification = 0,
|
||||
PusTest = 1,
|
||||
PusEvent = 2,
|
||||
PusHk = 3,
|
||||
PusAction = 4,
|
||||
PusSched = 5,
|
||||
AllEvents = 6,
|
||||
AcsSubsystem = 7,
|
||||
}
|
||||
|
||||
pub mod components {
|
||||
use satrs::request::UniqueApidTargetId;
|
||||
use strum::EnumIter;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
|
||||
pub enum Apid {
|
||||
Sched = 1,
|
||||
GenericPus = 2,
|
||||
Acs = 3,
|
||||
Cfdp = 4,
|
||||
Tmtc = 5,
|
||||
}
|
||||
|
||||
// Component IDs for components with the PUS APID.
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
pub enum PusId {
|
||||
PusEventManagement = 0,
|
||||
PusRouting = 1,
|
||||
PusTest = 2,
|
||||
PusAction = 3,
|
||||
PusMode = 4,
|
||||
PusHk = 5,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
pub enum AcsId {
|
||||
Mgm0 = 0,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
pub enum TmtcId {
|
||||
UdpServer = 0,
|
||||
TcpServer = 1,
|
||||
}
|
||||
|
||||
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
|
||||
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
|
||||
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
|
||||
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
|
||||
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
|
||||
pub const PUS_HK_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
|
||||
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::Sched as u16, 0);
|
||||
pub const MGM_HANDLER_0: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
|
||||
pub const UDP_SERVER: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
|
||||
pub const TCP_SERVER: UniqueApidTargetId =
|
||||
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
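Each of these constants pairs an APID from the `Apid` enum with a component-unique ID, so both the CCSDS routing layer and the targeted request router can derive what they need from a single value. A small sketch of what that composition looks like at runtime (the assertions only restate the definitions above):

```rust
// The MGM handler identifier carries the ACS APID and the MGM0 unique ID.
let mgm_id = MGM_HANDLER_0;
assert_eq!(mgm_id.apid, Apid::Acs as u16);
assert_eq!(mgm_id.unique_id, AcsId::Mgm0 as u32);
// The combined ID is what is attached to messages and used as a TM sender ID.
let _component_id = mgm_id.id();
```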
|
||||
#[derive(Copy, Clone, PartialEq, Eq)]
|
||||
pub enum TcReceiverId {
|
||||
PusTest = 1,
|
||||
PusEvent = 2,
|
||||
PusHk = 3,
|
||||
PusAction = 4,
|
||||
PusSched = 5,
|
||||
}
|
||||
|
||||
pub mod pool {
|
||||
use super::*;
|
||||
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
|
||||
|
@ -1,90 +1,66 @@
|
||||
use std::sync::mpsc::{self};
|
||||
|
||||
use crate::pus::create_verification_reporter;
|
||||
use satrs::event_man::{EventMessageU32, EventRoutingError};
|
||||
use satrs::pus::event::EventTmHookProvider;
|
||||
use satrs::pus::verification::VerificationReporter;
|
||||
use satrs::pus::EcssTmSender;
|
||||
use satrs::request::UniqueApidTargetId;
|
||||
use satrs::{
|
||||
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
|
||||
event_man::{
|
||||
EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded,
|
||||
MpscEventReceiver,
|
||||
},
|
||||
events::EventU32,
|
||||
params::Params,
|
||||
pus::{
|
||||
event_man::{
|
||||
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
|
||||
DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken,
|
||||
},
|
||||
verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
|
||||
EcssTmSender,
|
||||
},
|
||||
spacepackets::time::cds::CdsTime,
|
||||
spacepackets::time::cds::{self, CdsTime},
|
||||
};
|
||||
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
|
||||
use satrs_example::config::PUS_APID;
|
||||
|
||||
use crate::update_time;
|
||||
|
||||
// This helper sets the APID of the event sender for the PUS telemetry.
|
||||
#[derive(Default)]
|
||||
pub struct EventApidSetter {
|
||||
pub next_apid: u16,
|
||||
}
|
||||
|
||||
impl EventTmHookProvider for EventApidSetter {
|
||||
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
|
||||
tm.set_apid(self.next_apid);
|
||||
}
|
||||
}
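The hook above is invoked on every generated event TM before it is sent, which is why the creator can stamp the sender's APID per event. Any other last-minute adjustment can be applied the same way; the fixed-APID hook below is a purely illustrative sketch and not part of the example code:

```rust
/// Illustrative hook which forces a fixed APID, e.g. for a ground test configuration.
pub struct FixedApidSetter {
    pub apid: u16,
}

impl EventTmHookProvider for FixedApidSetter {
    fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
        tm.set_apid(self.apid);
    }
}
```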
|
||||
|
||||
/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
|
||||
/// packets. It also handles the verification completion of PUS event service requests.
|
||||
pub struct PusEventHandler<TmSender: EcssTmSender> {
|
||||
pub struct PusEventHandler<VerificationReporter: VerificationReportingProvider> {
|
||||
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
|
||||
pus_event_tm_creator: DefaultPusEventU32TmCreator<EventApidSetter>,
|
||||
pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
|
||||
tm_sender: TmSender,
|
||||
pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>,
|
||||
pus_event_man_rx: mpsc::Receiver<(EventU32, Option<Params>)>,
|
||||
tm_sender: Box<dyn EcssTmSender>,
|
||||
time_provider: CdsTime,
|
||||
timestamp: [u8; 7],
|
||||
small_data_buf: [u8; 64],
|
||||
verif_handler: VerificationReporter,
|
||||
}
|
||||
/*
|
||||
*/
|
||||
|
||||
impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
|
||||
impl<VerificationReporter: VerificationReportingProvider> PusEventHandler<VerificationReporter> {
|
||||
pub fn new(
|
||||
tm_sender: TmSender,
|
||||
verif_handler: VerificationReporter,
|
||||
event_manager: &mut EventManagerWithBoundedMpsc,
|
||||
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
|
||||
tm_sender: impl EcssTmSender,
|
||||
) -> Self {
|
||||
let event_queue_cap = 30;
|
||||
let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
|
||||
|
||||
// All events sent to the manager are routed to the PUS event manager, which generates PUS event
|
||||
// telemetry for each event.
|
||||
let event_reporter = EventReporter::new_with_hook(
|
||||
PUS_EVENT_MANAGEMENT.raw(),
|
||||
0,
|
||||
0,
|
||||
128,
|
||||
EventApidSetter::default(),
|
||||
)
|
||||
.unwrap();
|
||||
let event_reporter = EventReporter::new(PUS_APID, 128).unwrap();
|
||||
let pus_event_dispatcher =
|
||||
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
|
||||
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
|
||||
PUS_EVENT_MANAGEMENT.raw(),
|
||||
pus_event_man_tx,
|
||||
event_queue_cap,
|
||||
);
|
||||
DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter);
|
||||
let pus_event_man_send_provider =
|
||||
EventU32SenderMpscBounded::new(1, pus_event_man_tx, event_queue_cap);
|
||||
|
||||
event_manager.subscribe_all(pus_event_man_send_provider.target_id());
|
||||
event_manager.subscribe_all(pus_event_man_send_provider.channel_id());
|
||||
event_manager.add_sender(pus_event_man_send_provider);
|
||||
|
||||
Self {
|
||||
event_request_rx,
|
||||
pus_event_tm_creator: pus_event_dispatcher,
|
||||
pus_event_dispatcher,
|
||||
pus_event_man_rx,
|
||||
time_provider: CdsTime::new_with_u16_days(0, 0),
|
||||
time_provider: cds::CdsTime::new_with_u16_days(0, 0),
|
||||
timestamp: [0; 7],
|
||||
small_data_buf: [0; 64],
|
||||
verif_handler,
|
||||
tm_sender,
|
||||
tm_sender: Box::new(tm_sender),
|
||||
}
|
||||
}
|
||||
|
||||
@ -95,203 +71,115 @@ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
|
||||
.try_into()
|
||||
.expect("expected start verification token");
|
||||
self.verif_handler
|
||||
.completion_success(&self.tm_sender, started_token, timestamp)
|
||||
.completion_success(started_token, timestamp)
|
||||
.expect("Sending completion success failed");
|
||||
};
|
||||
loop {
|
||||
// handle event requests
|
||||
match self.event_request_rx.try_recv() {
|
||||
Ok(event_req) => match event_req.request {
|
||||
EventRequest::Enable(event) => {
|
||||
self.pus_event_tm_creator
|
||||
.enable_tm_for_event(&event)
|
||||
.expect("Enabling TM failed");
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
report_completion(event_req, &self.timestamp);
|
||||
}
|
||||
EventRequest::Disable(event) => {
|
||||
self.pus_event_tm_creator
|
||||
.disable_tm_for_event(&event)
|
||||
.expect("Disabling TM failed");
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
report_completion(event_req, &self.timestamp);
|
||||
}
|
||||
},
|
||||
Err(e) => match e {
|
||||
mpsc::TryRecvError::Empty => break,
|
||||
mpsc::TryRecvError::Disconnected => {
|
||||
log::warn!("all event request senders have disconnected");
|
||||
break;
|
||||
}
|
||||
},
|
||||
// handle event requests
|
||||
if let Ok(event_req) = self.event_request_rx.try_recv() {
|
||||
match event_req.request {
|
||||
EventRequest::Enable(event) => {
|
||||
self.pus_event_dispatcher
|
||||
.enable_tm_for_event(&event)
|
||||
.expect("Enabling TM failed");
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
report_completion(event_req, &self.timestamp);
|
||||
}
|
||||
EventRequest::Disable(event) => {
|
||||
self.pus_event_dispatcher
|
||||
.disable_tm_for_event(&event)
|
||||
.expect("Disabling TM failed");
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
report_completion(event_req, &self.timestamp);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm(&mut self) {
|
||||
loop {
|
||||
// Perform the generation of PUS event packets
|
||||
match self.pus_event_man_rx.try_recv() {
|
||||
Ok(event_msg) => {
|
||||
// We use the TM modification hook to set the sender APID for each event.
|
||||
self.pus_event_tm_creator.reporter.tm_hook.next_apid =
|
||||
UniqueApidTargetId::from(event_msg.sender_id()).apid;
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
let generation_result = self
|
||||
.pus_event_tm_creator
|
||||
.generate_pus_event_tm_generic_with_generic_params(
|
||||
&self.tm_sender,
|
||||
&self.timestamp,
|
||||
event_msg.event(),
|
||||
&mut self.small_data_buf,
|
||||
event_msg.params(),
|
||||
)
|
||||
.expect("Sending TM as event failed");
|
||||
if !generation_result.params_were_propagated {
|
||||
log::warn!(
|
||||
"Event TM parameters were not propagated: {:?}",
|
||||
event_msg.params()
|
||||
);
|
||||
}
|
||||
}
|
||||
Err(e) => match e {
|
||||
mpsc::TryRecvError::Empty => break,
|
||||
mpsc::TryRecvError::Disconnected => {
|
||||
log::warn!("All event senders have disconnected");
|
||||
break;
|
||||
}
|
||||
},
|
||||
}
|
||||
// Perform the generation of PUS event packets
|
||||
if let Ok((event, _param)) = self.pus_event_man_rx.try_recv() {
|
||||
update_time(&mut self.time_provider, &mut self.timestamp);
|
||||
self.pus_event_dispatcher
|
||||
.generate_pus_event_tm_generic(
|
||||
self.tm_sender.upcast_mut(),
|
||||
&self.timestamp,
|
||||
event,
|
||||
None,
|
||||
)
|
||||
.expect("Sending TM as event failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct EventHandler<TmSender: EcssTmSender> {
|
||||
pub pus_event_handler: PusEventHandler<TmSender>,
|
||||
pub struct EventManagerWrapper {
|
||||
event_manager: EventManagerWithBoundedMpsc,
|
||||
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender> EventHandler<TmSender> {
|
||||
pub fn new(
|
||||
tm_sender: TmSender,
|
||||
event_rx: mpsc::Receiver<EventMessageU32>,
|
||||
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
|
||||
) -> Self {
|
||||
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
|
||||
let pus_event_handler = PusEventHandler::new(
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
|
||||
&mut event_manager,
|
||||
event_request_rx,
|
||||
);
|
||||
|
||||
impl EventManagerWrapper {
|
||||
pub fn new() -> Self {
|
||||
// The sender handle is the primary sender handle for all components which want to create events.
|
||||
// The event manager will receive the RX handle to receive all the events.
|
||||
let (event_sender, event_man_rx) = mpsc::channel();
|
||||
let event_recv = MpscEventReceiver::<EventU32>::new(event_man_rx);
|
||||
Self {
|
||||
pus_event_handler,
|
||||
event_manager,
|
||||
event_manager: EventManagerWithBoundedMpsc::new(event_recv),
|
||||
event_sender,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
|
||||
self.event_sender.clone()
|
||||
}
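The cloned sender handle is the only thing a component needs in order to publish events; the manager keeps the single receiving end. A rough usage sketch, assuming the `EventU32::new` constructor shown in the test module further down:

```rust
// Any component may clone the sender handle and publish events to the event manager.
let event_sender = event_man_wrapper.clone_event_sender();
let example_event = EventU32::new(satrs::events::Severity::Info, 1, 1);
event_sender
    .send((example_event, None))
    .expect("sending example event failed");
```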
|
||||
|
||||
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
|
||||
&mut self.event_manager
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.pus_event_handler.handle_event_requests();
|
||||
self.try_event_routing();
|
||||
self.pus_event_handler.generate_pus_event_tm();
|
||||
}
|
||||
|
||||
pub fn try_event_routing(&mut self) {
|
||||
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
|
||||
self.routing_error_handler(event_msg, error)
|
||||
};
|
||||
// Perform the event routing.
|
||||
self.event_manager.try_event_handling(error_handler);
|
||||
}
|
||||
|
||||
pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
|
||||
log::warn!("event routing error for event {event_msg:?}: {error:?}");
|
||||
self.event_manager
|
||||
.try_event_handling()
|
||||
.expect("event handling failed");
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use satrs::{
|
||||
events::EventU32,
|
||||
pus::verification::VerificationReporterCfg,
|
||||
spacepackets::{
|
||||
ecss::{tm::PusTmReader, PusPacket},
|
||||
CcsdsPacket,
|
||||
},
|
||||
tmtc::PacketAsVec,
|
||||
};
|
||||
pub struct EventHandler<VerificationReporter: VerificationReportingProvider> {
|
||||
pub event_man_wrapper: EventManagerWrapper,
|
||||
pub pus_event_handler: PusEventHandler<VerificationReporter>,
|
||||
}
|
||||
|
||||
use super::*;
|
||||
|
||||
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
|
||||
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
|
||||
|
||||
pub struct EventManagementTestbench {
|
||||
pub event_tx: mpsc::SyncSender<EventMessageU32>,
|
||||
pub event_manager: EventManagerWithBoundedMpsc,
|
||||
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
|
||||
pub pus_event_handler: PusEventHandler<mpsc::Sender<PacketAsVec>>,
|
||||
}
|
||||
|
||||
impl EventManagementTestbench {
|
||||
pub fn new() -> Self {
|
||||
let (event_tx, event_rx) = mpsc::sync_channel(10);
|
||||
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
|
||||
let (tm_sender, tm_receiver) = mpsc::channel();
|
||||
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
|
||||
let verif_reporter =
|
||||
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
|
||||
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
|
||||
let pus_event_handler = PusEventHandler::<mpsc::Sender<PacketAsVec>>::new(
|
||||
tm_sender,
|
||||
verif_reporter,
|
||||
&mut event_manager,
|
||||
event_req_rx,
|
||||
);
|
||||
Self {
|
||||
event_tx,
|
||||
tm_receiver,
|
||||
event_manager,
|
||||
pus_event_handler,
|
||||
}
|
||||
impl<VerificationReporter: VerificationReportingProvider> EventHandler<VerificationReporter> {
|
||||
pub fn new(
|
||||
tm_sender: impl EcssTmSender,
|
||||
verif_handler: VerificationReporter,
|
||||
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
|
||||
) -> Self {
|
||||
let mut event_man_wrapper = EventManagerWrapper::new();
|
||||
let pus_event_handler = PusEventHandler::new(
|
||||
verif_handler,
|
||||
event_man_wrapper.event_manager(),
|
||||
event_request_rx,
|
||||
tm_sender,
|
||||
);
|
||||
Self {
|
||||
event_man_wrapper,
|
||||
pus_event_handler,
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_event_generation() {
|
||||
let mut testbench = EventManagementTestbench::new();
|
||||
testbench
|
||||
.event_tx
|
||||
.send(EventMessageU32::new(
|
||||
TEST_CREATOR_ID.id(),
|
||||
EventU32::new(satrs::events::Severity::Info, 1, 1),
|
||||
))
|
||||
.expect("failed to send event");
|
||||
testbench.pus_event_handler.handle_event_requests();
|
||||
testbench.event_manager.try_event_handling(|_, _| {});
|
||||
testbench.pus_event_handler.generate_pus_event_tm();
|
||||
let tm_packet = testbench
|
||||
.tm_receiver
|
||||
.try_recv()
|
||||
.expect("failed to receive TM packet");
|
||||
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
|
||||
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
|
||||
.expect("failed to create TM reader")
|
||||
.0;
|
||||
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
|
||||
assert_eq!(tm_reader.user_data().len(), 4);
|
||||
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());
|
||||
assert_eq!(event_read_back, TEST_EVENT);
|
||||
pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
|
||||
self.event_man_wrapper.clone_event_sender()
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_event_disabled() {
|
||||
// TODO: Add test.
|
||||
#[allow(dead_code)]
|
||||
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
|
||||
self.event_man_wrapper.event_manager()
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.pus_event_handler.handle_event_requests();
|
||||
self.event_man_wrapper.try_event_routing();
|
||||
self.pus_event_handler.generate_pus_event_tm();
|
||||
}
|
||||
}
|
||||
|
@ -1,25 +1,27 @@
|
||||
use derive_new::new;
|
||||
use satrs::hk::UniqueId;
|
||||
use satrs::request::UniqueApidTargetId;
|
||||
use satrs::spacepackets::ByteConversionError;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum AcsHkIds {
|
||||
TestMgmSet = 1,
|
||||
}
|
||||
|
||||
#[derive(Debug, new, Copy, Clone)]
|
||||
pub struct HkUniqueId {
|
||||
target_id: UniqueApidTargetId,
|
||||
set_id: UniqueId,
|
||||
target_id: u32,
|
||||
set_id: u32,
|
||||
}
|
||||
|
||||
impl HkUniqueId {
|
||||
#[allow(dead_code)]
|
||||
pub fn target_id(&self) -> UniqueApidTargetId {
|
||||
pub fn target_id(&self) -> u32 {
|
||||
self.target_id
|
||||
}
|
||||
#[allow(dead_code)]
|
||||
pub fn set_id(&self) -> UniqueId {
|
||||
pub fn set_id(&self) -> u32 {
|
||||
self.set_id
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
if buf.len() < 8 {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
@ -27,7 +29,7 @@ impl HkUniqueId {
|
||||
expected: 8,
|
||||
});
|
||||
}
|
||||
buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes());
|
||||
buf[0..4].copy_from_slice(&self.target_id.to_be_bytes());
|
||||
buf[4..8].copy_from_slice(&self.set_id.to_be_bytes());
|
||||
|
||||
Ok(8)
|
||||
|
@ -1,3 +0,0 @@
|
||||
//! This module contains all component related to the direct interface of the example.
|
||||
pub mod tcp;
|
||||
pub mod udp;
|
@ -1,154 +0,0 @@
|
||||
use std::time::Duration;
|
||||
use std::{
|
||||
collections::{HashSet, VecDeque},
|
||||
fmt::Debug,
|
||||
marker::PhantomData,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use log::{info, warn};
|
||||
use satrs::{
|
||||
encoding::ccsds::{SpValidity, SpacePacketValidator},
|
||||
hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
|
||||
spacepackets::{CcsdsPacket, PacketId},
|
||||
tmtc::{PacketSenderRaw, PacketSource},
|
||||
};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ConnectionFinishedHandler {}
|
||||
|
||||
pub struct SimplePacketValidator {
|
||||
pub valid_ids: HashSet<PacketId>,
|
||||
}
|
||||
|
||||
impl SpacePacketValidator for SimplePacketValidator {
|
||||
fn validate(
|
||||
&self,
|
||||
sp_header: &satrs::spacepackets::SpHeader,
|
||||
_raw_buf: &[u8],
|
||||
) -> satrs::encoding::ccsds::SpValidity {
|
||||
if self.valid_ids.contains(&sp_header.packet_id()) {
|
||||
return SpValidity::Valid;
|
||||
}
|
||||
log::warn!("ignoring space packet with header {:?}", sp_header);
|
||||
// We could perform a CRC check, but let's keep this simple and assume that TCP ensures
|
||||
// data integrity.
|
||||
SpValidity::Skip
|
||||
}
|
||||
}
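The validator only accepts space packets whose packet ID is contained in the configured set and skips everything else without dropping the connection. A small construction sketch, mirroring how the packet ID set is built in the config module (the helper function name is illustrative):

```rust
use std::collections::HashSet;
use satrs::spacepackets::{PacketId, PacketType};

// Build a validator which only accepts telecommands with the given APID.
fn create_tc_validator(apid: u16) -> SimplePacketValidator {
    let mut valid_ids = HashSet::new();
    valid_ids.insert(PacketId::new(PacketType::Tc, true, apid));
    SimplePacketValidator { valid_ids }
}
```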
|
||||
|
||||
impl HandledConnectionHandler for ConnectionFinishedHandler {
|
||||
fn handled_connection(&mut self, info: satrs::hal::std::tcp_server::HandledConnectionInfo) {
|
||||
info!(
|
||||
"Served {} TMs and {} TCs for client {:?}",
|
||||
info.num_sent_tms, info.num_received_tcs, info.addr
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct SyncTcpTmSource {
|
||||
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
max_packets_stored: usize,
|
||||
pub silent_packet_overwrite: bool,
|
||||
}
|
||||
|
||||
impl SyncTcpTmSource {
|
||||
pub fn new(max_packets_stored: usize) -> Self {
|
||||
Self {
|
||||
tm_queue: Arc::default(),
|
||||
max_packets_stored,
|
||||
silent_packet_overwrite: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tm(&mut self, tm: &[u8]) {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if tm_queue.len() > self.max_packets_stored {
|
||||
if !self.silent_packet_overwrite {
|
||||
warn!("TPC TM source is full, deleting oldest packet");
|
||||
}
|
||||
tm_queue.pop_front();
|
||||
}
|
||||
tm_queue.push_back(tm.to_vec());
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketSource for SyncTcpTmSource {
|
||||
type Error = ();
|
||||
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if !tm_queue.is_empty() {
|
||||
let next_vec = tm_queue.front().unwrap();
|
||||
if buffer.len() < next_vec.len() {
|
||||
panic!(
|
||||
"provided buffer too small, must be at least {} bytes",
|
||||
next_vec.len()
|
||||
);
|
||||
}
|
||||
let next_vec = tm_queue.pop_front().unwrap();
|
||||
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
|
||||
if next_vec.len() > 9 {
|
||||
let service = next_vec[7];
|
||||
let subservice = next_vec[8];
|
||||
info!("Sending PUS TM[{service},{subservice}]")
|
||||
} else {
|
||||
info!("Sending PUS TM");
|
||||
}
|
||||
return Ok(next_vec.len());
|
||||
}
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
pub type TcpServer<ReceivesTc, SendError> = TcpSpacepacketsServer<
|
||||
SyncTcpTmSource,
|
||||
ReceivesTc,
|
||||
SimplePacketValidator,
|
||||
ConnectionFinishedHandler,
|
||||
(),
|
||||
SendError,
|
||||
>;
|
||||
|
||||
pub struct TcpTask<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>(
|
||||
pub TcpServer<TcSender, SendError>,
|
||||
PhantomData<SendError>,
|
||||
);
|
||||
|
||||
impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
|
||||
TcpTask<TcSender, SendError>
|
||||
{
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: SyncTcpTmSource,
|
||||
tc_sender: TcSender,
|
||||
valid_ids: HashSet<PacketId>,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
Ok(Self(
|
||||
TcpSpacepacketsServer::new(
|
||||
cfg,
|
||||
tm_source,
|
||||
tc_sender,
|
||||
SimplePacketValidator { valid_ids },
|
||||
ConnectionFinishedHandler::default(),
|
||||
None,
|
||||
)?,
|
||||
PhantomData,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
loop {
|
||||
let result = self
|
||||
.0
|
||||
.handle_all_connections(Some(Duration::from_millis(400)));
|
||||
match result {
|
||||
Ok(_conn_result) => (),
|
||||
Err(e) => {
|
||||
warn!("TCP server error: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,39 +1 @@
|
||||
use satrs::spacepackets::time::{cds::CdsTime, TimeWriter};
|
||||
|
||||
pub mod config;
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub enum DeviceMode {
|
||||
Off = 0,
|
||||
On = 1,
|
||||
Normal = 2,
|
||||
}
|
||||
|
||||
pub struct TimeStampHelper {
|
||||
stamper: CdsTime,
|
||||
time_stamp: [u8; 7],
|
||||
}
|
||||
|
||||
impl TimeStampHelper {
|
||||
pub fn stamp(&self) -> &[u8] {
|
||||
&self.time_stamp
|
||||
}
|
||||
|
||||
pub fn update_from_now(&mut self) {
|
||||
self.stamper
|
||||
.update_from_now()
|
||||
.expect("Updating timestamp failed");
|
||||
self.stamper
|
||||
.write_to_bytes(&mut self.time_stamp)
|
||||
.expect("Writing timestamp failed");
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TimeStampHelper {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
stamper: CdsTime::now_with_u16_days().expect("creating time stamper failed"),
|
||||
time_stamp: Default::default(),
|
||||
}
|
||||
}
|
||||
}
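The helper bundles a CDS time provider with a 7-byte raw buffer so components can refresh the stamp once per cycle and reuse the packed bytes for all telemetry generated in that cycle. A brief usage sketch:

```rust
// Refresh the CDS timestamp once, then reuse the packed bytes for all TM of this cycle.
let mut stamp_helper = TimeStampHelper::default();
stamp_helper.update_from_now();
let timestamp: &[u8] = stamp_helper.stamp();
assert_eq!(timestamp.len(), 7);
```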
|
||||
|
@ -1,223 +1,229 @@
|
||||
mod acs;
|
||||
mod ccsds;
|
||||
mod events;
|
||||
mod hk;
|
||||
mod interface;
|
||||
mod logger;
|
||||
mod pus;
|
||||
mod requests;
|
||||
mod tcp;
|
||||
mod tm_funnel;
|
||||
mod tmtc;
|
||||
mod udp;
|
||||
|
||||
use crate::events::EventHandler;
|
||||
use crate::interface::udp::DynamicUdpTmHandler;
|
||||
use crate::pus::stack::PusStack;
|
||||
use crate::tmtc::tc_source::{TcSourceTaskDynamic, TcSourceTaskStatic};
|
||||
use crate::tmtc::tm_sink::{TmSinkDynamic, TmSinkStatic};
|
||||
use crate::tm_funnel::{TmFunnelDynamic, TmFunnelStatic};
|
||||
use log::info;
|
||||
use pus::test::create_test_service_dynamic;
|
||||
use satrs::hal::std::tcp_server::ServerConfig;
|
||||
use satrs::hal::std::udp_server::UdpTcServer;
|
||||
use satrs::request::GenericMessage;
|
||||
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
|
||||
use satrs::request::TargetAndApidId;
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
|
||||
use satrs_example::config::tasks::{
|
||||
FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC,
|
||||
};
|
||||
use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT};
|
||||
use satrs_example::config::{RequestTargetId, TmSenderId, OBSW_SERVER_ADDR, PUS_APID, SERVER_PORT};
|
||||
use tmtc::PusTcSourceProviderDynamic;
|
||||
use udp::DynamicUdpTmHandler;
|
||||
|
||||
use crate::acs::mgm::{MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface};
|
||||
use crate::interface::tcp::{SyncTcpTmSource, TcpTask};
|
||||
use crate::interface::udp::{StaticUdpTmHandler, UdpTmtcServer};
|
||||
use crate::acs::AcsTask;
|
||||
use crate::ccsds::CcsdsReceiver;
|
||||
use crate::logger::setup_logger;
|
||||
use crate::pus::action::{create_action_service_dynamic, create_action_service_static};
|
||||
use crate::pus::event::{create_event_service_dynamic, create_event_service_static};
|
||||
use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static};
|
||||
use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static};
|
||||
use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static};
|
||||
use crate::pus::test::create_test_service_static;
|
||||
use crate::pus::{PusTcDistributor, PusTcMpscRouter};
|
||||
use crate::requests::{CompositeRequest, GenericRequestRouter};
|
||||
use satrs::mode::ModeRequest;
|
||||
use crate::pus::{PusReceiver, PusTcMpscRouter};
|
||||
use crate::requests::{GenericRequestRouter, RequestWithToken};
|
||||
use crate::tcp::{SyncTcpTmSource, TcpTask};
|
||||
use crate::tmtc::{
|
||||
PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic,
|
||||
};
|
||||
use crate::udp::{StaticUdpTmHandler, UdpTmtcServer};
|
||||
use satrs::pus::event_man::EventRequestWithToken;
|
||||
use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
|
||||
use satrs::pus::{EcssTmSender, TmAsVecSenderWithId, TmInSharedPoolSenderWithId};
|
||||
use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
|
||||
use satrs_example::config::components::{MGM_HANDLER_0, TCP_SERVER, UDP_SERVER};
|
||||
use satrs::tmtc::CcsdsDistributor;
|
||||
use satrs::ChannelId;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::mpsc;
|
||||
use std::sync::mpsc::{self, channel};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::thread;
|
||||
use std::time::Duration;
|
||||
|
||||
fn create_verification_reporter<Sender: EcssTmSender + Clone>(
|
||||
verif_sender: Sender,
|
||||
) -> VerificationReporterWithSender<Sender> {
|
||||
let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap();
|
||||
// Every software component which needs to generate verification telemetry gets a cloned
|
||||
// verification reporter.
|
||||
VerificationReporterWithSender::new(&verif_cfg, verif_sender)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn static_tmtc_pool_main() {
|
||||
let (tm_pool, tc_pool) = create_static_pools();
|
||||
let shared_tm_pool = Arc::new(RwLock::new(tm_pool));
|
||||
let shared_tc_pool = Arc::new(RwLock::new(tc_pool));
|
||||
let shared_tm_pool_wrapper = SharedPacketPool::new(&shared_tm_pool);
|
||||
let shared_tc_pool_wrapper = SharedPacketPool::new(&shared_tc_pool);
|
||||
let shared_tm_pool = SharedTmPool::new(tm_pool);
|
||||
let shared_tc_pool = SharedTcPool {
|
||||
pool: Arc::new(RwLock::new(tc_pool)),
|
||||
};
|
||||
let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
|
||||
let (tm_sink_tx, tm_sink_rx) = mpsc::sync_channel(50);
|
||||
let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50);
|
||||
let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
|
||||
|
||||
let tm_sink_tx_sender =
|
||||
PacketSenderWithSharedPool::new(tm_sink_tx.clone(), shared_tm_pool_wrapper.clone());
|
||||
|
||||
let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
|
||||
mpsc::channel::<GenericMessage<CompositeRequest>>();
|
||||
let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();
|
||||
// Every software component which needs to generate verification telemetry receives a cloned
|
||||
// verification reporter.
|
||||
let verif_reporter = create_verification_reporter(TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusVerification as ChannelId,
|
||||
"verif_sender",
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
));
|
||||
|
||||
let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32);
|
||||
let (acs_thread_tx, acs_thread_rx) = channel::<RequestWithToken>();
|
||||
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
|
||||
let mut request_map = GenericRequestRouter::default();
|
||||
request_map
|
||||
.composite_router_map
|
||||
.insert(MGM_HANDLER_0.id(), mgm_handler_composite_tx);
|
||||
request_map
|
||||
.mode_router_map
|
||||
.insert(MGM_HANDLER_0.id(), mgm_handler_mode_tx);
|
||||
request_map.0.insert(acs_target_id.into(), acs_thread_tx);
|
||||
|
||||
// This helper structure is used by all telecommand providers which need to send telecommands
|
||||
// to the TC source.
|
||||
let tc_source = PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());
|
||||
let tc_source = PusTcSourceProviderSharedPool {
|
||||
shared_pool: shared_tc_pool.clone(),
|
||||
tc_source: tc_source_tx,
|
||||
};
|
||||
|
||||
// Create event handling components
|
||||
// These sender handles are used to send event requests, for example to enable or disable
|
||||
// certain events.
|
||||
let (event_tx, event_rx) = mpsc::sync_channel(100);
|
||||
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
|
||||
|
||||
// The event task is the core handler to perform the event routing and TM handling as specified
|
||||
// in the sat-rs documentation.
|
||||
let mut event_handler = EventHandler::new(tm_sink_tx.clone(), event_rx, event_request_rx);
|
||||
|
||||
let (pus_test_tx, pus_test_rx) = mpsc::channel();
|
||||
let (pus_event_tx, pus_event_rx) = mpsc::channel();
|
||||
let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
|
||||
let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
|
||||
let (pus_action_tx, pus_action_rx) = mpsc::channel();
|
||||
let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
|
||||
|
||||
let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
|
||||
let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
|
||||
let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
|
||||
let mut event_handler = EventHandler::new(
|
||||
TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::AllEvents as ChannelId,
|
||||
"ALL_EVENTS_TX",
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
),
|
||||
verif_reporter.clone(),
|
||||
event_request_rx,
|
||||
);
|
||||
|
||||
let (pus_test_tx, pus_test_rx) = channel();
|
||||
let (pus_event_tx, pus_event_rx) = channel();
|
||||
let (pus_sched_tx, pus_sched_rx) = channel();
|
||||
let (pus_hk_tx, pus_hk_rx) = channel();
|
||||
let (pus_action_tx, pus_action_rx) = channel();
|
||||
let pus_router = PusTcMpscRouter {
|
||||
test_tc_sender: pus_test_tx,
|
||||
event_tc_sender: pus_event_tx,
|
||||
sched_tc_sender: pus_sched_tx,
|
||||
hk_tc_sender: pus_hk_tx,
|
||||
action_tc_sender: pus_action_tx,
|
||||
mode_tc_sender: pus_mode_tx,
|
||||
test_service_receiver: pus_test_tx,
|
||||
event_service_receiver: pus_event_tx,
|
||||
sched_service_receiver: pus_sched_tx,
|
||||
hk_service_receiver: pus_hk_tx,
|
||||
action_service_receiver: pus_action_tx,
|
||||
};
|
||||
let pus_test_service = create_test_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tc_pool.clone(),
|
||||
event_tx.clone(),
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
shared_tc_pool.pool.clone(),
|
||||
event_handler.clone_event_sender(),
|
||||
pus_test_rx,
|
||||
);
|
||||
let pus_scheduler_service = create_scheduler_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
tc_source.clone(),
|
||||
pus_sched_rx,
|
||||
create_sched_tc_pool(),
|
||||
);
|
||||
let pus_event_service = create_event_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tc_pool.clone(),
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
shared_tc_pool.pool.clone(),
|
||||
pus_event_rx,
|
||||
event_request_tx,
|
||||
);
|
||||
let pus_action_service = create_action_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tc_pool.clone(),
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
shared_tc_pool.pool.clone(),
|
||||
pus_action_rx,
|
||||
request_map.clone(),
|
||||
pus_action_reply_rx,
|
||||
);
|
||||
let pus_hk_service = create_hk_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tc_pool.clone(),
|
||||
shared_tm_pool.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
shared_tc_pool.pool.clone(),
|
||||
pus_hk_rx,
|
||||
request_map.clone(),
|
||||
pus_hk_reply_rx,
|
||||
);
|
||||
let pus_mode_service = create_mode_service_static(
|
||||
tm_sink_tx_sender.clone(),
|
||||
shared_tc_pool.clone(),
|
||||
pus_mode_rx,
|
||||
request_map,
|
||||
pus_mode_reply_rx,
|
||||
);
|
||||
    let mut pus_stack = PusStack::new(
        pus_test_service,
        pus_hk_service,
        pus_event_service,
        pus_action_service,
        pus_scheduler_service,
        pus_mode_service,
        pus_test_service,
    );

    let ccsds_receiver = CcsdsReceiver { tc_source };
    let mut tmtc_task = TcSourceTaskStatic::new(
        shared_tc_pool_wrapper.clone(),
        shared_tc_pool.clone(),
        tc_source_rx,
        PusTcDistributor::new(tm_sink_tx_sender, pus_router),
        PusReceiver::new(verif_reporter.clone(), pus_router),
    );

    let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
    let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source.clone())
    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
        .expect("creating UDP TMTC server failed");
    let mut udp_tmtc_server = UdpTmtcServer {
        udp_tc_server,
        tm_handler: StaticUdpTmHandler {
            tm_rx: tm_server_rx,
            tm_store: shared_tm_pool.clone(),
            tm_store: shared_tm_pool.clone_backing_pool(),
        },
    };

    let tcp_server_cfg = ServerConfig::new(
        TCP_SERVER.id(),
        sock_addr,
        Duration::from_millis(400),
        4096,
        8192,
    );
    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
    let sync_tm_tcp_source = SyncTcpTmSource::new(200);
    let mut tcp_server = TcpTask::new(
        tcp_server_cfg,
        sync_tm_tcp_source.clone(),
        tc_source.clone(),
        PACKET_ID_VALIDATOR.clone(),
        tcp_ccsds_distributor,
    )
    .expect("tcp server creation failed");

    let mut tm_sink = TmSinkStatic::new(
        shared_tm_pool_wrapper,
        sync_tm_tcp_source,
        tm_sink_rx,
        tm_server_tx,
    );
    let mut acs_task = AcsTask::new(
        TmInSharedPoolSenderWithId::new(
            TmSenderId::AcsSubsystem as ChannelId,
            "ACS_TASK_SENDER",
            shared_tm_pool.clone(),
            tm_funnel_tx.clone(),
        ),
        acs_thread_rx,
        verif_reporter,
    );

    let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
        mpsc::channel();

    let dummy_spi_interface = SpiDummyInterface::default();
    let shared_mgm_set = Arc::default();
    let mode_leaf_interface = MpscModeLeafInterface {
        request_rx: mgm_handler_mode_rx,
        reply_tx_to_pus: pus_mode_reply_tx,
        reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
    };
    let mut mgm_handler = MgmHandlerLis3Mdl::new(
        MGM_HANDLER_0,
        "MGM_0",
        mode_leaf_interface,
        mgm_handler_composite_rx,
        pus_hk_reply_tx,
        tm_sink_tx,
        dummy_spi_interface,
        shared_mgm_set,
    );
    let mut tm_funnel = TmFunnelStatic::new(
        shared_tm_pool,
        sync_tm_tcp_source,
        tm_funnel_rx,
        tm_server_tx,
    );

info!("Starting TMTC and UDP task");
|
||||
let jh_udp_tmtc = thread::Builder::new()
|
||||
.name("SATRS tmtc-udp".to_string())
|
||||
.name("TMTC and UDP".to_string())
|
||||
.spawn(move || {
|
||||
info!("Running UDP server on port {SERVER_PORT}");
|
||||
loop {
|
||||
@ -230,7 +236,7 @@ fn static_tmtc_pool_main() {
|
||||
|
||||
info!("Starting TCP task");
|
||||
let jh_tcp = thread::Builder::new()
|
||||
.name("sat-rs tcp".to_string())
|
||||
.name("TCP".to_string())
|
||||
.spawn(move || {
|
||||
info!("Running TCP server on port {SERVER_PORT}");
|
||||
loop {
|
||||
@ -241,15 +247,15 @@ fn static_tmtc_pool_main() {
|
||||
|
||||
info!("Starting TM funnel task");
|
||||
let jh_tm_funnel = thread::Builder::new()
|
||||
.name("tm sink".to_string())
|
||||
.name("TM Funnel".to_string())
|
||||
.spawn(move || loop {
|
||||
tm_sink.operation();
|
||||
tm_funnel.operation();
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
info!("Starting event handling task");
|
||||
let jh_event_handling = thread::Builder::new()
|
||||
.name("sat-rs events".to_string())
|
||||
.name("Event".to_string())
|
||||
.spawn(move || loop {
|
||||
event_handler.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
|
||||
@ -258,16 +264,16 @@ fn static_tmtc_pool_main() {
|
||||
|
||||
info!("Starting AOCS thread");
|
||||
let jh_aocs = thread::Builder::new()
|
||||
.name("sat-rs aocs".to_string())
|
||||
.name("AOCS".to_string())
|
||||
.spawn(move || loop {
|
||||
mgm_handler.periodic_operation();
|
||||
acs_task.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
info!("Starting PUS handler thread");
|
||||
let jh_pus_handler = thread::Builder::new()
|
||||
.name("sat-rs pus".to_string())
|
||||
.name("PUS".to_string())
|
||||
.spawn(move || loop {
|
||||
pus_stack.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
|
||||
@ -294,98 +300,104 @@ fn static_tmtc_pool_main() {
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn dyn_tmtc_pool_main() {
|
||||
let (tc_source_tx, tc_source_rx) = mpsc::channel();
|
||||
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
|
||||
let (tm_server_tx, tm_server_rx) = mpsc::channel();
|
||||
|
||||
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
|
||||
let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
|
||||
mpsc::channel::<GenericMessage<CompositeRequest>>();
|
||||
let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();
|
||||
let (tc_source_tx, tc_source_rx) = channel();
|
||||
let (tm_funnel_tx, tm_funnel_rx) = channel();
|
||||
let (tm_server_tx, tm_server_rx) = channel();
|
||||
// Every software component which needs to generate verification telemetry, gets a cloned
|
||||
// verification reporter.
|
||||
let verif_reporter = create_verification_reporter(TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusVerification as ChannelId,
|
||||
"verif_sender",
|
||||
tm_funnel_tx.clone(),
|
||||
));
|
||||
|
||||
let acs_target_id = TargetAndApidId::new(PUS_APID, RequestTargetId::AcsSubsystem as u32);
|
||||
let (acs_thread_tx, acs_thread_rx) = channel::<RequestWithToken>();
|
||||
// Some requests are targetable. This map is used to retrieve sender handles based on a target ID.
|
||||
let mut request_map = GenericRequestRouter::default();
|
||||
request_map
|
||||
.composite_router_map
|
||||
.insert(MGM_HANDLER_0.raw(), mgm_handler_composite_tx);
|
||||
request_map
|
||||
.mode_router_map
|
||||
.insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx);
|
||||
request_map.0.insert(acs_target_id.into(), acs_thread_tx);
|
||||
|
||||
let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
|
||||
|
||||
// Create event handling components
|
||||
// These sender handles are used to send event requests, for example to enable or disable
|
||||
// certain events.
|
||||
let (event_tx, event_rx) = mpsc::sync_channel(100);
|
||||
let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
|
||||
// The event task is the core handler to perform the event routing and TM handling as specified
|
||||
// in the sat-rs documentation.
|
||||
let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_rx, event_request_rx);
|
||||
|
||||
let (pus_test_tx, pus_test_rx) = mpsc::channel();
|
||||
let (pus_event_tx, pus_event_rx) = mpsc::channel();
|
||||
let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
|
||||
let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
|
||||
let (pus_action_tx, pus_action_rx) = mpsc::channel();
|
||||
let (pus_mode_tx, pus_mode_rx) = mpsc::channel();
|
||||
|
||||
let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
|
||||
let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
|
||||
let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();
|
||||
let mut event_handler = EventHandler::new(
|
||||
TmAsVecSenderWithId::new(
|
||||
TmSenderId::AllEvents as ChannelId,
|
||||
"ALL_EVENTS_TX",
|
||||
tm_funnel_tx.clone(),
|
||||
),
|
||||
verif_reporter.clone(),
|
||||
event_request_rx,
|
||||
);
|
||||
|
||||
let (pus_test_tx, pus_test_rx) = channel();
|
||||
let (pus_event_tx, pus_event_rx) = channel();
|
||||
let (pus_sched_tx, pus_sched_rx) = channel();
|
||||
let (pus_hk_tx, pus_hk_rx) = channel();
|
||||
let (pus_action_tx, pus_action_rx) = channel();
|
||||
let pus_router = PusTcMpscRouter {
|
||||
test_tc_sender: pus_test_tx,
|
||||
event_tc_sender: pus_event_tx,
|
||||
sched_tc_sender: pus_sched_tx,
|
||||
hk_tc_sender: pus_hk_tx,
|
||||
action_tc_sender: pus_action_tx,
|
||||
mode_tc_sender: pus_mode_tx,
|
||||
test_service_receiver: pus_test_tx,
|
||||
event_service_receiver: pus_event_tx,
|
||||
sched_service_receiver: pus_sched_tx,
|
||||
hk_service_receiver: pus_hk_tx,
|
||||
action_service_receiver: pus_action_tx,
|
||||
};
|
||||
|
||||
let pus_test_service =
|
||||
create_test_service_dynamic(tm_funnel_tx.clone(), event_tx.clone(), pus_test_rx);
|
||||
let pus_test_service = create_test_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
event_handler.clone_event_sender(),
|
||||
pus_test_rx,
|
||||
);
|
||||
let pus_scheduler_service = create_scheduler_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
tc_source_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
tc_source.0.clone(),
|
||||
pus_sched_rx,
|
||||
create_sched_tc_pool(),
|
||||
);
|
||||
|
||||
let pus_event_service =
|
||||
create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
|
||||
let pus_event_service = create_event_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
pus_event_rx,
|
||||
event_request_tx,
|
||||
);
|
||||
let pus_action_service = create_action_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
pus_action_rx,
|
||||
request_map.clone(),
|
||||
pus_action_reply_rx,
|
||||
);
|
||||
let pus_hk_service = create_hk_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter.clone(),
|
||||
pus_hk_rx,
|
||||
request_map.clone(),
|
||||
pus_hk_reply_rx,
|
||||
);
|
||||
let pus_mode_service = create_mode_service_dynamic(
|
||||
tm_funnel_tx.clone(),
|
||||
pus_mode_rx,
|
||||
request_map,
|
||||
pus_mode_reply_rx,
|
||||
);
|
||||
let mut pus_stack = PusStack::new(
|
||||
pus_test_service,
|
||||
pus_hk_service,
|
||||
pus_event_service,
|
||||
pus_action_service,
|
||||
pus_scheduler_service,
|
||||
pus_mode_service,
|
||||
pus_test_service,
|
||||
);
|
||||
|
||||
let ccsds_receiver = CcsdsReceiver { tc_source };
|
||||
|
||||
let mut tmtc_task = TcSourceTaskDynamic::new(
|
||||
tc_source_rx,
|
||||
PusTcDistributor::new(tm_funnel_tx.clone(), pus_router),
|
||||
PusReceiver::new(verif_reporter.clone(), pus_router),
|
||||
);
|
||||
|
||||
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
|
||||
let udp_tc_server = UdpTcServer::new(UDP_SERVER.id(), sock_addr, 2048, tc_source_tx.clone())
|
||||
let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
|
||||
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
|
||||
.expect("creating UDP TMTC server failed");
|
||||
let mut udp_tmtc_server = UdpTmtcServer {
|
||||
udp_tc_server,
|
||||
@ -394,47 +406,30 @@ fn dyn_tmtc_pool_main() {
|
||||
},
|
||||
};
|
||||
|
||||
let tcp_server_cfg = ServerConfig::new(
|
||||
TCP_SERVER.id(),
|
||||
sock_addr,
|
||||
Duration::from_millis(400),
|
||||
4096,
|
||||
8192,
|
||||
);
|
||||
let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
|
||||
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
|
||||
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
|
||||
let mut tcp_server = TcpTask::new(
|
||||
tcp_server_cfg,
|
||||
sync_tm_tcp_source.clone(),
|
||||
tc_source_tx.clone(),
|
||||
PACKET_ID_VALIDATOR.clone(),
|
||||
tcp_ccsds_distributor,
|
||||
)
|
||||
.expect("tcp server creation failed");
|
||||
|
||||
let mut tm_funnel = TmSinkDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
|
||||
|
||||
let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
|
||||
mpsc::channel();
|
||||
let dummy_spi_interface = SpiDummyInterface::default();
|
||||
let shared_mgm_set = Arc::default();
|
||||
let mode_leaf_interface = MpscModeLeafInterface {
|
||||
request_rx: mgm_handler_mode_rx,
|
||||
reply_tx_to_pus: pus_mode_reply_tx,
|
||||
reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
|
||||
};
|
||||
let mut mgm_handler = MgmHandlerLis3Mdl::new(
|
||||
MGM_HANDLER_0,
|
||||
"MGM_0",
|
||||
mode_leaf_interface,
|
||||
mgm_handler_composite_rx,
|
||||
pus_hk_reply_tx,
|
||||
tm_funnel_tx,
|
||||
dummy_spi_interface,
|
||||
shared_mgm_set,
|
||||
let mut acs_task = AcsTask::new(
|
||||
TmAsVecSenderWithId::new(
|
||||
TmSenderId::AcsSubsystem as ChannelId,
|
||||
"ACS_TASK_SENDER",
|
||||
tm_funnel_tx.clone(),
|
||||
),
|
||||
acs_thread_rx,
|
||||
verif_reporter,
|
||||
);
|
||||
let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
|
||||
|
||||
info!("Starting TMTC and UDP task");
|
||||
let jh_udp_tmtc = thread::Builder::new()
|
||||
.name("sat-rs tmtc-udp".to_string())
|
||||
.name("TMTC and UDP".to_string())
|
||||
.spawn(move || {
|
||||
info!("Running UDP server on port {SERVER_PORT}");
|
||||
loop {
|
||||
@ -447,7 +442,7 @@ fn dyn_tmtc_pool_main() {
|
||||
|
||||
info!("Starting TCP task");
|
||||
let jh_tcp = thread::Builder::new()
|
||||
.name("sat-rs tcp".to_string())
|
||||
.name("TCP".to_string())
|
||||
.spawn(move || {
|
||||
info!("Running TCP server on port {SERVER_PORT}");
|
||||
loop {
|
||||
@ -458,7 +453,7 @@ fn dyn_tmtc_pool_main() {
|
||||
|
||||
info!("Starting TM funnel task");
|
||||
let jh_tm_funnel = thread::Builder::new()
|
||||
.name("sat-rs tm-sink".to_string())
|
||||
.name("TM Funnel".to_string())
|
||||
.spawn(move || loop {
|
||||
tm_funnel.operation();
|
||||
})
|
||||
@ -466,7 +461,7 @@ fn dyn_tmtc_pool_main() {
|
||||
|
||||
info!("Starting event handling task");
|
||||
let jh_event_handling = thread::Builder::new()
|
||||
.name("sat-rs events".to_string())
|
||||
.name("Event".to_string())
|
||||
.spawn(move || loop {
|
||||
event_handler.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_EVENT_HANDLING));
|
||||
@ -475,16 +470,16 @@ fn dyn_tmtc_pool_main() {
|
||||
|
||||
info!("Starting AOCS thread");
|
||||
let jh_aocs = thread::Builder::new()
|
||||
.name("sat-rs aocs".to_string())
|
||||
.name("AOCS".to_string())
|
||||
.spawn(move || loop {
|
||||
mgm_handler.periodic_operation();
|
||||
acs_task.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
|
||||
})
|
||||
.unwrap();
|
||||
|
||||
info!("Starting PUS handler thread");
|
||||
let jh_pus_handler = thread::Builder::new()
|
||||
.name("sat-rs pus".to_string())
|
||||
.name("PUS".to_string())
|
||||
.spawn(move || loop {
|
||||
pus_stack.periodic_operation();
|
||||
thread::sleep(Duration::from_millis(FREQ_MS_PUS_STACK));
|
||||
|
@ -1,747 +1,200 @@
|
||||
use log::warn;
|
||||
use satrs::action::{ActionRequest, ActionRequestVariant};
|
||||
use satrs::pool::SharedStaticMemoryPool;
|
||||
use satrs::pus::action::{
|
||||
ActionReplyPus, ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap,
|
||||
use log::{error, warn};
|
||||
use satrs::action::ActionRequest;
|
||||
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
|
||||
use satrs::pus::action::{PusActionToRequestConverter, PusService8ActionHandler};
|
||||
use satrs::pus::verification::std_mod::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::pus::verification::{
|
||||
handle_completion_failure_with_generic_params, handle_step_failure_with_generic_params,
|
||||
FailParamHelper, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
|
||||
VerificationReportingProvider, VerificationToken,
|
||||
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
|
||||
};
|
||||
use satrs::pus::{
|
||||
ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
|
||||
EcssTcInVecConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
|
||||
MpscTmAsVecSender, PusPacketHandlingError, PusReplyHandler, PusServiceHelper,
|
||||
PusTcToRequestConverter,
|
||||
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
|
||||
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult,
|
||||
PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc,
|
||||
TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId,
|
||||
};
|
||||
use satrs::request::{GenericMessage, UniqueApidTargetId};
|
||||
use satrs::request::TargetAndApidId;
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
|
||||
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
|
||||
use satrs_example::config::components::PUS_ACTION_SERVICE;
|
||||
use satrs_example::config::tmtc_err;
|
||||
use std::sync::mpsc;
|
||||
use std::time::Duration;
|
||||
use satrs::spacepackets::ecss::PusPacket;
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs::{ChannelId, TargetId};
|
||||
use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID};
|
||||
use std::sync::mpsc::{self};
|
||||
|
||||
use crate::requests::GenericRequestRouter;
|
||||
|
||||
use super::{
|
||||
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
|
||||
PusTargetedRequestService, TargetedPusService,
|
||||
};
|
||||
|
||||
pub struct ActionReplyHandler {
    fail_data_buf: [u8; 128],
}

impl Default for ActionReplyHandler {
    fn default() -> Self {
        Self {
            fail_data_buf: [0; 128],
        }
    }
}
||||
|
||||
impl PusReplyHandler<ActivePusActionRequestStd, ActionReplyPus> for ActionReplyHandler {
|
||||
type Error = EcssTmtcError;
|
||||
|
||||
fn handle_unrequested_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<ActionReplyPus>,
|
||||
_tm_sender: &impl EcssTmSender,
|
||||
) -> Result<(), Self::Error> {
|
||||
warn!("received unexpected reply for service 8: {reply:?}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<ActionReplyPus>,
|
||||
active_request: &ActivePusActionRequestStd,
|
||||
tm_sender: &(impl EcssTmSender + ?Sized),
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
timestamp: &[u8],
|
||||
) -> Result<bool, Self::Error> {
|
||||
let verif_token: VerificationToken<TcStateStarted> = active_request
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("invalid token state");
|
||||
let remove_entry = match &reply.message.variant {
|
||||
ActionReplyVariant::CompletionFailed { error_code, params } => {
|
||||
let error_propagated = handle_completion_failure_with_generic_params(
|
||||
tm_sender,
|
||||
verif_token,
|
||||
verification_handler,
|
||||
FailParamHelper {
|
||||
error_code,
|
||||
params: params.as_ref(),
|
||||
timestamp,
|
||||
small_data_buf: &mut self.fail_data_buf,
|
||||
},
|
||||
)?;
|
||||
if !error_propagated {
|
||||
log::warn!(
|
||||
"error params for completion failure were not propated: {:?}",
|
||||
params.as_ref()
|
||||
);
|
||||
}
|
||||
true
|
||||
}
|
||||
ActionReplyVariant::StepFailed {
|
||||
error_code,
|
||||
step,
|
||||
params,
|
||||
} => {
|
||||
let error_propagated = handle_step_failure_with_generic_params(
|
||||
tm_sender,
|
||||
verif_token,
|
||||
verification_handler,
|
||||
FailParamHelper {
|
||||
error_code,
|
||||
params: params.as_ref(),
|
||||
timestamp,
|
||||
small_data_buf: &mut self.fail_data_buf,
|
||||
},
|
||||
&EcssEnumU16::new(*step),
|
||||
)?;
|
||||
if !error_propagated {
|
||||
log::warn!(
|
||||
"error params for completion failure were not propated: {:?}",
|
||||
params.as_ref()
|
||||
);
|
||||
}
|
||||
true
|
||||
}
|
||||
ActionReplyVariant::Completed => {
|
||||
verification_handler.completion_success(tm_sender, verif_token, timestamp)?;
|
||||
true
|
||||
}
|
||||
ActionReplyVariant::StepSuccess { step } => {
|
||||
verification_handler.step_success(
|
||||
tm_sender,
|
||||
&verif_token,
|
||||
timestamp,
|
||||
EcssEnumU16::new(*step),
|
||||
)?;
|
||||
false
|
||||
}
|
||||
_ => false,
|
||||
};
|
||||
Ok(remove_entry)
|
||||
}
|
||||
|
||||
fn handle_request_timeout(
|
||||
&mut self,
|
||||
active_request: &ActivePusActionRequestStd,
|
||||
tm_sender: &impl EcssTmSender,
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
generic_pus_request_timeout_handler(
|
||||
tm_sender,
|
||||
active_request,
|
||||
verification_handler,
|
||||
time_stamp,
|
||||
"action",
|
||||
)
|
||||
}
|
||||
}
|
||||
use super::GenericRoutingErrorHandler;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ActionRequestConverter {}
|
||||
pub struct ExampleActionRequestConverter {}
|
||||
|
||||
impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for ActionRequestConverter {
|
||||
type Error = GenericConversionError;
|
||||
impl PusActionToRequestConverter for ExampleActionRequestConverter {
|
||||
type Error = PusPacketHandlingError;
|
||||
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
tm_sender: &(impl EcssTmSender + ?Sized),
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) -> Result<(TargetId, ActionRequest), Self::Error> {
|
||||
let subservice = tc.subservice();
|
||||
let user_data = tc.user_data();
|
||||
if user_data.len() < 8 {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
return Err(GenericConversionError::NotEnoughAppData {
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 8,
|
||||
found: user_data.len(),
|
||||
});
|
||||
}
|
||||
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
|
||||
let target_id = TargetAndApidId::from_pus_tc(tc).unwrap();
|
||||
let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap());
|
||||
if subservice == 128 {
|
||||
let req_variant = if user_data.len() == 8 {
|
||||
ActionRequestVariant::NoData
|
||||
} else {
|
||||
ActionRequestVariant::VecData(user_data[8..].to_vec())
|
||||
};
|
||||
Ok((
|
||||
ActivePusActionRequestStd::new(
|
||||
target_id.raw(),
|
||||
ActionRequest::UnsignedIdAndVecData {
|
||||
action_id,
|
||||
target_id_and_apid.into(),
|
||||
token.into(),
|
||||
Duration::from_secs(30),
|
||||
),
|
||||
ActionRequest::new(action_id, req_variant),
|
||||
data: user_data[8..].to_vec(),
|
||||
},
|
||||
))
|
||||
} else {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
Err(GenericConversionError::InvalidSubservice(subservice))
|
||||
Err(PusPacketHandlingError::InvalidSubservice(subservice))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_action_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
|
||||
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
tc_pool: SharedStaticMemoryPool,
|
||||
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
action_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
|
||||
) -> ActionServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
let action_request_handler = PusTargetedRequestService::new(
|
||||
) -> Pus8Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
> {
|
||||
let action_srv_tm_sender = TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusAction as ChannelId,
|
||||
"PUS_8_TM_SENDER",
|
||||
shared_tm_store.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let action_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusAction as ChannelId,
|
||||
"PUS_8_TC_RECV",
|
||||
pus_action_rx,
|
||||
);
|
||||
let pus_8_handler = PusService8ActionHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_ACTION_SERVICE.id(),
|
||||
pus_action_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
|
||||
action_srv_receiver,
|
||||
action_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
|
||||
),
|
||||
ActionRequestConverter::default(),
|
||||
// TODO: Implementation which does not use run-time allocation? Maybe something like
|
||||
// a bounded wrapper which pre-allocates using [HashMap::with_capacity]..
|
||||
DefaultActiveActionRequestMap::default(),
|
||||
ActionReplyHandler::default(),
|
||||
ExampleActionRequestConverter::default(),
|
||||
action_router,
|
||||
reply_receiver,
|
||||
GenericRoutingErrorHandler::<8>::default(),
|
||||
);
|
||||
ActionServiceWrapper {
|
||||
service: action_request_handler,
|
||||
}
|
||||
Pus8Wrapper { pus_8_handler }
|
||||
}
|
||||
|
||||
pub fn create_action_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
|
||||
verif_reporter: VerificationReporterWithVecMpscSender,
|
||||
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
action_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
|
||||
) -> ActionServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
let action_request_handler = PusTargetedRequestService::new(
|
||||
) -> Pus8Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
VerificationReporterWithVecMpscSender,
|
||||
> {
|
||||
let action_srv_tm_sender = TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusAction as ChannelId,
|
||||
"PUS_8_TM_SENDER",
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let action_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusAction as ChannelId,
|
||||
"PUS_8_TC_RECV",
|
||||
pus_action_rx,
|
||||
);
|
||||
let pus_8_handler = PusService8ActionHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_ACTION_SERVICE.id(),
|
||||
pus_action_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
|
||||
action_srv_receiver,
|
||||
action_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
ActionRequestConverter::default(),
|
||||
DefaultActiveActionRequestMap::default(),
|
||||
ActionReplyHandler::default(),
|
||||
ExampleActionRequestConverter::default(),
|
||||
action_router,
|
||||
reply_receiver,
|
||||
GenericRoutingErrorHandler::<8>::default(),
|
||||
);
|
||||
ActionServiceWrapper {
|
||||
service: action_request_handler,
|
||||
}
|
||||
Pus8Wrapper { pus_8_handler }
|
||||
}
|
||||
|
||||
pub struct ActionServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub(crate) service: PusTargetedRequestService<
|
||||
MpscTcReceiver,
|
||||
pub struct Pus8Wrapper<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
pub(crate) pus_8_handler: PusService8ActionHandler<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
ActionRequestConverter,
|
||||
ActionReplyHandler,
|
||||
DefaultActiveActionRequestMap,
|
||||
ActivePusActionRequestStd,
|
||||
ActionRequest,
|
||||
ActionReplyPus,
|
||||
ExampleActionRequestConverter,
|
||||
GenericRequestRouter,
|
||||
GenericRoutingErrorHandler<8>,
|
||||
>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
|
||||
for ActionServiceWrapper<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
const SERVICE_ID: u8 = PusServiceId::Action as u8;
|
||||
const SERVICE_STR: &'static str = "action";
|
||||
|
||||
delegate::delegate! {
|
||||
to self.service {
|
||||
fn poll_and_handle_next_tc(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, PusPacketHandlingError>;
|
||||
|
||||
fn poll_and_handle_next_reply(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, EcssTmtcError>;
|
||||
|
||||
fn check_for_request_timeouts(&mut self);
|
||||
pub fn handle_next_packet(&mut self) -> bool {
|
||||
match self.pus_8_handler.handle_one_tc() {
|
||||
Ok(result) => match result {
|
||||
PusPacketHandlerResult::RequestHandled => {}
|
||||
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
|
||||
warn!("PUS 8 partial packet handling success: {e:?}")
|
||||
}
|
||||
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
|
||||
warn!("PUS 8 invalid subservice {invalid}");
|
||||
}
|
||||
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
warn!("PUS 8 subservice {subservice} not implemented");
|
||||
}
|
||||
PusPacketHandlerResult::Empty => {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
Err(error) => {
|
||||
error!("PUS packet handling error: {error:?}")
|
||||
}
|
||||
        }
        false
    }
|
||||
}
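// Hedged usage sketch (assumption): the wrapper is polled until it reports an empty TC queue,
// typically once per PUS stack cycle, e.g.
// loop { if pus_8_wrapper.handle_next_packet() { break; } }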
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use satrs::pus::test_util::{
|
||||
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
|
||||
};
|
||||
use satrs::pus::verification;
|
||||
use satrs::pus::verification::test_util::TestVerificationReporter;
|
||||
use satrs::request::MessageMetadata;
|
||||
use satrs::ComponentId;
|
||||
use satrs::{
|
||||
res_code::ResultU16,
|
||||
spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcSecondaryHeader},
|
||||
tm::PusTmReader,
|
||||
WritablePusPacket,
|
||||
},
|
||||
SpHeader,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench},
|
||||
requests::CompositeRequest,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
impl
|
||||
TargetedPusRequestTestbench<
|
||||
ActionRequestConverter,
|
||||
ActionReplyHandler,
|
||||
DefaultActiveActionRequestMap,
|
||||
ActivePusActionRequestStd,
|
||||
ActionRequest,
|
||||
ActionReplyPus,
|
||||
>
|
||||
{
|
||||
pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
|
||||
let _ = env_logger::builder().is_test(true).try_init();
|
||||
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
|
||||
let (pus_action_tx, pus_action_rx) = mpsc::channel();
|
||||
let (action_reply_tx, action_reply_rx) = mpsc::channel();
|
||||
let (action_req_tx, action_req_rx) = mpsc::channel();
|
||||
let verif_reporter = TestVerificationReporter::new(owner_id);
|
||||
let mut generic_req_router = GenericRequestRouter::default();
|
||||
generic_req_router
|
||||
.composite_router_map
|
||||
.insert(target_id, action_req_tx);
|
||||
Self {
|
||||
service: PusTargetedRequestService::new(
|
||||
PusServiceHelper::new(
|
||||
owner_id,
|
||||
pus_action_rx,
|
||||
tm_funnel_tx.clone(),
|
||||
verif_reporter,
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
ActionRequestConverter::default(),
|
||||
DefaultActiveActionRequestMap::default(),
|
||||
ActionReplyHandler::default(),
|
||||
generic_req_router,
|
||||
action_reply_rx,
|
||||
),
|
||||
request_id: None,
|
||||
pus_packet_tx: pus_action_tx,
|
||||
tm_funnel_rx,
|
||||
reply_tx: action_reply_tx,
|
||||
request_rx: action_req_rx,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn verify_packet_started(&self) {
|
||||
self.service
|
||||
.service_helper
|
||||
.common
|
||||
.verif_reporter
|
||||
.check_next_is_started_success(
|
||||
self.service.service_helper.id(),
|
||||
self.request_id.expect("request ID not set").into(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn verify_packet_completed(&self) {
|
||||
self.service
|
||||
.service_helper
|
||||
.common
|
||||
.verif_reporter
|
||||
.check_next_is_completion_success(
|
||||
self.service.service_helper.id(),
|
||||
self.request_id.expect("request ID not set").into(),
|
||||
);
|
||||
}
|
||||
|
||||
pub fn verify_tm_empty(&self) {
|
||||
let packet = self.tm_funnel_rx.try_recv();
|
||||
if let Err(mpsc::TryRecvError::Empty) = packet {
|
||||
} else {
|
||||
let tm = packet.unwrap();
|
||||
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
|
||||
panic!("unexpected TM packet {unexpected_tm:?}");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) {
|
||||
let result = self.service.poll_and_handle_next_tc(time_stamp);
|
||||
if let Err(e) = result {
|
||||
panic!("unexpected error {:?}", e);
|
||||
}
|
||||
let result = result.unwrap();
|
||||
match result {
|
||||
HandlingStatus::HandledOne => (),
|
||||
_ => panic!("unexpected result {result:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) {
|
||||
let result = self.service.poll_and_handle_next_tc(time_stamp);
|
||||
if let Err(e) = result {
|
||||
panic!("unexpected error {:?}", e);
|
||||
}
|
||||
let result = result.unwrap();
|
||||
match result {
|
||||
HandlingStatus::Empty => (),
|
||||
_ => panic!("unexpected result {result:?}"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
|
||||
let result = self.service.poll_and_handle_next_reply(time_stamp);
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
|
||||
}
|
||||
|
||||
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
|
||||
let result = self.service.poll_and_handle_next_reply(time_stamp);
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(result.unwrap(), HandlingStatus::Empty);
|
||||
}
|
||||
|
||||
pub fn add_tc(&mut self, tc: &PusTcCreator) {
|
||||
self.request_id = Some(verification::RequestId::new(tc).into());
|
||||
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
|
||||
let accepted_token = self
|
||||
.service
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
|
||||
.expect("TC acceptance failed");
|
||||
self.service
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.check_next_was_added(accepted_token.request_id());
|
||||
let id = self.service.service_helper.id();
|
||||
self.service
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.check_next_is_acceptance_success(id, accepted_token.request_id());
|
||||
self.pus_packet_tx
|
||||
.send(EcssTcAndToken::new(
|
||||
PacketAsVec::new(self.service.service_helper.id(), tc.to_vec().unwrap()),
|
||||
accepted_token,
|
||||
))
|
||||
.unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_request() {
|
||||
let mut testbench = TargetedPusRequestTestbench::new_for_action(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
TEST_COMPONENT_ID_1.id(),
|
||||
);
|
||||
// Create a basic action request and verify forwarding.
|
||||
let sp_header = SpHeader::new_from_apid(TEST_APID);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
|
||||
let action_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
|
||||
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
testbench.add_tc(&pus8_packet);
|
||||
let time_stamp: [u8; 7] = [0; 7];
|
||||
testbench.verify_next_tc_is_handled_properly(&time_stamp);
|
||||
testbench.verify_all_tcs_handled(&time_stamp);
|
||||
|
||||
testbench.verify_packet_started();
|
||||
|
||||
let possible_req = testbench.request_rx.try_recv();
|
||||
assert!(possible_req.is_ok());
|
||||
let req = possible_req.unwrap();
|
||||
if let CompositeRequest::Action(action_req) = req.message {
|
||||
assert_eq!(action_req.action_id, action_id);
|
||||
assert_eq!(action_req.variant, ActionRequestVariant::NoData);
|
||||
let action_reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
|
||||
testbench
|
||||
.reply_tx
|
||||
.send(GenericMessage::new(req.requestor_info, action_reply))
|
||||
.unwrap();
|
||||
} else {
|
||||
panic!("unexpected request type");
|
||||
}
|
||||
testbench.verify_next_reply_is_handled_properly(&time_stamp);
|
||||
testbench.verify_all_replies_handled(&time_stamp);
|
||||
|
||||
testbench.verify_packet_completed();
|
||||
testbench.verify_tm_empty();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_request_routing_error() {
|
||||
let mut testbench = TargetedPusRequestTestbench::new_for_action(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
TEST_COMPONENT_ID_1.id(),
|
||||
);
|
||||
// Create a basic action request and verify forwarding.
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
|
||||
let action_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
// Invalid ID, routing should fail.
|
||||
app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
|
||||
let pus8_packet = PusTcCreator::new(
|
||||
SpHeader::new_from_apid(TEST_APID),
|
||||
sec_header,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
testbench.add_tc(&pus8_packet);
|
||||
let time_stamp: [u8; 7] = [0; 7];
|
||||
|
||||
let result = testbench.service.poll_and_handle_next_tc(&time_stamp);
|
||||
assert!(result.is_err());
|
||||
// Verify the correct result and completion failure.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn converter_action_req_no_data() {
|
||||
let mut testbench = PusConverterTestbench::new(
|
||||
TEST_COMPONENT_ID_0.raw(),
|
||||
ActionRequestConverter::default(),
|
||||
);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
|
||||
let action_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
// Invalid ID, routing should fail.
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
|
||||
let pus8_packet = PusTcCreator::new(
|
||||
SpHeader::new_from_apid(TEST_APID),
|
||||
sec_header,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
let token = testbench.add_tc(&pus8_packet);
|
||||
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
|
||||
assert!(result.is_ok());
|
||||
let (active_req, request) = result.unwrap();
|
||||
if let ActionRequestVariant::NoData = request.variant {
|
||||
assert_eq!(request.action_id, action_id);
|
||||
assert_eq!(active_req.action_id, action_id);
|
||||
assert_eq!(
|
||||
active_req.target_id(),
|
||||
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
|
||||
);
|
||||
assert_eq!(
|
||||
active_req.token().request_id(),
|
||||
testbench.request_id().unwrap()
|
||||
);
|
||||
} else {
|
||||
panic!("unexpected action request variant");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn converter_action_req_with_data() {
|
||||
let mut testbench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
|
||||
let action_id = 5_u32;
|
||||
let mut app_data: [u8; 16] = [0; 16];
|
||||
// Invalid ID, routing should fail.
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
|
||||
for i in 0..8 {
|
||||
app_data[i + 8] = i as u8;
|
||||
}
|
||||
let pus8_packet = PusTcCreator::new(
|
||||
SpHeader::new_from_apid(TEST_APID),
|
||||
sec_header,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
let token = testbench.add_tc(&pus8_packet);
|
||||
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
|
||||
assert!(result.is_ok());
|
||||
let (active_req, request) = result.unwrap();
|
||||
if let ActionRequestVariant::VecData(vec) = request.variant {
|
||||
assert_eq!(request.action_id, action_id);
|
||||
assert_eq!(active_req.action_id, action_id);
|
||||
assert_eq!(vec, app_data[8..].to_vec());
|
||||
} else {
|
||||
panic!("unexpected action request variant");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_completion_success() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_id = 5_u32;
|
||||
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let active_action_req =
|
||||
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
|
||||
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::Completed);
|
||||
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
|
||||
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
testbench.verif_reporter.assert_full_completion_success(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
req_id,
|
||||
None,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_completion_failure() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_id = 5_u32;
|
||||
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let active_action_req =
|
||||
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
|
||||
let error_code = ResultU16::new(2, 3);
|
||||
let reply = ActionReplyPus::new(
|
||||
action_id,
|
||||
ActionReplyVariant::CompletionFailed {
|
||||
error_code,
|
||||
params: None,
|
||||
},
|
||||
);
|
||||
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
|
||||
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
testbench.verif_reporter.assert_completion_failure(
|
||||
TEST_COMPONENT_ID_0.into(),
|
||||
req_id,
|
||||
None,
|
||||
error_code.raw() as u64,
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_step_success() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_id = 5_u32;
|
||||
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let active_action_req =
|
||||
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
|
||||
let reply = ActionReplyPus::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
|
||||
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
|
||||
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
|
||||
assert!(result.is_ok());
|
||||
// Entry should not be removed, completion not done yet.
|
||||
assert!(!result.unwrap());
|
||||
testbench.verif_reporter.check_next_was_added(req_id);
|
||||
testbench
|
||||
.verif_reporter
|
||||
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
|
||||
testbench
|
||||
.verif_reporter
|
||||
.check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
|
||||
testbench
|
||||
.verif_reporter
|
||||
.check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_step_failure() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_id = 5_u32;
|
||||
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let active_action_req =
|
||||
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
|
||||
let error_code = ResultU16::new(2, 3);
|
||||
let reply = ActionReplyPus::new(
|
||||
action_id,
|
||||
ActionReplyVariant::StepFailed {
|
||||
error_code,
|
||||
step: 1,
|
||||
params: None,
|
||||
},
|
||||
);
|
||||
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
|
||||
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
testbench.verif_reporter.check_next_was_added(req_id);
|
||||
testbench
|
||||
.verif_reporter
|
||||
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
|
||||
testbench
|
||||
.verif_reporter
|
||||
.check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
|
||||
testbench.verif_reporter.check_next_is_step_failure(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
req_id,
|
||||
error_code.raw().into(),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_unrequested_reply() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
|
||||
let unrequested_reply =
|
||||
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
|
||||
// Right now this function does not do a lot. We simply check that it does not panic or do
|
||||
// weird stuff.
|
||||
let result = testbench.handle_unrequested_reply(&unrequested_reply);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_reply_timeout() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
|
||||
let action_id = 5_u32;
|
||||
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let result = testbench.handle_request_timeout(
|
||||
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
|
||||
&[],
|
||||
);
|
||||
assert!(result.is_ok());
|
||||
testbench.verif_reporter.assert_completion_failure(
|
||||
TEST_COMPONENT_ID_0.raw(),
|
||||
req_id,
|
||||
None,
|
||||
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
|
||||
        );
    }
|
||||
}
|
||||
|
@ -1,115 +1,132 @@
|
||||
use std::sync::mpsc;
|
||||
|
||||
use crate::pus::create_verification_reporter;
|
||||
use satrs::pool::SharedStaticMemoryPool;
|
||||
use log::{error, warn};
|
||||
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
|
||||
use satrs::pus::event_man::EventRequestWithToken;
|
||||
use satrs::pus::event_srv::PusEventServiceHandler;
|
||||
use satrs::pus::verification::VerificationReporter;
|
||||
use satrs::pus::{
|
||||
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
|
||||
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
|
||||
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
|
||||
use satrs::pus::event_srv::PusService5EventHandler;
|
||||
use satrs::pus::verification::std_mod::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::spacepackets::ecss::PusServiceId;
|
||||
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
|
||||
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;
|
||||
|
||||
use super::{DirectPusService, HandlingStatus};
|
||||
use satrs::pus::verification::VerificationReportingProvider;
|
||||
use satrs::pus::{
|
||||
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
|
||||
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
|
||||
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
|
||||
TmInSharedPoolSenderWithId,
|
||||
};
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs::ChannelId;
|
||||
use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
|
||||
|
||||
pub fn create_event_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
|
||||
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
tc_pool: SharedStaticMemoryPool,
|
||||
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
event_request_tx: mpsc::Sender<EventRequestWithToken>,
|
||||
) -> EventServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
let pus_5_handler = PusEventServiceHandler::new(
|
||||
) -> Pus5Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
> {
|
||||
let event_srv_tm_sender = TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusEvent as ChannelId,
|
||||
"PUS_5_TM_SENDER",
|
||||
shared_tm_store.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let event_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusEvent as ChannelId,
|
||||
"PUS_5_TC_RECV",
|
||||
pus_event_rx,
|
||||
);
|
||||
let pus_5_handler = PusService5EventHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_EVENT_MANAGEMENT.id(),
|
||||
pus_event_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
|
||||
event_srv_receiver,
|
||||
event_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
|
||||
),
|
||||
event_request_tx,
|
||||
);
|
||||
EventServiceWrapper {
|
||||
handler: pus_5_handler,
|
||||
}
|
||||
Pus5Wrapper { pus_5_handler }
|
||||
}
|
||||
|
||||
pub fn create_event_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
|
||||
verif_reporter: VerificationReporterWithVecMpscSender,
|
||||
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
event_request_tx: mpsc::Sender<EventRequestWithToken>,
|
||||
) -> EventServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
let pus_5_handler = PusEventServiceHandler::new(
|
||||
) -> Pus5Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
VerificationReporterWithVecMpscSender,
|
||||
> {
|
||||
let event_srv_tm_sender = TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusEvent as ChannelId,
|
||||
"PUS_5_TM_SENDER",
|
||||
tm_funnel_tx,
|
||||
);
|
||||
let event_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusEvent as ChannelId,
|
||||
"PUS_5_TC_RECV",
|
||||
pus_event_rx,
|
||||
);
|
||||
let pus_5_handler = PusService5EventHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_EVENT_MANAGEMENT.id(),
|
||||
pus_event_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
|
||||
event_srv_receiver,
|
||||
event_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
event_request_tx,
|
||||
);
|
||||
EventServiceWrapper {
|
||||
handler: pus_5_handler,
|
||||
}
|
||||
Pus5Wrapper { pus_5_handler }
|
||||
}
|
||||
|
||||
pub struct EventServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
    pub handler:
        PusEventServiceHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
pub struct Pus5Wrapper<
    TcReceiver: EcssTcReceiverCore,
    TmSender: EcssTmSenderCore,
    TcInMemConverter: EcssTcInMemConverter,
    VerificationReporter: VerificationReportingProvider,
> {
    pub pus_5_handler:
        PusService5EventHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
|
||||
for EventServiceWrapper<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
const SERVICE_ID: u8 = PusServiceId::Event as u8;
|
||||
|
||||
const SERVICE_STR: &'static str = "events";
|
||||
|
||||
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
|
||||
let error_handler = |partial_error: &PartialPusHandlingError| {
|
||||
log::warn!(
|
||||
"PUS {}({}) partial error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
partial_error
|
||||
);
|
||||
};
|
||||
let result = self
|
||||
.handler
|
||||
.poll_and_handle_next_tc(error_handler, time_stamp);
|
||||
if let Err(e) = result {
|
||||
log::warn!(
|
||||
"PUS {}({}) error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
e
|
||||
);
|
||||
// To avoid permanent loops on continuous errors.
|
||||
return HandlingStatus::Empty;
|
||||
}
|
||||
match result.unwrap() {
|
||||
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
|
||||
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
|
||||
log::warn!(
|
||||
"PUS {}({}) subservice {} not implemented",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
subservice
|
||||
);
|
||||
}
|
||||
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
log::warn!(
|
||||
"PUS {}({}) subservice {} not implemented",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
subservice
|
||||
);
|
||||
pub fn handle_next_packet(&mut self) -> bool {
|
||||
match self.pus_5_handler.handle_one_tc() {
|
||||
Ok(result) => match result {
|
||||
PusPacketHandlerResult::RequestHandled => {}
|
||||
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
|
||||
warn!("PUS 5 partial packet handling success: {e:?}")
|
||||
}
|
||||
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
|
||||
warn!("PUS 5 invalid subservice {invalid}");
|
||||
}
|
||||
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
warn!("PUS 5 subservice {subservice} not implemented");
|
||||
}
|
||||
PusPacketHandlerResult::Empty => {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
Err(error) => {
|
||||
error!("PUS packet handling error: {error:?}")
|
||||
}
|
||||
}
|
||||
HandlingStatus::HandledOne
|
||||
false
|
||||
}
|
||||
}
|
||||
|
@ -1,126 +1,50 @@
|
||||
use derive_new::new;
|
||||
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
|
||||
use satrs::pool::SharedStaticMemoryPool;
|
||||
use log::{error, warn};
|
||||
use satrs::hk::{CollectionIntervalFactor, HkRequest};
|
||||
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
|
||||
use satrs::pus::hk::{PusHkToRequestConverter, PusService3HkHandler};
|
||||
use satrs::pus::verification::std_mod::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::pus::verification::{
|
||||
FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
|
||||
VerificationReportingProvider, VerificationToken,
|
||||
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
|
||||
};
|
||||
use satrs::pus::{
|
||||
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
|
||||
EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender,
|
||||
EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
|
||||
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
|
||||
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
|
||||
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult,
|
||||
PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc,
|
||||
TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId,
|
||||
};
|
||||
use satrs::request::{GenericMessage, UniqueApidTargetId};
|
||||
use satrs::request::TargetAndApidId;
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
|
||||
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
|
||||
use satrs_example::config::components::PUS_HK_SERVICE;
|
||||
use satrs_example::config::{hk_err, tmtc_err};
|
||||
use std::sync::mpsc;
|
||||
use std::time::Duration;
|
||||
use satrs::spacepackets::ecss::{hk, PusPacket};
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs::{ChannelId, TargetId};
|
||||
use satrs_example::config::{hk_err, tmtc_err, TcReceiverId, TmSenderId, PUS_APID};
|
||||
use std::sync::mpsc::{self};
|
||||
|
||||
use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
|
||||
use crate::requests::GenericRequestRouter;
|
||||
|
||||
use super::{HandlingStatus, PusTargetedRequestService, TargetedPusService};
|
||||
|
||||
#[derive(Clone, PartialEq, Debug, new)]
|
||||
pub struct HkReply {
|
||||
pub unique_id: UniqueId,
|
||||
pub variant: HkReplyVariant,
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub enum HkReplyVariant {
|
||||
Ack,
|
||||
}
|
||||
use super::GenericRoutingErrorHandler;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct HkReplyHandler {}
|
||||
pub struct ExampleHkRequestConverter {}
|
||||
|
||||
impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
|
||||
type Error = EcssTmtcError;
|
||||
|
||||
fn handle_unrequested_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<HkReply>,
|
||||
_tm_sender: &impl EcssTmSender,
|
||||
) -> Result<(), Self::Error> {
|
||||
log::warn!("received unexpected reply for service 3: {reply:?}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<HkReply>,
|
||||
active_request: &ActivePusRequestStd,
|
||||
tm_sender: &impl EcssTmSender,
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<bool, Self::Error> {
|
||||
let started_token: VerificationToken<TcStateStarted> = active_request
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("invalid token state");
|
||||
match reply.message.variant {
|
||||
HkReplyVariant::Ack => {
|
||||
verification_handler
|
||||
.completion_success(tm_sender, started_token, time_stamp)
|
||||
.expect("sending completion success verification failed");
|
||||
}
|
||||
};
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn handle_request_timeout(
|
||||
&mut self,
|
||||
active_request: &ActivePusRequestStd,
|
||||
tm_sender: &impl EcssTmSender,
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
generic_pus_request_timeout_handler(
|
||||
tm_sender,
|
||||
active_request,
|
||||
verification_handler,
|
||||
time_stamp,
|
||||
"HK",
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub struct HkRequestConverter {
|
||||
timeout: Duration,
|
||||
}
|
||||
|
||||
impl Default for HkRequestConverter {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
timeout: Duration::from_secs(60),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConverter {
|
||||
type Error = GenericConversionError;
|
||||
impl PusHkToRequestConverter for ExampleHkRequestConverter {
|
||||
type Error = PusPacketHandlingError;
|
||||
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
tm_sender: &(impl EcssTmSender + ?Sized),
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) -> Result<(TargetId, HkRequest), Self::Error> {
|
||||
let user_data = tc.user_data();
|
||||
if user_data.is_empty() {
|
||||
let user_data_len = user_data.len() as u32;
|
||||
let user_data_len_raw = user_data_len.to_be_bytes();
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
@ -129,7 +53,7 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
|
||||
),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(GenericConversionError::NotEnoughAppData {
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 4,
|
||||
found: 0,
|
||||
});
|
||||
@ -143,402 +67,197 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
|
||||
let user_data_len = user_data.len() as u32;
|
||||
let user_data_len_raw = user_data_len.to_be_bytes();
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new(time_stamp, err, &user_data_len_raw),
|
||||
)
|
||||
.start_failure(token, FailParams::new(time_stamp, err, &user_data_len_raw))
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(GenericConversionError::NotEnoughAppData {
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 8,
|
||||
found: 4,
|
||||
});
|
||||
}
|
||||
let subservice = tc.subservice();
|
||||
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
|
||||
let target_id = TargetAndApidId::from_pus_tc(tc).expect("invalid tc format");
|
||||
let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());
|
||||
|
||||
let standard_subservice = hk::Subservice::try_from(subservice);
|
||||
if standard_subservice.is_err() {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(GenericConversionError::InvalidSubservice(subservice));
|
||||
return Err(PusPacketHandlingError::InvalidSubservice(subservice));
|
||||
}
|
||||
let request = match standard_subservice.unwrap() {
|
||||
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
|
||||
HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
|
||||
}
|
||||
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
|
||||
HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
|
||||
}
|
||||
hk::Subservice::TcReportHkReportStructures => todo!(),
|
||||
hk::Subservice::TmHkPacket => todo!(),
|
||||
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
|
||||
HkRequest::new(unique_id, HkRequestVariant::OneShot)
|
||||
}
|
||||
hk::Subservice::TcModifyDiagCollectionInterval
|
||||
| hk::Subservice::TcModifyHkCollectionInterval => {
|
||||
if user_data.len() < 12 {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new_no_fail_data(
|
||||
time_stamp,
|
||||
&tmtc_err::NOT_ENOUGH_APP_DATA,
|
||||
),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(GenericConversionError::NotEnoughAppData {
|
||||
expected: 12,
|
||||
found: user_data.len(),
|
||||
});
|
||||
Ok((
|
||||
target_id.into(),
|
||||
match standard_subservice.unwrap() {
|
||||
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
|
||||
HkRequest::Enable(unique_id)
|
||||
}
|
||||
HkRequest::new(
|
||||
unique_id,
|
||||
HkRequestVariant::ModifyCollectionInterval(
|
||||
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
|
||||
HkRequest::Disable(unique_id)
|
||||
}
|
||||
hk::Subservice::TcReportHkReportStructures => todo!(),
|
||||
hk::Subservice::TmHkPacket => todo!(),
|
||||
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
|
||||
HkRequest::OneShot(unique_id)
|
||||
}
|
||||
hk::Subservice::TcModifyDiagCollectionInterval
|
||||
| hk::Subservice::TcModifyHkCollectionInterval => {
|
||||
if user_data.len() < 12 {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new_no_fail_data(
|
||||
time_stamp,
|
||||
&tmtc_err::NOT_ENOUGH_APP_DATA,
|
||||
),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 12,
|
||||
found: user_data.len(),
|
||||
});
|
||||
}
|
||||
HkRequest::ModifyCollectionInterval(
|
||||
unique_id,
|
||||
CollectionIntervalFactor::from_be_bytes(
|
||||
user_data[8..12].try_into().unwrap(),
|
||||
),
|
||||
),
|
||||
)
|
||||
}
|
||||
_ => {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED,
|
||||
&[subservice],
|
||||
),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(GenericConversionError::InvalidSubservice(subservice));
|
||||
}
|
||||
};
|
||||
Ok((
|
||||
ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout),
|
||||
request,
|
||||
}
|
||||
_ => {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED,
|
||||
&[subservice],
|
||||
),
|
||||
)
|
||||
.expect("Sending start failure TM failed");
|
||||
return Err(PusPacketHandlingError::InvalidSubservice(subservice));
|
||||
}
|
||||
},
|
||||
))
|
||||
}
|
||||
}
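// The converter above expects the HK telecommand application data to start with a 4-byte
// target object ID followed by a 4-byte unique HK set ID, with an additional 4-byte
// big-endian collection interval factor for the interval modification subservices. A small
// self-contained parsing sketch of that layout (plain std Rust, not the satrs API):
fn parse_hk_app_data(app_data: &[u8]) -> Option<(u32, u32, Option<u32>)> {
    if app_data.len() < 8 {
        return None;
    }
    let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap());
    let unique_id = u32::from_be_bytes(app_data[4..8].try_into().unwrap());
    let interval_factor = if app_data.len() >= 12 {
        Some(u32::from_be_bytes(app_data[8..12].try_into().unwrap()))
    } else {
        None
    };
    Some((target_id, unique_id, interval_factor))
}

fn main() {
    let mut app_data = [0u8; 12];
    app_data[0..4].copy_from_slice(&25_u32.to_be_bytes());
    app_data[4..8].copy_from_slice(&5_u32.to_be_bytes());
    app_data[8..12].copy_from_slice(&10_u32.to_be_bytes());
    assert_eq!(parse_hk_app_data(&app_data), Some((25, 5, Some(10))));
}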
|
||||
|
||||
pub fn create_hk_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
|
||||
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
tc_pool: SharedStaticMemoryPool,
|
||||
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
request_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
|
||||
) -> HkServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
let pus_3_handler = PusTargetedRequestService::new(
|
||||
) -> Pus3Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
> {
|
||||
let hk_srv_tm_sender = TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusHk as ChannelId,
|
||||
"PUS_3_TM_SENDER",
|
||||
shared_tm_store.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let hk_srv_receiver =
|
||||
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
|
||||
let pus_3_handler = PusService3HkHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_HK_SERVICE.id(),
|
||||
pus_hk_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
|
||||
hk_srv_receiver,
|
||||
hk_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
|
||||
),
|
||||
HkRequestConverter::default(),
|
||||
DefaultActiveRequestMap::default(),
|
||||
HkReplyHandler::default(),
|
||||
ExampleHkRequestConverter::default(),
|
||||
request_router,
|
||||
reply_receiver,
|
||||
GenericRoutingErrorHandler::default(),
|
||||
);
|
||||
HkServiceWrapper {
|
||||
service: pus_3_handler,
|
||||
}
|
||||
Pus3Wrapper { pus_3_handler }
|
||||
}
|
||||
|
||||
pub fn create_hk_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
|
||||
verif_reporter: VerificationReporterWithVecMpscSender,
|
||||
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
request_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
|
||||
) -> HkServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
let pus_3_handler = PusTargetedRequestService::new(
|
||||
) -> Pus3Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
VerificationReporterWithVecMpscSender,
|
||||
> {
|
||||
let hk_srv_tm_sender = TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusHk as ChannelId,
|
||||
"PUS_3_TM_SENDER",
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let hk_srv_receiver =
|
||||
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
|
||||
let pus_3_handler = PusService3HkHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_HK_SERVICE.id(),
|
||||
pus_hk_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
|
||||
hk_srv_receiver,
|
||||
hk_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
HkRequestConverter::default(),
|
||||
DefaultActiveRequestMap::default(),
|
||||
HkReplyHandler::default(),
|
||||
ExampleHkRequestConverter::default(),
|
||||
request_router,
|
||||
reply_receiver,
|
||||
GenericRoutingErrorHandler::default(),
|
||||
);
|
||||
HkServiceWrapper {
|
||||
service: pus_3_handler,
|
||||
}
|
||||
Pus3Wrapper { pus_3_handler }
|
||||
}
|
||||
|
||||
pub struct HkServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub(crate) service: PusTargetedRequestService<
|
||||
MpscTcReceiver,
|
||||
pub struct Pus3Wrapper<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
pub(crate) pus_3_handler: PusService3HkHandler<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
HkRequestConverter,
|
||||
HkReplyHandler,
|
||||
DefaultActiveRequestMap<ActivePusRequestStd>,
|
||||
ActivePusRequestStd,
|
||||
HkRequest,
|
||||
HkReply,
|
||||
ExampleHkRequestConverter,
|
||||
GenericRequestRouter,
|
||||
GenericRoutingErrorHandler<3>,
|
||||
>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
|
||||
for HkServiceWrapper<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
const SERVICE_ID: u8 = PusServiceId::Housekeeping as u8;
|
||||
const SERVICE_STR: &'static str = "housekeeping";
|
||||
|
||||
delegate::delegate! {
|
||||
to self.service {
|
||||
fn poll_and_handle_next_tc(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, PusPacketHandlingError>;
|
||||
|
||||
fn poll_and_handle_next_reply(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, EcssTmtcError>;
|
||||
|
||||
fn check_for_request_timeouts(&mut self);
|
||||
pub fn handle_next_packet(&mut self) -> bool {
|
||||
match self.pus_3_handler.handle_one_tc() {
|
||||
Ok(result) => match result {
|
||||
PusPacketHandlerResult::RequestHandled => {}
|
||||
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
|
||||
warn!("PUS 3 partial packet handling success: {e:?}")
|
||||
}
|
||||
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
|
||||
warn!("PUS 3 invalid subservice {invalid}");
|
||||
}
|
||||
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
warn!("PUS 3 subservice {subservice} not implemented");
|
||||
}
|
||||
PusPacketHandlerResult::Empty => {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
Err(error) => {
|
||||
error!("PUS packet handling error: {error:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
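// The wrapper above uses the `delegate` crate to forward its trait methods to the inner
// `self.service` field. A dependency-free sketch of the same forwarding pattern with a
// hand-written method; types and names here are illustrative, not the satrs API.
struct Inner;

impl Inner {
    fn poll(&mut self) -> bool {
        false
    }
}

struct Wrapper {
    inner: Inner,
}

impl Wrapper {
    // Conceptually what `delegate! { to self.inner { fn poll(&mut self) -> bool; } }`
    // expands to: a thin forwarding method.
    fn poll(&mut self) -> bool {
        self.inner.poll()
    }
}

fn main() {
    let mut wrapper = Wrapper { inner: Inner };
    assert!(!wrapper.poll());
}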
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use satrs::pus::test_util::{
|
||||
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
|
||||
};
|
||||
use satrs::request::MessageMetadata;
|
||||
use satrs::{
|
||||
hk::HkRequestVariant,
|
||||
pus::test_util::TEST_APID,
|
||||
request::GenericMessage,
|
||||
spacepackets::{
|
||||
ecss::{hk::Subservice, tc::PusTcCreator},
|
||||
SpHeader,
|
||||
},
|
||||
};
|
||||
use satrs_example::config::tmtc_err;
|
||||
|
||||
use crate::pus::{
|
||||
hk::HkReplyVariant,
|
||||
tests::{PusConverterTestbench, ReplyHandlerTestbench},
|
||||
};
|
||||
|
||||
use super::{HkReply, HkReplyHandler, HkRequestConverter};
|
||||
|
||||
#[test]
|
||||
fn hk_converter_one_shot_req() {
|
||||
let mut hk_bench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let target_id = TEST_UNIQUE_ID_0;
|
||||
let unique_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
|
||||
|
||||
let hk_req = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcGenerateOneShotHk as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
let accepted_token = hk_bench.add_tc(&hk_req);
|
||||
let (_active_req, req) = hk_bench
|
||||
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion failed");
|
||||
|
||||
assert_eq!(req.unique_id, unique_id);
|
||||
if let HkRequestVariant::OneShot = req.variant {
|
||||
} else {
|
||||
panic!("unexpected HK request")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hk_converter_enable_periodic_generation() {
|
||||
let mut hk_bench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let target_id = TEST_UNIQUE_ID_0;
|
||||
let unique_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
|
||||
let mut generic_check = |tc: &PusTcCreator| {
|
||||
let accepted_token = hk_bench.add_tc(tc);
|
||||
let (_active_req, req) = hk_bench
|
||||
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion failed");
|
||||
assert_eq!(req.unique_id, unique_id);
|
||||
if let HkRequestVariant::EnablePeriodic = req.variant {
|
||||
} else {
|
||||
panic!("unexpected HK request")
|
||||
}
|
||||
};
|
||||
let tc0 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcEnableHkGeneration as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc0);
|
||||
let tc1 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcEnableDiagGeneration as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hk_conversion_disable_periodic_generation() {
|
||||
let mut hk_bench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let target_id = TEST_UNIQUE_ID_0;
|
||||
let unique_id = 5_u32;
|
||||
let mut app_data: [u8; 8] = [0; 8];
|
||||
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
|
||||
let mut generic_check = |tc: &PusTcCreator| {
|
||||
let accepted_token = hk_bench.add_tc(tc);
|
||||
let (_active_req, req) = hk_bench
|
||||
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion failed");
|
||||
assert_eq!(req.unique_id, unique_id);
|
||||
if let HkRequestVariant::DisablePeriodic = req.variant {
|
||||
} else {
|
||||
panic!("unexpected HK request")
|
||||
}
|
||||
};
|
||||
let tc0 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcDisableHkGeneration as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc0);
|
||||
let tc1 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcDisableDiagGeneration as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hk_conversion_modify_interval() {
|
||||
let mut hk_bench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let target_id = TEST_UNIQUE_ID_0;
|
||||
let unique_id = 5_u32;
|
||||
let mut app_data: [u8; 12] = [0; 12];
|
||||
let collection_interval_factor = 5_u32;
|
||||
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
|
||||
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
|
||||
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
|
||||
|
||||
let mut generic_check = |tc: &PusTcCreator| {
|
||||
let accepted_token = hk_bench.add_tc(tc);
|
||||
let (_active_req, req) = hk_bench
|
||||
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion failed");
|
||||
assert_eq!(req.unique_id, unique_id);
|
||||
if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
|
||||
assert_eq!(interval_factor, collection_interval_factor);
|
||||
} else {
|
||||
panic!("unexpected HK request")
|
||||
}
|
||||
};
|
||||
let tc0 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcModifyHkCollectionInterval as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc0);
|
||||
let tc1 = PusTcCreator::new_simple(
|
||||
sp_header,
|
||||
3,
|
||||
Subservice::TcModifyDiagCollectionInterval as u8,
|
||||
&app_data,
|
||||
true,
|
||||
);
|
||||
generic_check(&tc1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hk_reply_handler() {
|
||||
let mut reply_testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
|
||||
let sender_id = 2_u64;
|
||||
let apid_target_id = 3_u32;
|
||||
let unique_id = 5_u32;
|
||||
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
|
||||
let reply = GenericMessage::new(
|
||||
MessageMetadata::new(req_id.into(), sender_id),
|
||||
HkReply::new(unique_id, HkReplyVariant::Ack),
|
||||
);
|
||||
let result = reply_testbench.handle_reply(&reply, &active_req, &[]);
|
||||
assert!(result.is_ok());
|
||||
assert!(result.unwrap());
|
||||
reply_testbench
|
||||
.verif_reporter
|
||||
.assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_unrequested_reply() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
|
||||
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
|
||||
let unrequested_reply =
|
||||
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
|
||||
// Right now this function does not do a lot. We simply check that it does not panic or do
|
||||
// weird stuff.
|
||||
let result = testbench.handle_unrequested_reply(&unrequested_reply);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_reply_timeout() {
|
||||
let mut testbench =
|
||||
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
|
||||
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
|
||||
let result = testbench.handle_request_timeout(&active_request, &[]);
|
||||
assert!(result.is_ok());
|
||||
testbench.verif_reporter.assert_completion_failure(
|
||||
TEST_COMPONENT_ID_1.raw(),
|
||||
req_id,
|
||||
None,
|
||||
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
|
||||
);
|
||||
false
|
||||
}
|
||||
}
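// The tests above assemble the HK application data by hand in every test. A small helper
// sketch for building the 12-byte field used by the collection interval modification
// subservices (target ID, unique HK set ID, interval factor, all big endian). Plain std
// Rust; the helper name is illustrative.
fn build_modify_interval_app_data(target_id: u32, unique_id: u32, factor: u32) -> [u8; 12] {
    let mut app_data = [0u8; 12];
    app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
    app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
    app_data[8..12].copy_from_slice(&factor.to_be_bytes());
    app_data
}

fn main() {
    let app_data = build_modify_interval_app_data(25, 5, 10);
    assert_eq!(&app_data[8..12], &10_u32.to_be_bytes());
}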
@ -1,149 +1,120 @@
|
||||
use crate::requests::GenericRequestRouter;
|
||||
use crate::tmtc::MpscStoreAndSendError;
|
||||
use log::warn;
|
||||
use satrs::pool::PoolAddr;
|
||||
use satrs::pus::verification::{
|
||||
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
|
||||
VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
|
||||
};
|
||||
use satrs::pus::verification::{FailParams, VerificationReportingProvider};
|
||||
use satrs::pus::{
|
||||
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
|
||||
EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
|
||||
HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
|
||||
PusTcToRequestConverter, TcInMemory,
|
||||
EcssTcAndToken, GenericRoutingError, PusPacketHandlerResult, PusRoutingErrorHandler, TcInMemory,
|
||||
};
|
||||
use satrs::queue::{GenericReceiveError, GenericSendError};
|
||||
use satrs::request::{Apid, GenericMessage, MessageMetadata};
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
|
||||
use satrs::tmtc::{PacketAsVec, PacketInPool};
|
||||
use satrs::ComponentId;
|
||||
use satrs_example::config::components::PUS_ROUTING_SERVICE;
|
||||
use satrs::spacepackets::ecss::PusServiceId;
|
||||
use satrs::spacepackets::time::cds::CdsTime;
|
||||
use satrs::spacepackets::time::TimeWriter;
|
||||
use satrs_example::config::{tmtc_err, CustomPusServiceId};
|
||||
use satrs_example::TimeStampHelper;
|
||||
use std::fmt::Debug;
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
pub mod action;
|
||||
pub mod event;
|
||||
pub mod hk;
|
||||
pub mod mode;
|
||||
pub mod scheduler;
|
||||
pub mod stack;
|
||||
pub mod test;
|
||||
|
||||
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
    let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
    // Every software component which needs to generate verification telemetry gets a cloned
    // verification reporter.
    VerificationReporter::new(owner_id, &verif_cfg)
}
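// As noted above, every component which generates verification telemetry owns its own
// (cloned) reporter instead of sharing one behind a lock. A minimal stand-in sketch of
// that ownership pattern; the types are simplified and not the satrs API.
#[derive(Clone)]
struct Reporter {
    owner_id: u64,
    apid: u16,
}

struct EventService {
    reporter: Reporter,
}

struct HkService {
    reporter: Reporter,
}

fn main() {
    let template = Reporter { owner_id: 0, apid: 0x02 };
    // Each service owns an independent copy, so no synchronization is needed for reporting.
    let event_service = EventService {
        reporter: Reporter { owner_id: 1, ..template.clone() },
    };
    let hk_service = HkService {
        reporter: Reporter { owner_id: 2, ..template },
    };
    assert_ne!(event_service.reporter.owner_id, hk_service.reporter.owner_id);
    assert_eq!(event_service.reporter.apid, hk_service.reporter.apid);
}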
|
||||
|
||||
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
    pub test_tc_sender: Sender<EcssTcAndToken>,
    pub event_tc_sender: Sender<EcssTcAndToken>,
    pub sched_tc_sender: Sender<EcssTcAndToken>,
    pub hk_tc_sender: Sender<EcssTcAndToken>,
    pub action_tc_sender: Sender<EcssTcAndToken>,
    pub mode_tc_sender: Sender<EcssTcAndToken>,
    pub test_service_receiver: Sender<EcssTcAndToken>,
    pub event_service_receiver: Sender<EcssTcAndToken>,
    pub sched_service_receiver: Sender<EcssTcAndToken>,
    pub hk_service_receiver: Sender<EcssTcAndToken>,
    pub action_service_receiver: Sender<EcssTcAndToken>,
}
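// The router above is essentially a bundle of mpsc senders, one per PUS service. A
// self-contained sketch of the same forwarding idea using plain std channels and a u8
// service number (17 = test, 3 = housekeeping); everything else is simplified and not the
// satrs API.
use std::sync::mpsc;

struct Router {
    test_tx: mpsc::Sender<Vec<u8>>,
    hk_tx: mpsc::Sender<Vec<u8>>,
}

impl Router {
    fn route(&self, service: u8, tc: Vec<u8>) -> Result<(), mpsc::SendError<Vec<u8>>> {
        match service {
            17 => self.test_tx.send(tc),
            3 => self.hk_tx.send(tc),
            // Unknown services would trigger a start failure verification report instead.
            _ => Ok(()),
        }
    }
}

fn main() {
    let (test_tx, test_rx) = mpsc::channel();
    let (hk_tx, hk_rx) = mpsc::channel();
    let router = Router { test_tx, hk_tx };
    router.route(17, vec![0x11]).unwrap();
    router.route(3, vec![0x03]).unwrap();
    assert!(test_rx.try_recv().is_ok());
    assert!(hk_rx.try_recv().is_ok());
}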
|
||||
|
||||
pub struct PusTcDistributor<TmSender: EcssTmSender> {
|
||||
pub id: ComponentId,
|
||||
pub tm_sender: TmSender,
|
||||
pub struct PusReceiver<VerificationReporter: VerificationReportingProvider> {
|
||||
pub verif_reporter: VerificationReporter,
|
||||
pub pus_router: PusTcMpscRouter,
|
||||
stamp_helper: TimeStampHelper,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
|
||||
pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
|
||||
struct TimeStampHelper {
|
||||
stamper: CdsTime,
|
||||
time_stamp: [u8; 7],
|
||||
}
|
||||
|
||||
impl TimeStampHelper {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
id: PUS_ROUTING_SERVICE.raw(),
|
||||
tm_sender,
|
||||
verif_reporter: create_verification_reporter(
|
||||
PUS_ROUTING_SERVICE.id(),
|
||||
PUS_ROUTING_SERVICE.apid,
|
||||
),
|
||||
stamper: CdsTime::new_with_u16_days(0, 0),
|
||||
time_stamp: [0; 7],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn stamp(&self) -> &[u8] {
|
||||
&self.time_stamp
|
||||
}
|
||||
|
||||
pub fn update_from_now(&mut self) {
|
||||
self.stamper
|
||||
.update_from_now()
|
||||
.expect("Updating timestamp failed");
|
||||
self.stamper
|
||||
.write_to_bytes(&mut self.time_stamp)
|
||||
.expect("Writing timestamp failed");
|
||||
}
|
||||
}
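// The helper above caches one CDS timestamp per processing pass so that every report
// generated in that pass carries the same time stamp. A simplified stand-in using a plain
// byte buffer and std time instead of the spacepackets CDS writer:
use std::time::{SystemTime, UNIX_EPOCH};

struct StampHelper {
    time_stamp: [u8; 8],
}

impl StampHelper {
    fn update_from_now(&mut self) {
        let secs = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("clock before epoch")
            .as_secs();
        self.time_stamp = secs.to_be_bytes();
    }

    fn stamp(&self) -> &[u8] {
        &self.time_stamp
    }
}

fn main() {
    let mut helper = StampHelper { time_stamp: [0; 8] };
    // Update once at the start of a pass, then hand `helper.stamp()` to every report.
    helper.update_from_now();
    assert_eq!(helper.stamp().len(), 8);
}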
|
||||
|
||||
impl<VerificationReporter: VerificationReportingProvider> PusReceiver<VerificationReporter> {
|
||||
pub fn new(verif_reporter: VerificationReporter, pus_router: PusTcMpscRouter) -> Self {
|
||||
Self {
|
||||
verif_reporter,
|
||||
pus_router,
|
||||
stamp_helper: TimeStampHelper::default(),
|
||||
stamp_helper: TimeStampHelper::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_tc_packet_vec(
|
||||
impl<VerificationReporter: VerificationReportingProvider> PusReceiver<VerificationReporter> {
|
||||
pub fn handle_tc_packet(
|
||||
&mut self,
|
||||
packet_as_vec: PacketAsVec,
|
||||
) -> Result<HandlingStatus, GenericSendError> {
|
||||
self.handle_tc_generic(packet_as_vec.sender_id, None, &packet_as_vec.packet)
|
||||
}
|
||||
|
||||
pub fn handle_tc_packet_in_store(
|
||||
&mut self,
|
||||
packet_in_pool: PacketInPool,
|
||||
pus_tc_copy: &[u8],
|
||||
) -> Result<HandlingStatus, GenericSendError> {
|
||||
self.handle_tc_generic(
|
||||
packet_in_pool.sender_id,
|
||||
Some(packet_in_pool.store_addr),
|
||||
pus_tc_copy,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn handle_tc_generic(
|
||||
&mut self,
|
||||
sender_id: ComponentId,
|
||||
addr_opt: Option<PoolAddr>,
|
||||
raw_tc: &[u8],
|
||||
) -> Result<HandlingStatus, GenericSendError> {
|
||||
let pus_tc_result = PusTcReader::new(raw_tc);
|
||||
if pus_tc_result.is_err() {
|
||||
log::warn!(
|
||||
"error creating PUS TC from raw data received from {}: {}",
|
||||
sender_id,
|
||||
pus_tc_result.unwrap_err()
|
||||
);
|
||||
log::warn!("raw data: {:x?}", raw_tc);
|
||||
// TODO: Shouldn't this be an error?
|
||||
return Ok(HandlingStatus::HandledOne);
|
||||
}
|
||||
let pus_tc = pus_tc_result.unwrap().0;
|
||||
let init_token = self.verif_reporter.add_tc(&pus_tc);
|
||||
tc_in_memory: TcInMemory,
|
||||
service: u8,
|
||||
pus_tc: &PusTcReader,
|
||||
) -> Result<PusPacketHandlerResult, MpscStoreAndSendError> {
|
||||
let init_token = self.verif_reporter.add_tc(pus_tc);
|
||||
self.stamp_helper.update_from_now();
|
||||
let accepted_token = self
|
||||
.verif_reporter
|
||||
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
|
||||
.acceptance_success(init_token, self.stamp_helper.stamp())
|
||||
.expect("Acceptance success failure");
|
||||
let service = PusServiceId::try_from(pus_tc.service());
|
||||
let tc_in_memory: TcInMemory = if let Some(store_addr) = addr_opt {
|
||||
PacketInPool::new(sender_id, store_addr).into()
|
||||
} else {
|
||||
PacketAsVec::new(sender_id, Vec::from(raw_tc)).into()
|
||||
};
|
||||
let service = PusServiceId::try_from(service);
|
||||
match service {
|
||||
Ok(standard_service) => match standard_service {
|
||||
PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?,
|
||||
PusServiceId::Housekeeping => {
|
||||
self.pus_router.hk_tc_sender.send(EcssTcAndToken {
|
||||
PusServiceId::Test => {
|
||||
self.pus_router.test_service_receiver.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?
|
||||
}
|
||||
PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?,
|
||||
PusServiceId::Scheduling => {
|
||||
self.pus_router.sched_tc_sender.send(EcssTcAndToken {
|
||||
PusServiceId::Housekeeping => {
|
||||
self.pus_router.hk_service_receiver.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?
|
||||
}
|
||||
PusServiceId::Event => {
|
||||
self.pus_router
|
||||
.event_service_receiver
|
||||
.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?
|
||||
}
|
||||
PusServiceId::Scheduling => {
|
||||
self.pus_router
|
||||
.sched_service_receiver
|
||||
.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})?
|
||||
}
|
||||
_ => {
|
||||
let result = self.verif_reporter.start_failure(
|
||||
&self.tm_sender,
|
||||
accepted_token,
|
||||
FailParams::new(
|
||||
self.stamp_helper.stamp(),
|
||||
@ -159,20 +130,15 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
|
||||
Err(e) => {
|
||||
if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
|
||||
match custom_service {
|
||||
CustomPusServiceId::Mode => self
|
||||
.pus_router
|
||||
.mode_tc_sender
|
||||
.send(EcssTcAndToken {
|
||||
tc_in_memory,
|
||||
token: Some(accepted_token.into()),
|
||||
})
|
||||
.map_err(|_| GenericSendError::RxDisconnected)?,
|
||||
CustomPusServiceId::Mode => {
|
||||
// TODO: Fix mode service.
|
||||
//self.handle_mode_service(pus_tc, accepted_token)
|
||||
}
|
||||
CustomPusServiceId::Health => {}
|
||||
}
|
||||
} else {
|
||||
self.verif_reporter
|
||||
.start_failure(
|
||||
&self.tm_sender,
|
||||
accepted_token,
|
||||
FailParams::new(
|
||||
self.stamp_helper.stamp(),
|
||||
@ -184,600 +150,59 @@ impl<TmSender: EcssTmSender> PusTcDistributor<TmSender> {
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(HandlingStatus::HandledOne)
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait TargetedPusService {
|
||||
const SERVICE_ID: u8;
|
||||
const SERVICE_STR: &'static str;
|
||||
#[derive(Default)]
|
||||
pub struct GenericRoutingErrorHandler<const SERVICE_ID: u8> {}
|
||||
|
||||
fn poll_and_handle_next_tc_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
|
||||
let result = self.poll_and_handle_next_tc(time_stamp);
|
||||
if let Err(e) = result {
|
||||
log::error!(
|
||||
"PUS service {}({})packet handling error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
e
|
||||
);
|
||||
// To avoid permanent loops on error cases.
|
||||
return HandlingStatus::Empty;
|
||||
}
|
||||
result.unwrap()
|
||||
}
|
||||
impl<const SERVICE_ID: u8> PusRoutingErrorHandler for GenericRoutingErrorHandler<SERVICE_ID> {
|
||||
type Error = satrs::pus::GenericRoutingError;
|
||||
|
||||
fn poll_and_handle_next_reply_default_handler(&mut self, time_stamp: &[u8]) -> HandlingStatus {
|
||||
// This only fails if all senders disconnected. Treat it like an empty queue.
|
||||
self.poll_and_handle_next_reply(time_stamp)
|
||||
.unwrap_or_else(|e| {
|
||||
warn!(
|
||||
"PUS servce {}({}): Handling reply failed with error {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
e
|
||||
);
|
||||
HandlingStatus::Empty
|
||||
})
|
||||
}
|
||||
|
||||
fn poll_and_handle_next_tc(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, PusPacketHandlingError>;
|
||||
|
||||
fn poll_and_handle_next_reply(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, EcssTmtcError>;
|
||||
|
||||
fn check_for_request_timeouts(&mut self);
|
||||
}
|
||||
|
||||
/// Generic trait for services which handle packets directly. Kept minimal right now because
|
||||
/// of the difficulty of allowing flexible user code for these services.
|
||||
pub trait DirectPusService {
|
||||
const SERVICE_ID: u8;
|
||||
const SERVICE_STR: &'static str;
|
||||
|
||||
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus;
|
||||
}
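// The two traits above split service handling into TC polling, reply polling and timeout
// checks. A compact stand-in sketch of the scheduler side driving such a service once per
// pass; the trait and type names are simplified and not the satrs API.
#[derive(PartialEq, Eq)]
enum HandlingStatus {
    HandledOne,
    Empty,
}

trait TargetedService {
    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus;
    fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus;
    fn check_for_request_timeouts(&mut self);
}

fn handle_service_pass(service: &mut impl TargetedService, time_stamp: &[u8]) {
    // Drain all queued telecommands and replies, then sweep for timed-out requests.
    while service.poll_and_handle_next_tc(time_stamp) == HandlingStatus::HandledOne {}
    while service.poll_and_handle_next_reply(time_stamp) == HandlingStatus::HandledOne {}
    service.check_for_request_timeouts();
}

struct NoopService;

impl TargetedService for NoopService {
    fn poll_and_handle_next_tc(&mut self, _time_stamp: &[u8]) -> HandlingStatus {
        HandlingStatus::Empty
    }
    fn poll_and_handle_next_reply(&mut self, _time_stamp: &[u8]) -> HandlingStatus {
        HandlingStatus::Empty
    }
    fn check_for_request_timeouts(&mut self) {}
}

fn main() {
    handle_service_pass(&mut NoopService, &[0; 7]);
}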
|
||||
|
||||
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
/// The generic steps for this process are the following:
///
/// 1. Poll for TC packets
/// 2. Convert the raw packets to a [PusTcReader].
/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter].
/// 4. Route the requests using the [GenericRequestRouter].
/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction.
/// 6. Check for replies which complete the forwarded request. The handler takes care of
///    the verification process.
/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should
///    be the highest expected timeout for the given target.
///
/// The handler exposes the following API:
///
/// 1. [Self::poll_and_handle_next_tc] which tries to poll and handle one TC packet, covering
///    steps 1-5.
/// 2. [Self::poll_and_handle_next_reply] which tries to poll and handle one reply, covering step 6.
/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
|
||||
pub struct PusTargetedRequestService<
|
||||
TcReceiver: EcssTcReceiver,
|
||||
TmSender: EcssTmSender,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
|
||||
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
|
||||
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
RequestType,
|
||||
ReplyType,
|
||||
> {
|
||||
pub service_helper:
|
||||
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
pub request_router: GenericRequestRouter,
|
||||
pub request_converter: RequestConverter,
|
||||
pub active_request_map: ActiveRequestMap,
|
||||
pub reply_handler: ReplyHandler,
|
||||
pub reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
|
||||
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
|
||||
}
|
||||
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiver,
|
||||
TmSender: EcssTmSender,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
|
||||
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
|
||||
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
RequestType,
|
||||
ReplyType,
|
||||
>
|
||||
PusTargetedRequestService<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
RequestConverter,
|
||||
ReplyHandler,
|
||||
ActiveRequestMap,
|
||||
ActiveRequestInfo,
|
||||
RequestType,
|
||||
ReplyType,
|
||||
>
|
||||
where
|
||||
GenericRequestRouter: PusRequestRouter<RequestType, Error = GenericRoutingError>,
|
||||
{
|
||||
pub fn new(
|
||||
service_helper: PusServiceHelper<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
fn handle_error(
|
||||
&self,
|
||||
target_id: satrs::TargetId,
|
||||
token: satrs::pus::verification::VerificationToken<
|
||||
satrs::pus::verification::TcStateAccepted,
|
||||
>,
|
||||
request_converter: RequestConverter,
|
||||
active_request_map: ActiveRequestMap,
|
||||
reply_hook: ReplyHandler,
|
||||
request_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
service_helper,
|
||||
request_converter,
|
||||
active_request_map,
|
||||
reply_handler: reply_hook,
|
||||
request_router,
|
||||
reply_receiver,
|
||||
phantom: std::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll_and_handle_next_tc(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(HandlingStatus::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
self.service_helper
|
||||
.tc_in_mem_converter_mut()
|
||||
.cache(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let tc = self.service_helper.tc_in_mem_converter().convert()?;
|
||||
let (mut request_info, request) = match self.request_converter.convert(
|
||||
ecss_tc_and_token.token,
|
||||
&tc,
|
||||
self.service_helper.tm_sender(),
|
||||
&self.service_helper.common.verif_reporter,
|
||||
time_stamp,
|
||||
) {
|
||||
Ok((info, req)) => (info, req),
|
||||
Err(e) => {
|
||||
self.handle_conversion_to_request_error(&e, ecss_tc_and_token.token, time_stamp);
|
||||
return Err(e.into());
|
||||
}
|
||||
};
|
||||
let accepted_token: VerificationToken<TcStateAccepted> = request_info
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("token not in expected accepted state");
|
||||
let verif_request_id = verification::RequestId::new(&tc).raw();
|
||||
match self.request_router.route(
|
||||
MessageMetadata::new(verif_request_id, self.service_helper.id()),
|
||||
request_info.target_id(),
|
||||
request,
|
||||
) {
|
||||
Ok(()) => {
|
||||
let started_token = self
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.start_success(
|
||||
&self.service_helper.common.tm_sender,
|
||||
accepted_token,
|
||||
time_stamp,
|
||||
)
|
||||
.expect("Start success failure");
|
||||
request_info.set_token(started_token.into());
|
||||
self.active_request_map
|
||||
.insert(&verif_request_id, request_info);
|
||||
}
|
||||
Err(e) => {
|
||||
self.request_router.handle_error_generic(
|
||||
&request_info,
|
||||
&tc,
|
||||
e.clone(),
|
||||
self.service_helper.tm_sender(),
|
||||
self.service_helper.verif_reporter(),
|
||||
time_stamp,
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
Ok(HandlingStatus::HandledOne)
|
||||
}
|
||||
|
||||
fn handle_conversion_to_request_error(
|
||||
&mut self,
|
||||
error: &GenericConversionError,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
_tc: &PusTcReader,
|
||||
error: Self::Error,
|
||||
time_stamp: &[u8],
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) {
|
||||
warn!("Routing request for service {SERVICE_ID} failed: {error:?}");
|
||||
match error {
|
||||
GenericConversionError::WrongService(service) => {
|
||||
let service_slice: [u8; 1] = [*service];
|
||||
self.service_helper
|
||||
.verif_reporter()
|
||||
.completion_failure(
|
||||
self.service_helper.tm_sender(),
|
||||
GenericRoutingError::UnknownTargetId(id) => {
|
||||
let mut fail_data: [u8; 8] = [0; 8];
|
||||
fail_data.copy_from_slice(&id.to_be_bytes());
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice),
|
||||
FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data),
|
||||
)
|
||||
.expect("Sending completion failure failed");
|
||||
.expect("Sending start failure failed");
|
||||
}
|
||||
GenericConversionError::InvalidSubservice(subservice) => {
|
||||
let subservice_slice: [u8; 1] = [*subservice];
|
||||
self.service_helper
|
||||
.verif_reporter()
|
||||
.completion_failure(
|
||||
self.service_helper.tm_sender(),
|
||||
GenericRoutingError::SendError(_) => {
|
||||
let mut fail_data: [u8; 8] = [0; 8];
|
||||
fail_data.copy_from_slice(&target_id.to_be_bytes());
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&tmtc_err::INVALID_PUS_SUBSERVICE,
|
||||
&subservice_slice,
|
||||
),
|
||||
FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data),
|
||||
)
|
||||
.expect("Sending completion failure failed");
|
||||
.expect("Sending start failure failed");
|
||||
}
|
||||
GenericConversionError::NotEnoughAppData { expected, found } => {
|
||||
let mut context_info = (*found as u32).to_be_bytes().to_vec();
|
||||
context_info.extend_from_slice(&(*expected as u32).to_be_bytes());
|
||||
self.service_helper
|
||||
.verif_reporter()
|
||||
.completion_failure(
|
||||
self.service_helper.tm_sender(),
|
||||
GenericRoutingError::NotEnoughAppData { expected, found } => {
|
||||
let mut context_info = (found as u32).to_be_bytes().to_vec();
|
||||
context_info.extend_from_slice(&(expected as u32).to_be_bytes());
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info),
|
||||
)
|
||||
.expect("Sending completion failure failed");
|
||||
}
|
||||
// Do nothing. This is service-level and cannot be handled generically here.
|
||||
GenericConversionError::InvalidAppData(_) => (),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll_and_handle_next_reply(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, EcssTmtcError> {
|
||||
match self.reply_receiver.try_recv() {
|
||||
Ok(reply) => {
|
||||
self.handle_reply(&reply, time_stamp)?;
|
||||
Ok(HandlingStatus::HandledOne)
|
||||
}
|
||||
Err(e) => match e {
|
||||
mpsc::TryRecvError::Empty => Ok(HandlingStatus::Empty),
|
||||
mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive(
|
||||
GenericReceiveError::TxDisconnected(None),
|
||||
)),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<ReplyType>,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let active_req_opt = self.active_request_map.get(reply.request_id());
|
||||
if active_req_opt.is_none() {
|
||||
self.reply_handler
|
||||
.handle_unrequested_reply(reply, &self.service_helper.common.tm_sender)?;
|
||||
return Ok(());
|
||||
}
|
||||
let active_request = active_req_opt.unwrap();
|
||||
let result = self.reply_handler.handle_reply(
|
||||
reply,
|
||||
active_request,
|
||||
&self.service_helper.common.tm_sender,
|
||||
&self.service_helper.common.verif_reporter,
|
||||
time_stamp,
|
||||
);
|
||||
if result.is_err() || (result.is_ok() && *result.as_ref().unwrap()) {
|
||||
self.active_request_map.remove(reply.request_id());
|
||||
}
|
||||
result.map(|_| ())
|
||||
}
|
||||
|
||||
pub fn check_for_request_timeouts(&mut self) {
|
||||
let mut requests_to_delete = Vec::new();
|
||||
self.active_request_map
|
||||
.for_each(|request_id, request_info| {
|
||||
if request_info.has_timed_out() {
|
||||
requests_to_delete.push(*request_id);
|
||||
}
|
||||
});
|
||||
if !requests_to_delete.is_empty() {
|
||||
for request_id in requests_to_delete {
|
||||
self.active_request_map.remove(request_id);
|
||||
.expect("Sending start failure failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
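// The targeted request service above removes an entry from its active request map either
// when a completing reply arrives or when the request times out. A self-contained sketch
// of that bookkeeping with a plain HashMap and std time; simplified stand-in, not the
// satrs API.
use std::collections::HashMap;
use std::time::{Duration, Instant};

struct ActiveRequest {
    start: Instant,
    timeout: Duration,
}

impl ActiveRequest {
    fn has_timed_out(&self) -> bool {
        self.start.elapsed() > self.timeout
    }
}

struct ActiveRequestMap {
    requests: HashMap<u32, ActiveRequest>,
}

impl ActiveRequestMap {
    // Called when a reply fully completes the request.
    fn complete(&mut self, request_id: u32) -> bool {
        self.requests.remove(&request_id).is_some()
    }

    // Periodic sweep, mirroring `check_for_request_timeouts` above.
    fn remove_timed_out(&mut self) -> Vec<u32> {
        let timed_out: Vec<u32> = self
            .requests
            .iter()
            .filter(|(_, req)| req.has_timed_out())
            .map(|(id, _)| *id)
            .collect();
        for id in &timed_out {
            self.requests.remove(id);
        }
        timed_out
    }
}

fn main() {
    let mut map = ActiveRequestMap {
        requests: HashMap::new(),
    };
    map.requests.insert(
        1,
        ActiveRequest {
            start: Instant::now(),
            timeout: Duration::from_secs(60),
        },
    );
    assert!(map.remove_timed_out().is_empty());
    assert!(map.complete(1));
}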
|
||||
|
||||
/// Generic timeout handling: Handle the verification failure with a dedicated return code
|
||||
/// and also log the error.
|
||||
pub fn generic_pus_request_timeout_handler(
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
active_request: &(impl ActiveRequestProvider + Debug),
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
service_str: &'static str,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
log::warn!("timeout for active request {active_request:?} on {service_str} service");
|
||||
let started_token: VerificationToken<TcStateStarted> = active_request
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("token not in expected started state");
|
||||
verification_handler.completion_failure(
|
||||
sender,
|
||||
started_token,
|
||||
FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]),
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use std::time::Duration;
|
||||
|
||||
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
|
||||
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
|
||||
use satrs::request::RequestId;
|
||||
use satrs::{
|
||||
pus::{
|
||||
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
|
||||
ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver,
|
||||
},
|
||||
request::UniqueApidTargetId,
|
||||
spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcSecondaryHeader},
|
||||
WritablePusPacket,
|
||||
},
|
||||
SpHeader,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::requests::CompositeRequest;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Testbench dedicated to the testing of [PusReplyHandler]s
|
||||
pub struct ReplyHandlerTestbench<
|
||||
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
Reply,
|
||||
> {
|
||||
pub id: ComponentId,
|
||||
pub verif_reporter: TestVerificationReporter,
|
||||
pub reply_handler: ReplyHandler,
|
||||
pub tm_receiver: mpsc::Receiver<PacketAsVec>,
|
||||
pub default_timeout: Duration,
|
||||
tm_sender: MpscTmAsVecSender,
|
||||
phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
|
||||
}
|
||||
|
||||
impl<
|
||||
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
Reply,
|
||||
> ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
|
||||
{
|
||||
pub fn new(owner_id: ComponentId, reply_handler: ReplyHandler) -> Self {
|
||||
let test_verif_reporter = TestVerificationReporter::new(owner_id);
|
||||
let (tm_sender, tm_receiver) = mpsc::channel();
|
||||
Self {
|
||||
id: TEST_COMPONENT_ID_0.raw(),
|
||||
verif_reporter: test_verif_reporter,
|
||||
reply_handler,
|
||||
default_timeout: Duration::from_secs(30),
|
||||
tm_sender,
|
||||
tm_receiver,
|
||||
phantom: std::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tc(
|
||||
&mut self,
|
||||
apid: u16,
|
||||
apid_target: u32,
|
||||
time_stamp: &[u8],
|
||||
) -> (verification::RequestId, ActivePusRequestStd) {
|
||||
let sp_header = SpHeader::new_from_apid(apid);
|
||||
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
|
||||
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
|
||||
sp_header,
|
||||
sec_header_dummy,
|
||||
&[],
|
||||
true,
|
||||
));
|
||||
let accepted = self
|
||||
.verif_reporter
|
||||
.acceptance_success(&self.tm_sender, init, time_stamp)
|
||||
.expect("acceptance failed");
|
||||
let started = self
|
||||
.verif_reporter
|
||||
.start_success(&self.tm_sender, accepted, time_stamp)
|
||||
.expect("start failed");
|
||||
(
|
||||
started.request_id(),
|
||||
ActivePusRequestStd::new(
|
||||
UniqueApidTargetId::new(apid, apid_target).raw(),
|
||||
started,
|
||||
self.default_timeout,
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn handle_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<Reply>,
|
||||
active_request: &ActiveRequestInfo,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<bool, ReplyHandler::Error> {
|
||||
self.reply_handler.handle_reply(
|
||||
reply,
|
||||
active_request,
|
||||
&self.tm_sender,
|
||||
&self.verif_reporter,
|
||||
time_stamp,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn handle_unrequested_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<Reply>,
|
||||
) -> Result<(), ReplyHandler::Error> {
|
||||
self.reply_handler
|
||||
.handle_unrequested_reply(reply, &self.tm_sender)
|
||||
}
|
||||
pub fn handle_request_timeout(
|
||||
&mut self,
|
||||
active_request_info: &ActiveRequestInfo,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(), ReplyHandler::Error> {
|
||||
self.reply_handler.handle_request_timeout(
|
||||
active_request_info,
|
||||
&self.tm_sender,
|
||||
&self.verif_reporter,
|
||||
time_stamp,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DummySender {}
|
||||
|
||||
/// Dummy sender component which does nothing on the [Self::send_tm] call.
|
||||
///
|
||||
/// Useful for unit tests.
|
||||
impl EcssTmSender for DummySender {
|
||||
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Testbench dedicated to the testing of [PusTcToRequestConverter]s
|
||||
pub struct PusConverterTestbench<
|
||||
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
Request,
|
||||
> {
|
||||
pub id: ComponentId,
|
||||
pub verif_reporter: TestVerificationReporter,
|
||||
pub converter: Converter,
|
||||
dummy_sender: DummySender,
|
||||
current_request_id: Option<verification::RequestId>,
|
||||
current_packet: Option<Vec<u8>>,
|
||||
phantom: std::marker::PhantomData<(ActiveRequestInfo, Request)>,
|
||||
}
|
||||
|
||||
impl<
|
||||
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
Request,
|
||||
> PusConverterTestbench<Converter, ActiveRequestInfo, Request>
|
||||
{
|
||||
pub fn new(owner_id: ComponentId, converter: Converter) -> Self {
|
||||
let test_verif_reporter = TestVerificationReporter::new(owner_id);
|
||||
Self {
|
||||
id: owner_id,
|
||||
verif_reporter: test_verif_reporter,
|
||||
converter,
|
||||
dummy_sender: DummySender::default(),
|
||||
current_request_id: None,
|
||||
current_packet: None,
|
||||
phantom: std::marker::PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
|
||||
let token = self.verif_reporter.add_tc(tc);
|
||||
self.current_request_id = Some(verification::RequestId::new(tc));
|
||||
self.current_packet = Some(tc.to_vec().unwrap());
|
||||
self.verif_reporter
|
||||
.acceptance_success(&self.dummy_sender, token, &[])
|
||||
.expect("acceptance failed")
|
||||
}
|
||||
|
||||
pub fn request_id(&self) -> Option<verification::RequestId> {
|
||||
self.current_request_id
|
||||
}
|
||||
|
||||
pub fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
time_stamp: &[u8],
|
||||
expected_apid: u16,
|
||||
expected_apid_target: u32,
|
||||
) -> Result<(ActiveRequestInfo, Request), Converter::Error> {
|
||||
if self.current_packet.is_none() {
|
||||
return Err(GenericConversionError::InvalidAppData(
|
||||
"call add_tc first".to_string(),
|
||||
));
|
||||
}
|
||||
let current_packet = self.current_packet.take().unwrap();
|
||||
let tc_reader = PusTcReader::new(¤t_packet).unwrap();
|
||||
let (active_info, request) = self.converter.convert(
|
||||
token,
|
||||
&tc_reader.0,
|
||||
&self.dummy_sender,
|
||||
&self.verif_reporter,
|
||||
time_stamp,
|
||||
)?;
|
||||
assert_eq!(
|
||||
active_info.token().request_id(),
|
||||
self.request_id().expect("no request id is set")
|
||||
);
|
||||
assert_eq!(
|
||||
active_info.target_id(),
|
||||
UniqueApidTargetId::new(expected_apid, expected_apid_target).raw()
|
||||
);
|
||||
Ok((active_info, request))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TargetedPusRequestTestbench<
|
||||
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
|
||||
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
|
||||
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
|
||||
ActiveRequestInfo: ActiveRequestProvider,
|
||||
RequestType,
|
||||
ReplyType,
|
||||
> {
|
||||
pub service: PusTargetedRequestService<
|
||||
MpscTcReceiver,
|
||||
MpscTmAsVecSender,
|
||||
EcssTcInVecConverter,
|
||||
TestVerificationReporter,
|
||||
RequestConverter,
|
||||
ReplyHandler,
|
||||
ActiveRequestMap,
|
||||
ActiveRequestInfo,
|
||||
RequestType,
|
||||
ReplyType,
|
||||
>,
|
||||
pub request_id: Option<RequestId>,
|
||||
pub tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
|
||||
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
|
||||
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
|
||||
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
|
||||
}
|
||||
}
@ -1,416 +0,0 @@
|
||||
use derive_new::new;
|
||||
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
|
||||
use std::sync::mpsc;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::requests::GenericRequestRouter;
|
||||
use satrs::pool::SharedStaticMemoryPool;
|
||||
use satrs::pus::verification::VerificationReporter;
|
||||
use satrs::pus::{
|
||||
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
|
||||
EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, PusPacketHandlingError,
|
||||
PusServiceHelper,
|
||||
};
|
||||
use satrs::request::GenericMessage;
|
||||
use satrs::{
|
||||
mode::{ModeAndSubmode, ModeReply, ModeRequest},
|
||||
pus::{
|
||||
mode::Subservice,
|
||||
verification::{
|
||||
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
|
||||
VerificationToken,
|
||||
},
|
||||
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
|
||||
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
|
||||
},
|
||||
request::UniqueApidTargetId,
|
||||
spacepackets::{
|
||||
ecss::{
|
||||
tc::PusTcReader,
|
||||
tm::{PusTmCreator, PusTmSecondaryHeader},
|
||||
PusPacket,
|
||||
},
|
||||
SpHeader,
|
||||
},
|
||||
ComponentId,
|
||||
};
|
||||
use satrs_example::config::components::PUS_MODE_SERVICE;
|
||||
use satrs_example::config::{mode_err, tmtc_err, CustomPusServiceId};
|
||||
|
||||
use super::{
|
||||
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
|
||||
PusTargetedRequestService, TargetedPusService,
|
||||
};
|
||||
|
||||
#[derive(new)]
|
||||
pub struct ModeReplyHandler {
|
||||
owner_id: ComponentId,
|
||||
}
|
||||
|
||||
impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
|
||||
type Error = EcssTmtcError;
|
||||
|
||||
fn handle_unrequested_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<ModeReply>,
|
||||
_tm_sender: &impl EcssTmSender,
|
||||
) -> Result<(), Self::Error> {
|
||||
log::warn!("received unexpected reply for mode service 5: {reply:?}");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn handle_reply(
|
||||
&mut self,
|
||||
reply: &GenericMessage<ModeReply>,
|
||||
active_request: &ActivePusRequestStd,
|
||||
tm_sender: &impl EcssTmSender,
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<bool, Self::Error> {
|
||||
let started_token: VerificationToken<TcStateStarted> = active_request
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("invalid token state");
|
||||
match reply.message {
|
||||
ModeReply::ModeReply(mode_reply) => {
|
||||
let mut source_data: [u8; 12] = [0; 12];
|
||||
mode_reply
|
||||
.write_to_be_bytes(&mut source_data)
|
||||
.expect("writing mode reply failed");
|
||||
let req_id = verification::RequestId::from(reply.request_id());
|
||||
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
|
||||
let sec_header =
|
||||
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
|
||||
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
|
||||
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
|
||||
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
|
||||
}
|
||||
ModeReply::CantReachMode(error_code) => {
|
||||
verification_handler.completion_failure(
|
||||
tm_sender,
|
||||
started_token,
|
||||
FailParams::new(time_stamp, &error_code, &[]),
|
||||
)?;
|
||||
}
|
||||
ModeReply::WrongMode { expected, reached } => {
|
||||
let mut error_info: [u8; 24] = [0; 24];
|
||||
let mut written_len = expected
|
||||
.write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN])
|
||||
.expect("writing expected mode failed");
|
||||
written_len += reached
|
||||
.write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
|
||||
.expect("writing reached mode failed");
|
||||
verification_handler.completion_failure(
|
||||
tm_sender,
|
||||
started_token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&mode_err::WRONG_MODE,
|
||||
&error_info[..written_len],
|
||||
),
|
||||
)?;
|
||||
}
|
||||
};
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn handle_request_timeout(
|
||||
&mut self,
|
||||
active_request: &ActivePusRequestStd,
|
||||
tm_sender: &impl EcssTmSender,
|
||||
verification_handler: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(), Self::Error> {
|
||||
generic_pus_request_timeout_handler(
|
||||
tm_sender,
|
||||
active_request,
|
||||
verification_handler,
|
||||
time_stamp,
|
||||
"HK",
|
||||
)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ModeRequestConverter {}
|
||||
|
||||
impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestConverter {
|
||||
type Error = GenericConversionError;
|
||||
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
tm_sender: &(impl EcssTmSender + ?Sized),
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
|
||||
let subservice = tc.subservice();
|
||||
let user_data = tc.user_data();
|
||||
let not_enough_app_data = |expected: usize| {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
Err(GenericConversionError::NotEnoughAppData {
|
||||
expected,
|
||||
found: user_data.len(),
|
||||
})
|
||||
};
|
||||
if user_data.len() < core::mem::size_of::<u32>() {
|
||||
return not_enough_app_data(4);
|
||||
}
|
||||
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
|
||||
let active_request =
|
||||
ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30));
|
||||
let subservice_typed = Subservice::try_from(subservice);
|
||||
let invalid_subservice = || {
|
||||
// Invalid subservice
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
tm_sender,
|
||||
token,
|
||||
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
Err(GenericConversionError::InvalidSubservice(subservice))
|
||||
};
|
||||
if subservice_typed.is_err() {
|
||||
return invalid_subservice();
|
||||
}
|
||||
let subservice_typed = subservice_typed.unwrap();
|
||||
match subservice_typed {
|
||||
Subservice::TcSetMode => {
|
||||
if user_data.len() < core::mem::size_of::<u32>() + ModeAndSubmode::RAW_LEN {
|
||||
return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN);
|
||||
}
|
||||
let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..])
|
||||
.expect("mode and submode extraction failed");
|
||||
Ok((active_request, ModeRequest::SetMode(mode_and_submode)))
|
||||
}
|
||||
Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)),
|
||||
Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)),
|
||||
Subservice::TcAnnounceModeRecursive => {
|
||||
Ok((active_request, ModeRequest::AnnounceModeRecursive))
|
||||
}
|
||||
_ => invalid_subservice(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_mode_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
tc_pool: SharedStaticMemoryPool,
|
||||
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
mode_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
) -> ModeServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
let mode_request_handler = PusTargetedRequestService::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_MODE_SERVICE.id(),
|
||||
pus_action_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
|
||||
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
|
||||
),
|
||||
ModeRequestConverter::default(),
|
||||
DefaultActiveRequestMap::default(),
|
||||
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
|
||||
mode_router,
|
||||
reply_receiver,
|
||||
);
|
||||
ModeServiceWrapper {
|
||||
service: mode_request_handler,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_mode_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
mode_router: GenericRequestRouter,
|
||||
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
) -> ModeServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
let mode_request_handler = PusTargetedRequestService::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_MODE_SERVICE.id(),
|
||||
pus_action_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
ModeRequestConverter::default(),
|
||||
DefaultActiveRequestMap::default(),
|
||||
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
|
||||
mode_router,
|
||||
reply_receiver,
|
||||
);
|
||||
ModeServiceWrapper {
|
||||
service: mode_request_handler,
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ModeServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
|
||||
pub(crate) service: PusTargetedRequestService<
|
||||
MpscTcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
ModeRequestConverter,
|
||||
ModeReplyHandler,
|
||||
DefaultActiveRequestMap<ActivePusRequestStd>,
|
||||
ActivePusRequestStd,
|
||||
ModeRequest,
|
||||
ModeReply,
|
||||
>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
|
||||
for ModeServiceWrapper<TmSender, TcInMemConverter>
|
||||
{
|
||||
const SERVICE_ID: u8 = CustomPusServiceId::Mode as u8;
|
||||
const SERVICE_STR: &'static str = "mode";
|
||||
|
||||
delegate::delegate! {
|
||||
to self.service {
|
||||
fn poll_and_handle_next_tc(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, PusPacketHandlingError>;
|
||||
|
||||
fn poll_and_handle_next_reply(
|
||||
&mut self,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<HandlingStatus, EcssTmtcError>;
|
||||
|
||||
fn check_for_request_timeouts(&mut self);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
|
||||
use satrs::request::MessageMetadata;
|
||||
use satrs::{
|
||||
mode::{ModeAndSubmode, ModeReply, ModeRequest},
|
||||
pus::mode::Subservice,
|
||||
request::GenericMessage,
|
||||
spacepackets::{
|
||||
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
|
||||
SpHeader,
|
||||
},
|
||||
};
|
||||
use satrs_example::config::tmtc_err;
|
||||
|
||||
use crate::pus::{
|
||||
mode::ModeReplyHandler,
|
||||
tests::{PusConverterTestbench, ReplyHandlerTestbench},
|
||||
};
|
||||
|
||||
use super::ModeRequestConverter;
|
||||
|
||||
#[test]
|
||||
fn mode_converter_read_mode_request() {
|
||||
let mut testbench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
|
||||
let mut app_data: [u8; 4] = [0; 4];
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
let token = testbench.add_tc(&tc);
|
||||
let (_active_req, req) = testbench
|
||||
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion has failed");
|
||||
assert_eq!(req, ModeRequest::ReadMode);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mode_converter_set_mode_request() {
|
||||
let mut testbench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
|
||||
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
|
||||
let mode_and_submode = ModeAndSubmode::new(2, 1);
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
mode_and_submode
|
||||
.write_to_be_bytes(&mut app_data[4..])
|
||||
.unwrap();
|
||||
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
let token = testbench.add_tc(&tc);
|
||||
let (_active_req, req) = testbench
|
||||
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion has failed");
|
||||
assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mode_converter_announce_mode() {
|
||||
let mut testbench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
|
||||
let mut app_data: [u8; 4] = [0; 4];
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
let token = testbench.add_tc(&tc);
|
||||
let (_active_req, req) = testbench
|
||||
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion has failed");
|
||||
assert_eq!(req, ModeRequest::AnnounceMode);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn mode_converter_announce_mode_recursively() {
|
||||
let mut testbench =
|
||||
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let sec_header =
|
||||
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
|
||||
let mut app_data: [u8; 4] = [0; 4];
|
||||
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
|
||||
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
let token = testbench.add_tc(&tc);
|
||||
let (_active_req, req) = testbench
|
||||
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
|
||||
.expect("conversion has failed");
|
||||
assert_eq!(req, ModeRequest::AnnounceModeRecursive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_unrequested_reply() {
|
||||
let mut testbench = ReplyHandlerTestbench::new(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
|
||||
);
|
||||
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
|
||||
let unrequested_reply =
|
||||
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
|
||||
// Right now this function does not do much. We simply check that it does not panic or
|
||||
// misbehave.
|
||||
let result = testbench.handle_unrequested_reply(&unrequested_reply);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reply_handling_reply_timeout() {
|
||||
let mut testbench = ReplyHandlerTestbench::new(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
|
||||
);
|
||||
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
|
||||
let result = testbench.handle_request_timeout(&active_request, &[]);
|
||||
assert!(result.is_ok());
|
||||
testbench.verif_reporter.assert_completion_failure(
|
||||
TEST_COMPONENT_ID_0.raw(),
|
||||
req_id,
|
||||
None,
|
||||
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
|
||||
);
|
||||
}
|
||||
}
|
@ -1,74 +1,68 @@
|
||||
use std::sync::mpsc;
|
||||
use std::time::Duration;
|
||||
|
||||
use crate::pus::create_verification_reporter;
|
||||
use log::info;
|
||||
use satrs::pool::{PoolProvider, StaticMemoryPool};
|
||||
use log::{error, info, warn};
|
||||
use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
|
||||
use satrs::pus::scheduler::{PusScheduler, TcInfo};
|
||||
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
|
||||
use satrs::pus::verification::VerificationReporter;
|
||||
use satrs::pus::{
|
||||
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter,
|
||||
EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSender, MpscTcReceiver,
|
||||
MpscTmAsVecSender, PartialPusHandlingError, PusServiceHelper,
|
||||
use satrs::pus::scheduler_srv::PusService11SchedHandler;
|
||||
use satrs::pus::verification::std_mod::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::spacepackets::ecss::PusServiceId;
|
||||
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
|
||||
use satrs::ComponentId;
|
||||
use satrs_example::config::components::PUS_SCHED_SERVICE;
|
||||
use satrs::pus::verification::VerificationReportingProvider;
|
||||
use satrs::pus::{
|
||||
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
|
||||
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
|
||||
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
|
||||
TmInSharedPoolSenderWithId,
|
||||
};
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs::ChannelId;
|
||||
use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
|
||||
|
||||
use super::{DirectPusService, HandlingStatus};
|
||||
use crate::tmtc::PusTcSourceProviderSharedPool;
|
||||
|
||||
pub trait TcReleaser {
|
||||
fn release(&mut self, sender_id: ComponentId, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
|
||||
fn release(&mut self, enabled: bool, info: &TcInfo, tc: &[u8]) -> bool;
|
||||
}
|
||||
|
||||
impl TcReleaser for PacketSenderWithSharedPool {
|
||||
fn release(
|
||||
&mut self,
|
||||
sender_id: ComponentId,
|
||||
enabled: bool,
|
||||
_info: &TcInfo,
|
||||
tc: &[u8],
|
||||
) -> bool {
|
||||
impl TcReleaser for PusTcSourceProviderSharedPool {
|
||||
fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
|
||||
if enabled {
|
||||
let shared_pool = self.shared_pool.get_mut();
|
||||
// Transfer TC from scheduler TC pool to shared TC pool.
|
||||
let released_tc_addr = shared_pool
|
||||
.0
|
||||
let released_tc_addr = self
|
||||
.shared_pool
|
||||
.pool
|
||||
.write()
|
||||
.expect("locking pool failed")
|
||||
.add(tc)
|
||||
.expect("adding TC to shared pool failed");
|
||||
self.sender
|
||||
.send(PacketInPool::new(sender_id, released_tc_addr))
|
||||
self.tc_source
|
||||
.send(released_tc_addr)
|
||||
.expect("sending TC to TC source failed");
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl TcReleaser for mpsc::Sender<PacketAsVec> {
|
||||
fn release(
|
||||
&mut self,
|
||||
sender_id: ComponentId,
|
||||
enabled: bool,
|
||||
_info: &TcInfo,
|
||||
tc: &[u8],
|
||||
) -> bool {
|
||||
impl TcReleaser for mpsc::Sender<Vec<u8>> {
|
||||
fn release(&mut self, enabled: bool, _info: &TcInfo, tc: &[u8]) -> bool {
|
||||
if enabled {
|
||||
// Send released TC to centralized TC source.
|
||||
self.send(PacketAsVec::new(sender_id, tc.to_vec()))
|
||||
self.send(tc.to_vec())
|
||||
.expect("sending TC to TC source failed");
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
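The boxed TcReleaser above decouples TC release from the concrete sender, so other sinks can be plugged into the scheduling service. A minimal sketch of an alternative implementer against the newer four-argument signature shown above; LoggingReleaser is a made-up name and the log wording is illustrative only:

struct LoggingReleaser;

impl TcReleaser for LoggingReleaser {
    fn release(
        &mut self,
        sender_id: ComponentId,
        enabled: bool,
        _info: &TcInfo,
        tc: &[u8],
    ) -> bool {
        if enabled {
            // Only log the release instead of forwarding the TC anywhere.
            log::info!("component {sender_id} released a TC with {} bytes", tc.len());
        }
        // The boolean is handed back to the scheduler, mirroring the implementations above.
        true
    }
}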
|
||||
pub struct SchedulingServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
|
||||
{
|
||||
pub pus_11_handler: PusSchedServiceHandler<
|
||||
MpscTcReceiver,
|
||||
pub struct Pus11Wrapper<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
pub pus_11_handler: PusService11SchedHandler<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
@ -79,68 +73,16 @@ pub struct SchedulingServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: Ec
|
||||
pub tc_releaser: Box<dyn TcReleaser + Send>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
|
||||
for SchedulingServiceWrapper<TmSender, TcInMemConverter>
|
||||
{
|
||||
const SERVICE_ID: u8 = PusServiceId::Scheduling as u8;
|
||||
|
||||
const SERVICE_STR: &'static str = "verification";
|
||||
|
||||
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> HandlingStatus {
|
||||
let error_handler = |partial_error: &PartialPusHandlingError| {
|
||||
log::warn!(
|
||||
"PUS {}({}) partial error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
partial_error
|
||||
);
|
||||
};
|
||||
|
||||
let result = self.pus_11_handler.poll_and_handle_next_tc(
|
||||
error_handler,
|
||||
time_stamp,
|
||||
&mut self.sched_tc_pool,
|
||||
);
|
||||
if let Err(e) = result {
|
||||
log::warn!(
|
||||
"PUS {}({}) error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
e
|
||||
);
|
||||
// To avoid permanent loops on continuous errors.
|
||||
return HandlingStatus::Empty;
|
||||
}
|
||||
match result.unwrap() {
|
||||
DirectPusPacketHandlerResult::Handled(handling_status) => return handling_status,
|
||||
DirectPusPacketHandlerResult::CustomSubservice(subservice, _) => {
|
||||
log::warn!(
|
||||
"PUS {}({}) subservice {} not implemented",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
subservice
|
||||
);
|
||||
}
|
||||
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
log::warn!(
|
||||
"PUS {}({}) subservice {} not implemented",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
subservice
|
||||
);
|
||||
}
|
||||
}
|
||||
HandlingStatus::HandledOne
|
||||
}
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
|
||||
SchedulingServiceWrapper<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
pub fn release_tcs(&mut self) {
|
||||
let id = self.pus_11_handler.service_helper.id();
|
||||
let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
|
||||
self.tc_releaser.release(id, enabled, info, tc)
|
||||
self.tc_releaser.release(enabled, info, tc)
|
||||
};
|
||||
|
||||
self.pus_11_handler
|
||||
@ -160,27 +102,69 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
|
||||
info!("{released_tcs} TC(s) released from scheduler");
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_next_packet(&mut self) -> bool {
|
||||
match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) {
|
||||
Ok(result) => match result {
|
||||
PusPacketHandlerResult::RequestHandled => {}
|
||||
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
|
||||
warn!("PUS11 partial packet handling success: {e:?}")
|
||||
}
|
||||
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
|
||||
warn!("PUS11 invalid subservice {invalid}");
|
||||
}
|
||||
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
warn!("PUS11: Subservice {subservice} not implemented");
|
||||
}
|
||||
PusPacketHandlerResult::Empty => {
|
||||
return true;
|
||||
}
|
||||
},
|
||||
Err(error) => {
|
||||
error!("PUS packet handling error: {error:?}")
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_scheduler_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
tc_releaser: PacketSenderWithSharedPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
|
||||
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
tc_releaser: PusTcSourceProviderSharedPool,
|
||||
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
sched_tc_pool: StaticMemoryPool,
|
||||
) -> SchedulingServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
) -> Pus11Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
> {
|
||||
let sched_srv_tm_sender = TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusSched as ChannelId,
|
||||
"PUS_11_TM_SENDER",
|
||||
shared_tm_store.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let sched_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusSched as ChannelId,
|
||||
"PUS_11_TC_RECV",
|
||||
pus_sched_rx,
|
||||
);
|
||||
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
|
||||
.expect("Creating PUS Scheduler failed");
|
||||
let pus_11_handler = PusSchedServiceHandler::new(
|
||||
let pus_11_handler = PusService11SchedHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_SCHED_SERVICE.id(),
|
||||
pus_sched_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
|
||||
EcssTcInSharedStoreConverter::new(tc_releaser.shared_packet_store().0.clone(), 2048),
|
||||
sched_srv_receiver,
|
||||
sched_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048),
|
||||
),
|
||||
scheduler,
|
||||
);
|
||||
SchedulingServiceWrapper {
|
||||
Pus11Wrapper {
|
||||
pus_11_handler,
|
||||
sched_tc_pool,
|
||||
releaser_buf: [0; 4096],
|
||||
@ -189,26 +173,40 @@ pub fn create_scheduler_service_static(
|
||||
}
|
||||
|
||||
pub fn create_scheduler_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
tc_source_sender: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
|
||||
verif_reporter: VerificationReporterWithVecMpscSender,
|
||||
tc_source_sender: mpsc::Sender<Vec<u8>>,
|
||||
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
sched_tc_pool: StaticMemoryPool,
|
||||
) -> SchedulingServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
//let sched_srv_receiver =
|
||||
//MpscTcReceiver::new(PUS_SCHED_SERVICE.raw(), "PUS_11_TC_RECV", pus_sched_rx);
|
||||
) -> Pus11Wrapper<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
VerificationReporterWithVecMpscSender,
|
||||
> {
|
||||
let sched_srv_tm_sender = TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusSched as ChannelId,
|
||||
"PUS_11_TM_SENDER",
|
||||
tm_funnel_tx,
|
||||
);
|
||||
let sched_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusSched as ChannelId,
|
||||
"PUS_11_TC_RECV",
|
||||
pus_sched_rx,
|
||||
);
|
||||
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
|
||||
.expect("Creating PUS Scheduler failed");
|
||||
let pus_11_handler = PusSchedServiceHandler::new(
|
||||
let pus_11_handler = PusService11SchedHandler::new(
|
||||
PusServiceHelper::new(
|
||||
PUS_SCHED_SERVICE.id(),
|
||||
pus_sched_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
|
||||
sched_srv_receiver,
|
||||
sched_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInVecConverter::default(),
|
||||
),
|
||||
scheduler,
|
||||
);
|
||||
SchedulingServiceWrapper {
|
||||
Pus11Wrapper {
|
||||
pus_11_handler,
|
||||
sched_tc_pool,
|
||||
releaser_buf: [0; 4096],
|
||||
|
@ -1,95 +1,71 @@
|
||||
use crate::pus::mode::ModeServiceWrapper;
|
||||
use derive_new::new;
|
||||
use satrs::{
|
||||
pus::{EcssTcInMemConverter, EcssTmSender},
|
||||
spacepackets::time::{cds, TimeWriter},
|
||||
use satrs::pus::{
|
||||
verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore,
|
||||
EcssTmSenderCore,
|
||||
};
|
||||
|
||||
use super::{
|
||||
action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
|
||||
scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, DirectPusService,
|
||||
HandlingStatus, TargetedPusService,
|
||||
action::Pus8Wrapper, event::Pus5Wrapper, hk::Pus3Wrapper, scheduler::Pus11Wrapper,
|
||||
test::Service17CustomWrapper,
|
||||
};
|
||||
|
||||
// TODO: For better extensibility, we could create 2 vectors: One for direct PUS services and one
|
||||
// for targeted services.
|
||||
#[derive(new)]
|
||||
pub struct PusStack<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> {
|
||||
test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
|
||||
hk_srv_wrapper: HkServiceWrapper<TmSender, TcInMemConverter>,
|
||||
event_srv: EventServiceWrapper<TmSender, TcInMemConverter>,
|
||||
action_srv_wrapper: ActionServiceWrapper<TmSender, TcInMemConverter>,
|
||||
schedule_srv: SchedulingServiceWrapper<TmSender, TcInMemConverter>,
|
||||
mode_srv: ModeServiceWrapper<TmSender, TcInMemConverter>,
|
||||
pub struct PusStack<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
event_srv: Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
hk_srv: Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
action_srv: Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
schedule_srv: Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
test_srv: Service17CustomWrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
|
||||
PusStack<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> PusStack<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
pub fn new(
|
||||
hk_srv: Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
event_srv: Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
action_srv: Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
schedule_srv: Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
test_srv: Service17CustomWrapper<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
>,
|
||||
) -> Self {
|
||||
Self {
|
||||
event_srv,
|
||||
action_srv,
|
||||
schedule_srv,
|
||||
test_srv,
|
||||
hk_srv,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
// Release all telecommands which reached their release time before calling the service
|
||||
// handlers.
|
||||
self.schedule_srv.release_tcs();
|
||||
let timestamp = cds::CdsTime::now_with_u16_days()
|
||||
.expect("time stamp generation error")
|
||||
.to_vec()
|
||||
.unwrap();
|
||||
let mut loop_count = 0_u32;
|
||||
// Hot loop which will run continuously until all request and reply handling is done.
|
||||
loop {
|
||||
let mut nothing_to_do = true;
|
||||
Self::direct_service_checker(&mut self.test_srv, ×tamp, &mut nothing_to_do);
|
||||
Self::direct_service_checker(&mut self.schedule_srv, ×tamp, &mut nothing_to_do);
|
||||
Self::direct_service_checker(&mut self.event_srv, ×tamp, &mut nothing_to_do);
|
||||
Self::targeted_service_checker(
|
||||
&mut self.action_srv_wrapper,
|
||||
×tamp,
|
||||
&mut nothing_to_do,
|
||||
);
|
||||
Self::targeted_service_checker(
|
||||
&mut self.hk_srv_wrapper,
|
||||
×tamp,
|
||||
&mut nothing_to_do,
|
||||
);
|
||||
Self::targeted_service_checker(&mut self.mode_srv, ×tamp, &mut nothing_to_do);
|
||||
if nothing_to_do {
|
||||
// Timeout checking is only done once.
|
||||
self.action_srv_wrapper.check_for_request_timeouts();
|
||||
self.hk_srv_wrapper.check_for_request_timeouts();
|
||||
self.mode_srv.check_for_request_timeouts();
|
||||
let mut all_queues_empty = true;
|
||||
let mut is_srv_finished = |srv_handler_finished: bool| {
|
||||
if !srv_handler_finished {
|
||||
all_queues_empty = false;
|
||||
}
|
||||
};
|
||||
is_srv_finished(self.test_srv.handle_next_packet());
|
||||
is_srv_finished(self.schedule_srv.handle_next_packet());
|
||||
is_srv_finished(self.event_srv.handle_next_packet());
|
||||
is_srv_finished(self.action_srv.handle_next_packet());
|
||||
is_srv_finished(self.hk_srv.handle_next_packet());
|
||||
if all_queues_empty {
|
||||
break;
|
||||
}
|
||||
// Safety mechanism to avoid infinite loops.
|
||||
loop_count += 1;
|
||||
if loop_count >= 500 {
|
||||
log::warn!("reached PUS stack loop count 500, breaking");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn direct_service_checker<S: DirectPusService>(
|
||||
service: &mut S,
|
||||
timestamp: &[u8],
|
||||
nothing_to_do: &mut bool,
|
||||
) {
|
||||
let handling_status = service.poll_and_handle_next_tc(timestamp);
|
||||
if handling_status == HandlingStatus::HandledOne {
|
||||
*nothing_to_do = false;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn targeted_service_checker<S: TargetedPusService>(
|
||||
service: &mut S,
|
||||
timestamp: &[u8],
|
||||
nothing_to_do: &mut bool,
|
||||
) {
|
||||
let request_handling = service.poll_and_handle_next_tc_default_handler(timestamp);
|
||||
let reply_handling = service.poll_and_handle_next_reply_default_handler(timestamp);
|
||||
if request_handling == HandlingStatus::HandledOne
|
||||
|| reply_handling == HandlingStatus::HandledOne
|
||||
{
|
||||
*nothing_to_do = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,158 +1,175 @@
|
||||
use crate::pus::create_verification_reporter;
|
||||
use log::info;
|
||||
use satrs::event_man::{EventMessage, EventMessageU32};
|
||||
use satrs::pool::SharedStaticMemoryPool;
|
||||
use log::{info, warn};
|
||||
use satrs::params::Params;
|
||||
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
|
||||
use satrs::pus::test::PusService17TestHandler;
|
||||
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
|
||||
use satrs::pus::{
|
||||
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter,
|
||||
EcssTmSender, MpscTcReceiver, MpscTmAsVecSender, PusServiceHelper,
|
||||
use satrs::pus::verification::{FailParams, VerificationReportingProvider};
|
||||
use satrs::pus::verification::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::pus::{
|
||||
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTcReceiverCore,
|
||||
EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
|
||||
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
|
||||
TmInSharedPoolSenderWithId,
|
||||
};
|
||||
use satrs::pus::{EcssTcInSharedStoreConverter, PartialPusHandlingError};
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
|
||||
use satrs::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
|
||||
use satrs_example::config::components::PUS_TEST_SERVICE;
|
||||
use satrs_example::config::{tmtc_err, TEST_EVENT};
|
||||
use std::sync::mpsc;
|
||||
|
||||
use super::{DirectPusService, HandlingStatus};
|
||||
use satrs::spacepackets::ecss::PusPacket;
|
||||
use satrs::spacepackets::time::cds::CdsTime;
|
||||
use satrs::spacepackets::time::TimeWriter;
|
||||
use satrs::tmtc::tm_helper::SharedTmPool;
|
||||
use satrs::ChannelId;
|
||||
use satrs::{events::EventU32, pus::EcssTcInSharedStoreConverter};
|
||||
use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID, TEST_EVENT};
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
|
||||
pub fn create_test_service_static(
|
||||
tm_sender: PacketSenderWithSharedPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
|
||||
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
tc_pool: SharedStaticMemoryPool,
|
||||
event_sender: mpsc::SyncSender<EventMessageU32>,
|
||||
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
|
||||
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
) -> TestCustomServiceWrapper<PacketSenderWithSharedPool, EcssTcInSharedStoreConverter> {
|
||||
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
|
||||
PUS_TEST_SERVICE.id(),
|
||||
) -> Service17CustomWrapper<
|
||||
MpscTcReceiver,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
> {
|
||||
let test_srv_tm_sender = TmInSharedPoolSenderWithId::new(
|
||||
TmSenderId::PusTest as ChannelId,
|
||||
"PUS_17_TM_SENDER",
|
||||
shared_tm_store.clone(),
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let test_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusTest as ChannelId,
|
||||
"PUS_17_TC_RECV",
|
||||
pus_test_rx,
|
||||
tm_sender,
|
||||
create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
|
||||
);
|
||||
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
|
||||
test_srv_receiver,
|
||||
test_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
|
||||
));
|
||||
TestCustomServiceWrapper {
|
||||
handler: pus17_handler,
|
||||
event_tx: event_sender,
|
||||
Service17CustomWrapper {
|
||||
pus17_handler,
|
||||
test_srv_event_sender: event_sender,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn create_test_service_dynamic(
|
||||
tm_funnel_tx: mpsc::Sender<PacketAsVec>,
|
||||
event_sender: mpsc::SyncSender<EventMessageU32>,
|
||||
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
|
||||
verif_reporter: VerificationReporterWithVecMpscSender,
|
||||
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
|
||||
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
|
||||
) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
|
||||
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
|
||||
PUS_TEST_SERVICE.id(),
|
||||
) -> Service17CustomWrapper<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
VerificationReporterWithVecMpscSender,
|
||||
> {
|
||||
let test_srv_tm_sender = TmAsVecSenderWithId::new(
|
||||
TmSenderId::PusTest as ChannelId,
|
||||
"PUS_17_TM_SENDER",
|
||||
tm_funnel_tx.clone(),
|
||||
);
|
||||
let test_srv_receiver = MpscTcReceiver::new(
|
||||
TcReceiverId::PusTest as ChannelId,
|
||||
"PUS_17_TC_RECV",
|
||||
pus_test_rx,
|
||||
tm_funnel_tx,
|
||||
create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
|
||||
);
|
||||
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
|
||||
test_srv_receiver,
|
||||
test_srv_tm_sender,
|
||||
PUS_APID,
|
||||
verif_reporter.clone(),
|
||||
EcssTcInVecConverter::default(),
|
||||
));
|
||||
TestCustomServiceWrapper {
|
||||
handler: pus17_handler,
|
||||
event_tx: event_sender,
|
||||
Service17CustomWrapper {
|
||||
pus17_handler,
|
||||
test_srv_event_sender: event_sender,
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TestCustomServiceWrapper<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter>
|
||||
{
|
||||
pub handler:
|
||||
PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
pub event_tx: mpsc::SyncSender<EventMessageU32>,
|
||||
pub struct Service17CustomWrapper<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
pub pus17_handler:
|
||||
PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
|
||||
pub test_srv_event_sender: Sender<(EventU32, Option<Params>)>,
|
||||
}
|
||||
|
||||
impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusService
|
||||
for TestCustomServiceWrapper<TmSender, TcInMemConverter>
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> Service17CustomWrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
const SERVICE_ID: u8 = PusServiceId::Test as u8;
|
||||
|
||||
const SERVICE_STR: &'static str = "test";
|
||||
|
||||
fn poll_and_handle_next_tc(&mut self, timestamp: &[u8]) -> HandlingStatus {
|
||||
let error_handler = |partial_error: &PartialPusHandlingError| {
|
||||
log::warn!(
|
||||
"PUS {}({}) partial error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
partial_error
|
||||
);
|
||||
};
|
||||
let res = self
|
||||
.handler
|
||||
.poll_and_handle_next_tc(error_handler, timestamp);
|
||||
if let Err(e) = res {
|
||||
log::warn!(
|
||||
"PUS {}({}) error: {:?}",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
e
|
||||
);
|
||||
// To avoid permanent loops on continuous errors.
|
||||
return HandlingStatus::Empty;
|
||||
pub fn handle_next_packet(&mut self) -> bool {
|
||||
let res = self.pus17_handler.handle_one_tc();
|
||||
if res.is_err() {
|
||||
warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
|
||||
return true;
|
||||
}
|
||||
match res.unwrap() {
|
||||
DirectPusPacketHandlerResult::Handled(handling_status) => {
|
||||
if handling_status == HandlingStatus::HandledOne {
|
||||
info!("Received PUS ping command TC[17,1]");
|
||||
info!("Sent ping reply PUS TM[17,2]");
|
||||
}
|
||||
return handling_status;
|
||||
PusPacketHandlerResult::RequestHandled => {
|
||||
info!("Received PUS ping command TC[17,1]");
|
||||
info!("Sent ping reply PUS TM[17,2]");
|
||||
}
|
||||
DirectPusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
log::warn!(
|
||||
"PUS {}({}) subservice {} not implemented",
|
||||
Self::SERVICE_ID,
|
||||
Self::SERVICE_STR,
|
||||
subservice
|
||||
PusPacketHandlerResult::RequestHandledPartialSuccess(partial_err) => {
|
||||
warn!(
|
||||
"Handled PUS ping command with partial success: {:?}",
|
||||
partial_err
|
||||
);
|
||||
}
|
||||
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
|
||||
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
|
||||
warn!("PUS17: Subservice {subservice} not implemented")
|
||||
}
|
||||
PusPacketHandlerResult::CustomSubservice(subservice, token) => {
|
||||
let (tc, _) = PusTcReader::new(
|
||||
self.handler
|
||||
self.pus17_handler
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.tc_slice_raw(),
|
||||
)
|
||||
.unwrap();
|
||||
let time_stamper = CdsTime::now_with_u16_days().unwrap();
|
||||
let mut stamp_buf: [u8; 7] = [0; 7];
|
||||
time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
|
||||
if subservice == 128 {
|
||||
info!("generating test event");
|
||||
self.event_tx
|
||||
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
|
||||
info!("Generating test event");
|
||||
self.test_srv_event_sender
|
||||
.send((TEST_EVENT.into(), None))
|
||||
.expect("Sending test event failed");
|
||||
match self.handler.service_helper.verif_reporter().start_success(
|
||||
self.handler.service_helper.tm_sender(),
|
||||
token,
|
||||
timestamp,
|
||||
) {
|
||||
Ok(started_token) => {
|
||||
if let Err(e) = self
|
||||
.handler
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.completion_success(
|
||||
self.handler.service_helper.tm_sender(),
|
||||
started_token,
|
||||
timestamp,
|
||||
)
|
||||
{
|
||||
error_handler(&PartialPusHandlingError::Verification(e));
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error_handler(&PartialPusHandlingError::Verification(e));
|
||||
}
|
||||
}
|
||||
let start_token = self
|
||||
.pus17_handler
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.start_success(token, &stamp_buf)
|
||||
.expect("Error sending start success");
|
||||
self.pus17_handler
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.completion_success(start_token, &stamp_buf)
|
||||
.expect("Error sending completion success");
|
||||
} else {
|
||||
let fail_data = [tc.subservice()];
|
||||
self.handler
|
||||
self.pus17_handler
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.common
|
||||
.verification_handler
|
||||
.start_failure(
|
||||
self.handler.service_helper.tm_sender(),
|
||||
token,
|
||||
FailParams::new(
|
||||
timestamp,
|
||||
&stamp_buf,
|
||||
&tmtc_err::INVALID_PUS_SUBSERVICE,
|
||||
&fail_data,
|
||||
),
|
||||
@ -160,7 +177,10 @@ impl<TmSender: EcssTmSender, TcInMemConverter: EcssTcInMemConverter> DirectPusSe
|
||||
.expect("Sending start failure verification failed");
|
||||
}
|
||||
}
|
||||
PusPacketHandlerResult::Empty => {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
HandlingStatus::HandledOne
|
||||
false
|
||||
}
|
||||
}
|
||||
|
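The custom branch above turns a TC[17,128] into a test event on board. A rough sketch of how such a telecommand could be constructed with the spacepackets types already used in this diff; the APID value 0x02 is arbitrary and the helper name is made up:

use satrs::spacepackets::{
    ecss::{
        tc::{PusTcCreator, PusTcSecondaryHeader},
        PusPacket,
    },
    SpHeader,
};

// Hypothetical construction of the custom "trigger test event" TC[17,128].
fn build_test_event_tc() {
    let sp_header = SpHeader::new_for_unseg_tc(0x02, 0, 0);
    let sec_header = PusTcSecondaryHeader::new_simple(17, 128);
    let tc = PusTcCreator::new(sp_header, sec_header, &[], true);
    assert_eq!(tc.service(), 17);
    assert_eq!(tc.subservice(), 128);
}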
45
satrs-example/src/queue.rs
Normal file
@ -0,0 +1,45 @@
|
||||
/// Generic error type for sending something via a message queue.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum GenericSendError {
|
||||
RxDisconnected,
|
||||
QueueFull(Option<u32>),
|
||||
}
|
||||
|
||||
impl Display for GenericSendError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
match self {
|
||||
GenericSendError::RxDisconnected => {
|
||||
write!(f, "rx side has disconnected")
|
||||
}
|
||||
GenericSendError::QueueFull(max_cap) => {
|
||||
write!(f, "queue with max capacity of {max_cap:?} is full")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl Error for GenericSendError {}
|
||||
|
||||
/// Generic error type for receiving something via a message queue.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub enum GenericRecvError {
|
||||
Empty,
|
||||
TxDisconnected,
|
||||
}
|
||||
|
||||
impl Display for GenericRecvError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
match self {
|
||||
Self::TxDisconnected => {
|
||||
write!(f, "tx side has disconnected")
|
||||
}
|
||||
Self::Empty => {
|
||||
write!(f, "nothing to receive")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl Error for GenericRecvError {}
|
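As rendered here, the new queue.rs shows no use statements, so the Display and Error impls above would at least need the core::fmt and std::error imports. A brief sketch of those imports plus a hypothetical helper that maps a std mpsc send failure onto GenericSendError:

use core::fmt::{Display, Formatter};
use std::error::Error;
use std::sync::mpsc;

// Hypothetical helper: forward a packet and translate the mpsc error into the
// generic error type defined above.
fn forward_packet(tx: &mpsc::Sender<Vec<u8>>, packet: Vec<u8>) -> Result<(), GenericSendError> {
    tx.send(packet)
        .map_err(|_| GenericSendError::RxDisconnected)
}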
@ -1,152 +1,94 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::mpsc;
|
||||
|
||||
use log::warn;
|
||||
use derive_new::new;
|
||||
use satrs::action::ActionRequest;
|
||||
use satrs::hk::HkRequest;
|
||||
use satrs::mode::ModeRequest;
|
||||
use satrs::pus::verification::{
|
||||
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
|
||||
};
|
||||
use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
|
||||
use satrs::pus::action::PusActionRequestRouter;
|
||||
use satrs::pus::hk::PusHkRequestRouter;
|
||||
use satrs::pus::verification::{TcStateAccepted, VerificationToken};
|
||||
use satrs::pus::GenericRoutingError;
|
||||
use satrs::queue::GenericSendError;
|
||||
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::PusPacket;
|
||||
use satrs::ComponentId;
|
||||
use satrs_example::config::components::PUS_ROUTING_SERVICE;
|
||||
use satrs_example::config::tmtc_err;
|
||||
use satrs::TargetId;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
#[allow(dead_code)]
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
#[non_exhaustive]
|
||||
pub enum CompositeRequest {
|
||||
pub enum Request {
|
||||
Hk(HkRequest),
|
||||
Mode(ModeRequest),
|
||||
Action(ActionRequest),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct GenericRequestRouter {
|
||||
pub id: ComponentId,
|
||||
// All messages which do not have a dedicated queue.
|
||||
pub composite_router_map: HashMap<ComponentId, mpsc::Sender<GenericMessage<CompositeRequest>>>,
|
||||
pub mode_router_map: HashMap<ComponentId, mpsc::Sender<GenericMessage<ModeRequest>>>,
|
||||
#[derive(Clone, Eq, PartialEq, Debug, new)]
|
||||
pub struct TargetedRequest {
|
||||
pub(crate) target_id: TargetId,
|
||||
pub(crate) request: Request,
|
||||
}
|
||||
|
||||
impl Default for GenericRequestRouter {
|
||||
fn default() -> Self {
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub struct RequestWithToken {
|
||||
pub(crate) targeted_request: TargetedRequest,
|
||||
pub(crate) token: VerificationToken<TcStateAccepted>,
|
||||
}
|
||||
|
||||
impl RequestWithToken {
|
||||
pub fn new(
|
||||
target_id: TargetId,
|
||||
request: Request,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
) -> Self {
|
||||
Self {
|
||||
id: PUS_ROUTING_SERVICE.raw(),
|
||||
composite_router_map: Default::default(),
|
||||
mode_router_map: Default::default(),
|
||||
targeted_request: TargetedRequest::new(target_id, request),
|
||||
token,
|
||||
}
|
||||
}
|
||||
}
|
||||
impl GenericRequestRouter {
|
||||
pub(crate) fn handle_error_generic(
|
||||
&self,
|
||||
active_request: &impl ActiveRequestProvider,
|
||||
tc: &PusTcReader,
|
||||
error: GenericRoutingError,
|
||||
tm_sender: &(impl EcssTmSender + ?Sized),
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
time_stamp: &[u8],
|
||||
) {
|
||||
warn!(
|
||||
"Routing request for service {} failed: {error:?}",
|
||||
tc.service()
|
||||
);
|
||||
let accepted_token: VerificationToken<TcStateAccepted> = active_request
|
||||
.token()
|
||||
.try_into()
|
||||
.expect("token is not in accepted state");
|
||||
match error {
|
||||
GenericRoutingError::UnknownTargetId(id) => {
|
||||
let apid_target_id = UniqueApidTargetId::from(id);
|
||||
warn!("Target APID for request: {}", apid_target_id.apid);
|
||||
warn!("Target Unique ID for request: {}", apid_target_id.unique_id);
|
||||
let mut fail_data: [u8; 8] = [0; 8];
|
||||
fail_data.copy_from_slice(&id.to_be_bytes());
|
||||
verif_reporter
|
||||
.completion_failure(
|
||||
tm_sender,
|
||||
accepted_token,
|
||||
FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
}
|
||||
GenericRoutingError::Send(_) => {
|
||||
let mut fail_data: [u8; 8] = [0; 8];
|
||||
fail_data.copy_from_slice(&active_request.target_id().to_be_bytes());
|
||||
verif_reporter
|
||||
.completion_failure(
|
||||
tm_sender,
|
||||
accepted_token,
|
||||
FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data),
|
||||
)
|
||||
.expect("Sending start failure failed");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
impl PusRequestRouter<HkRequest> for GenericRequestRouter {
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct GenericRequestRouter(pub HashMap<TargetId, mpsc::Sender<RequestWithToken>>);
|
||||
|
||||
impl PusHkRequestRouter for GenericRequestRouter {
|
||||
type Error = GenericRoutingError;
|
||||
|
||||
fn route(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
target_id: ComponentId,
|
||||
target_id: TargetId,
|
||||
hk_request: HkRequest,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error> {
|
||||
if let Some(sender) = self.composite_router_map.get(&target_id) {
|
||||
if let Some(sender) = self.0.get(&target_id) {
|
||||
sender
|
||||
.send(GenericMessage::new(
|
||||
requestor_info,
|
||||
CompositeRequest::Hk(hk_request),
|
||||
.send(RequestWithToken::new(
|
||||
target_id,
|
||||
Request::Hk(hk_request),
|
||||
token,
|
||||
))
|
||||
.map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
|
||||
return Ok(());
|
||||
.map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?;
|
||||
}
|
||||
Err(GenericRoutingError::UnknownTargetId(target_id))
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl PusRequestRouter<ActionRequest> for GenericRequestRouter {
|
||||
impl PusActionRequestRouter for GenericRequestRouter {
|
||||
type Error = GenericRoutingError;
|
||||
|
||||
fn route(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
target_id: ComponentId,
|
||||
target_id: TargetId,
|
||||
action_request: ActionRequest,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error> {
|
||||
if let Some(sender) = self.composite_router_map.get(&target_id) {
|
||||
if let Some(sender) = self.0.get(&target_id) {
|
||||
sender
|
||||
.send(GenericMessage::new(
|
||||
requestor_info,
|
||||
CompositeRequest::Action(action_request),
|
||||
.send(RequestWithToken::new(
|
||||
target_id,
|
||||
Request::Action(action_request),
|
||||
token,
|
||||
))
|
||||
.map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
|
||||
return Ok(());
|
||||
.map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?;
|
||||
}
|
||||
Err(GenericRoutingError::UnknownTargetId(target_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl PusRequestRouter<ModeRequest> for GenericRequestRouter {
|
||||
type Error = GenericRoutingError;
|
||||
|
||||
fn route(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), Self::Error> {
|
||||
if let Some(sender) = self.mode_router_map.get(&target_id) {
|
||||
sender
|
||||
.send(GenericMessage::new(requestor_info, request))
|
||||
.map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
|
||||
return Ok(());
|
||||
}
|
||||
Err(GenericRoutingError::UnknownTargetId(target_id))
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
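Both router variants in this hunk are thin maps from a target ID to an mpsc sender, so adding a target amounts to inserting a channel into that map. A small sketch against the tuple-struct variant (GenericRequestRouter(pub HashMap<TargetId, mpsc::Sender<RequestWithToken>>)); the helper name is made up:

use std::sync::mpsc;

// Hypothetical wiring: create a request channel for one target and register its
// sender in the router, handing the receiver back to the target's handler task.
fn register_target(
    router: &mut GenericRequestRouter,
    target_id: TargetId,
) -> mpsc::Receiver<RequestWithToken> {
    let (request_tx, request_rx) = mpsc::channel();
    router.0.insert(target_id, request_tx);
    request_rx
}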
133
satrs-example/src/tcp.rs
Normal file
@ -0,0 +1,133 @@
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
sync::{Arc, Mutex},
|
||||
};
|
||||
|
||||
use log::{info, warn};
|
||||
use satrs::{
|
||||
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
|
||||
pus::ReceivesEcssPusTc,
|
||||
spacepackets::PacketId,
|
||||
tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore},
|
||||
};
|
||||
use satrs_example::config::PUS_APID;
|
||||
|
||||
use crate::ccsds::CcsdsReceiver;
|
||||
|
||||
pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)];
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub struct SyncTcpTmSource {
|
||||
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
max_packets_stored: usize,
|
||||
pub silent_packet_overwrite: bool,
|
||||
}
|
||||
|
||||
impl SyncTcpTmSource {
|
||||
pub fn new(max_packets_stored: usize) -> Self {
|
||||
Self {
|
||||
tm_queue: Arc::default(),
|
||||
max_packets_stored,
|
||||
silent_packet_overwrite: true,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_tm(&mut self, tm: &[u8]) {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if tm_queue.len() > self.max_packets_stored {
|
||||
if !self.silent_packet_overwrite {
|
||||
warn!("TPC TM source is full, deleting oldest packet");
|
||||
}
|
||||
tm_queue.pop_front();
|
||||
}
|
||||
tm_queue.push_back(tm.to_vec());
|
||||
}
|
||||
}
|
||||
|
||||
impl TmPacketSourceCore for SyncTcpTmSource {
|
||||
type Error = ();
|
||||
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if !tm_queue.is_empty() {
|
||||
let next_vec = tm_queue.front().unwrap();
|
||||
if buffer.len() < next_vec.len() {
|
||||
panic!(
|
||||
"provided buffer too small, must be at least {} bytes",
|
||||
next_vec.len()
|
||||
);
|
||||
}
|
||||
let next_vec = tm_queue.pop_front().unwrap();
|
||||
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
|
||||
if next_vec.len() > 9 {
|
||||
let service = next_vec[7];
|
||||
let subservice = next_vec[8];
|
||||
info!("Sending PUS TM[{service},{subservice}]")
|
||||
} else {
|
||||
info!("Sending PUS TM");
|
||||
}
|
||||
return Ok(next_vec.len());
|
||||
}
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
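Since SyncTcpTmSource just wraps an Arc<Mutex<VecDeque<Vec<u8>>>>, cloning it yields another handle onto the same queue: one clone feeds the TCP server while producers push telemetry through add_tm. A rough round-trip sketch with the TmPacketSourceCore trait already in scope from the imports above; packet bytes and buffer size are arbitrary:

// Hypothetical round trip: push a TM packet through one handle and read it back
// through the TmPacketSourceCore impl on a clone of the same source.
fn tm_source_round_trip() {
    let mut tm_source = SyncTcpTmSource::new(50);
    let mut server_side = tm_source.clone();
    tm_source.add_tm(&[0x17, 0x01, 0x02]);
    let mut read_buf = [0u8; 1024];
    let read_len = server_side
        .retrieve_packet(&mut read_buf)
        .expect("retrieving TM failed");
    assert_eq!(read_len, 3);
    assert_eq!(&read_buf[..read_len], &[0x17, 0x01, 0x02]);
}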
pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer<
|
||||
(),
|
||||
CcsdsError<MpscErrorType>,
|
||||
SyncTcpTmSource,
|
||||
CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
|
||||
>;
|
||||
|
||||
pub struct TcpTask<
|
||||
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
|
||||
+ ReceivesEcssPusTc<Error = MpscErrorType>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
MpscErrorType: 'static,
|
||||
> {
|
||||
server: TcpServerType<TcSource, MpscErrorType>,
|
||||
}
|
||||
|
||||
impl<
|
||||
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
|
||||
+ ReceivesEcssPusTc<Error = MpscErrorType>
|
||||
+ Clone
|
||||
+ Send
|
||||
+ 'static,
|
||||
MpscErrorType: 'static + core::fmt::Debug,
|
||||
> TcpTask<TcSource, MpscErrorType>
|
||||
{
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: SyncTcpTmSource,
|
||||
tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
Ok(Self {
|
||||
server: TcpSpacepacketsServer::new(
|
||||
cfg,
|
||||
tm_source,
|
||||
tc_receiver,
|
||||
Box::new(PACKET_ID_LOOKUP),
|
||||
)?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
loop {
|
||||
let result = self.server.handle_next_connection();
|
||||
match result {
|
||||
Ok(conn_result) => {
|
||||
info!(
|
||||
"Served {} TMs and {} TCs for client {:?}",
|
||||
conn_result.num_sent_tms, conn_result.num_received_tcs, conn_result.addr
|
||||
);
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("TCP server error: {e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -4,18 +4,18 @@ use std::{
|
||||
};
|
||||
|
||||
use log::info;
|
||||
use satrs::tmtc::{PacketAsVec, PacketInPool, SharedPacketPool};
|
||||
use satrs::{
|
||||
pool::PoolProvider,
|
||||
pool::{PoolProvider, StoreAddr},
|
||||
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
|
||||
spacepackets::{
|
||||
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
|
||||
time::cds::MIN_CDS_FIELD_LEN,
|
||||
CcsdsPacket,
|
||||
},
|
||||
tmtc::tm_helper::SharedTmPool,
|
||||
};
|
||||
|
||||
use crate::interface::tcp::SyncTcpTmSource;
|
||||
use crate::tcp::SyncTcpTmSource;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct CcsdsSeqCounterMap {
|
||||
@ -70,28 +70,23 @@ impl TmFunnelCommon {
|
||||
}
|
||||
|
||||
fn packet_printout(tm: &PusTmZeroCopyWriter) {
|
||||
info!(
|
||||
"Sending PUS TM[{},{}] with APID {}",
|
||||
tm.service(),
|
||||
tm.subservice(),
|
||||
tm.apid()
|
||||
);
|
||||
info!("Sending PUS TM[{},{}]", tm.service(), tm.subservice());
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TmSinkStatic {
|
||||
pub struct TmFunnelStatic {
|
||||
common: TmFunnelCommon,
|
||||
shared_tm_store: SharedPacketPool,
|
||||
tm_funnel_rx: mpsc::Receiver<PacketInPool>,
|
||||
tm_server_tx: mpsc::SyncSender<PacketInPool>,
|
||||
shared_tm_store: SharedTmPool,
|
||||
tm_funnel_rx: mpsc::Receiver<StoreAddr>,
|
||||
tm_server_tx: mpsc::SyncSender<StoreAddr>,
|
||||
}
|
||||
|
||||
impl TmSinkStatic {
|
||||
impl TmFunnelStatic {
|
||||
pub fn new(
|
||||
shared_tm_store: SharedPacketPool,
|
||||
shared_tm_store: SharedTmPool,
|
||||
sync_tm_tcp_source: SyncTcpTmSource,
|
||||
tm_funnel_rx: mpsc::Receiver<PacketInPool>,
|
||||
tm_server_tx: mpsc::SyncSender<PacketInPool>,
|
||||
tm_funnel_rx: mpsc::Receiver<StoreAddr>,
|
||||
tm_server_tx: mpsc::SyncSender<StoreAddr>,
|
||||
) -> Self {
|
||||
Self {
|
||||
common: TmFunnelCommon::new(sync_tm_tcp_source),
|
||||
@ -102,14 +97,14 @@ impl TmSinkStatic {
|
||||
}
|
||||
|
||||
pub fn operation(&mut self) {
|
||||
if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() {
|
||||
if let Ok(addr) = self.tm_funnel_rx.recv() {
|
||||
// Read the TM, set sequence counter and message counter, and finally update
|
||||
// the CRC.
|
||||
let shared_pool = self.shared_tm_store.0.clone();
|
||||
let shared_pool = self.shared_tm_store.clone_backing_pool();
|
||||
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
|
||||
let mut tm_copy = Vec::new();
|
||||
pool_guard
|
||||
.modify(&pus_tm_in_pool.store_addr, |buf| {
|
||||
.modify(&addr, |buf| {
|
||||
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
|
||||
.expect("Creating TM zero copy writer failed");
|
||||
self.common.apply_packet_processing(zero_copy_writer);
|
||||
@ -117,7 +112,7 @@ impl TmSinkStatic {
|
||||
})
|
||||
.expect("Reading TM from pool failed");
|
||||
self.tm_server_tx
|
||||
.send(pus_tm_in_pool)
|
||||
.send(addr)
|
||||
.expect("Sending TM to server failed");
|
||||
                // We could also do this step in the update closure, but I'd rather avoid this, as it could
|
||||
// lead to nested locking.
|
||||
@ -126,17 +121,17 @@ impl TmSinkStatic {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TmSinkDynamic {
|
||||
pub struct TmFunnelDynamic {
|
||||
common: TmFunnelCommon,
|
||||
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
|
||||
tm_server_tx: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_rx: mpsc::Receiver<Vec<u8>>,
|
||||
tm_server_tx: mpsc::Sender<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl TmSinkDynamic {
|
||||
impl TmFunnelDynamic {
|
||||
pub fn new(
|
||||
sync_tm_tcp_source: SyncTcpTmSource,
|
||||
tm_funnel_rx: mpsc::Receiver<PacketAsVec>,
|
||||
tm_server_tx: mpsc::Sender<PacketAsVec>,
|
||||
tm_funnel_rx: mpsc::Receiver<Vec<u8>>,
|
||||
tm_server_tx: mpsc::Sender<Vec<u8>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
common: TmFunnelCommon::new(sync_tm_tcp_source),
|
||||
@ -149,13 +144,13 @@ impl TmSinkDynamic {
|
||||
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
|
||||
// Read the TM, set sequence counter and message counter, and finally update
|
||||
// the CRC.
|
||||
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
|
||||
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm, MIN_CDS_FIELD_LEN)
|
||||
.expect("Creating TM zero copy writer failed");
|
||||
self.common.apply_packet_processing(zero_copy_writer);
|
||||
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
|
||||
self.tm_server_tx
|
||||
.send(tm)
|
||||
.send(tm.clone())
|
||||
.expect("Sending TM to server failed");
|
||||
self.common.sync_tm_tcp_source.add_tm(&tm);
|
||||
}
|
||||
}
|
||||
}
|
213
satrs-example/src/tmtc.rs
Normal file
@ -0,0 +1,213 @@
|
||||
use log::warn;
|
||||
use satrs::pus::verification::std_mod::{
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
|
||||
};
|
||||
use satrs::pus::{EcssTcAndToken, ReceivesEcssPusTc};
|
||||
use satrs::spacepackets::SpHeader;
|
||||
use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError};
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::pus::PusReceiver;
|
||||
use satrs::pool::{PoolProvider, SharedStaticMemoryPool, StoreAddr, StoreError};
|
||||
use satrs::spacepackets::ecss::tc::PusTcReader;
|
||||
use satrs::spacepackets::ecss::PusPacket;
|
||||
use satrs::tmtc::ReceivesCcsdsTc;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Error)]
|
||||
pub enum MpscStoreAndSendError {
|
||||
#[error("Store error: {0}")]
|
||||
Store(#[from] StoreError),
|
||||
#[error("TC send error: {0}")]
|
||||
TcSend(#[from] SendError<EcssTcAndToken>),
|
||||
#[error("TMTC send error: {0}")]
|
||||
TmTcSend(#[from] SendError<StoreAddr>),
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SharedTcPool {
|
||||
pub pool: SharedStaticMemoryPool,
|
||||
}
|
||||
|
||||
impl SharedTcPool {
|
||||
pub fn add_pus_tc(&mut self, pus_tc: &PusTcReader) -> Result<StoreAddr, StoreError> {
|
||||
let mut pg = self.pool.write().expect("error locking TC store");
|
||||
let addr = pg.free_element(pus_tc.len_packed(), |buf| {
|
||||
buf[0..pus_tc.len_packed()].copy_from_slice(pus_tc.raw_data());
|
||||
})?;
|
||||
Ok(addr)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct PusTcSourceProviderSharedPool {
|
||||
pub tc_source: SyncSender<StoreAddr>,
|
||||
pub shared_pool: SharedTcPool,
|
||||
}
|
||||
|
||||
impl PusTcSourceProviderSharedPool {
|
||||
#[allow(dead_code)]
|
||||
pub fn clone_backing_pool(&self) -> SharedStaticMemoryPool {
|
||||
self.shared_pool.pool.clone()
|
||||
}
|
||||
}
|
||||
|
||||
impl ReceivesEcssPusTc for PusTcSourceProviderSharedPool {
|
||||
type Error = MpscStoreAndSendError;
|
||||
|
||||
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
|
||||
let addr = self.shared_pool.add_pus_tc(pus_tc)?;
|
||||
self.tc_source.send(addr)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ReceivesCcsdsTc for PusTcSourceProviderSharedPool {
|
||||
type Error = MpscStoreAndSendError;
|
||||
|
||||
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut pool = self.shared_pool.pool.write().expect("locking pool failed");
|
||||
let addr = pool.add(tc_raw)?;
|
||||
drop(pool);
|
||||
self.tc_source.send(addr)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// Newtype wrapper; we cannot implement the necessary traits on the MPSC sender directly because of orphan rules.
|
||||
#[derive(Clone)]
|
||||
pub struct PusTcSourceProviderDynamic(pub Sender<Vec<u8>>);
|
||||
|
||||
impl ReceivesEcssPusTc for PusTcSourceProviderDynamic {
|
||||
type Error = SendError<Vec<u8>>;
|
||||
|
||||
fn pass_pus_tc(&mut self, _: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
|
||||
self.0.send(pus_tc.raw_data().to_vec())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ReceivesCcsdsTc for PusTcSourceProviderDynamic {
|
||||
type Error = mpsc::SendError<Vec<u8>>;
|
||||
|
||||
fn pass_ccsds(&mut self, _: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.0.send(tc_raw.to_vec())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
// TC source components where static pools are the backing memory of the received telecommands.
|
||||
pub struct TcSourceTaskStatic {
|
||||
shared_tc_pool: SharedTcPool,
|
||||
tc_receiver: Receiver<StoreAddr>,
|
||||
tc_buf: [u8; 4096],
|
||||
pus_receiver: PusReceiver<VerificationReporterWithSharedPoolMpscBoundedSender>,
|
||||
}
|
||||
|
||||
impl TcSourceTaskStatic {
|
||||
pub fn new(
|
||||
shared_tc_pool: SharedTcPool,
|
||||
tc_receiver: Receiver<StoreAddr>,
|
||||
pus_receiver: PusReceiver<VerificationReporterWithSharedPoolMpscBoundedSender>,
|
||||
) -> Self {
|
||||
Self {
|
||||
shared_tc_pool,
|
||||
tc_receiver,
|
||||
tc_buf: [0; 4096],
|
||||
pus_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.poll_tc();
|
||||
}
|
||||
|
||||
pub fn poll_tc(&mut self) -> bool {
|
||||
match self.tc_receiver.try_recv() {
|
||||
Ok(addr) => {
|
||||
let pool = self
|
||||
.shared_tc_pool
|
||||
.pool
|
||||
.read()
|
||||
.expect("locking tc pool failed");
|
||||
pool.read(&addr, &mut self.tc_buf)
|
||||
.expect("reading pool failed");
|
||||
drop(pool);
|
||||
match PusTcReader::new(&self.tc_buf) {
|
||||
Ok((pus_tc, _)) => {
|
||||
self.pus_receiver
|
||||
.handle_tc_packet(
|
||||
satrs::pus::TcInMemory::StoreAddr(addr),
|
||||
pus_tc.service(),
|
||||
&pus_tc,
|
||||
)
|
||||
.ok();
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("error creating PUS TC from raw data: {e}");
|
||||
warn!("raw data: {:x?}", self.tc_buf);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => match e {
|
||||
TryRecvError::Empty => false,
|
||||
TryRecvError::Disconnected => {
|
||||
warn!("tmtc thread: sender disconnected");
|
||||
false
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TC source components where the heap is the backing memory of the received telecommands.
|
||||
pub struct TcSourceTaskDynamic {
|
||||
pub tc_receiver: Receiver<Vec<u8>>,
|
||||
pus_receiver: PusReceiver<VerificationReporterWithVecMpscSender>,
|
||||
}
|
||||
|
||||
impl TcSourceTaskDynamic {
|
||||
pub fn new(
|
||||
tc_receiver: Receiver<Vec<u8>>,
|
||||
pus_receiver: PusReceiver<VerificationReporterWithVecMpscSender>,
|
||||
) -> Self {
|
||||
Self {
|
||||
tc_receiver,
|
||||
pus_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.poll_tc();
|
||||
}
|
||||
|
||||
pub fn poll_tc(&mut self) -> bool {
|
||||
match self.tc_receiver.try_recv() {
|
||||
Ok(tc) => match PusTcReader::new(&tc) {
|
||||
Ok((pus_tc, _)) => {
|
||||
self.pus_receiver
|
||||
.handle_tc_packet(
|
||||
satrs::pus::TcInMemory::Vec(tc.clone()),
|
||||
pus_tc.service(),
|
||||
&pus_tc,
|
||||
)
|
||||
.ok();
|
||||
true
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("error creating PUS TC from raw data: {e}");
|
||||
warn!("raw data: {:x?}", tc);
|
||||
true
|
||||
}
|
||||
},
|
||||
Err(e) => match e {
|
||||
TryRecvError::Empty => false,
|
||||
TryRecvError::Disconnected => {
|
||||
warn!("tmtc thread: sender disconnected");
|
||||
false
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
@ -1,2 +0,0 @@
|
||||
pub mod tc_source;
|
||||
pub mod tm_sink;
|
@ -1,107 +0,0 @@
|
||||
use satrs::{
|
||||
pool::PoolProvider,
|
||||
pus::HandlingStatus,
|
||||
tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool, SharedPacketPool},
|
||||
};
|
||||
use std::sync::mpsc::{self, TryRecvError};
|
||||
|
||||
use satrs::pus::MpscTmAsVecSender;
|
||||
|
||||
use crate::pus::PusTcDistributor;
|
||||
|
||||
// TC source components where static pools are the backing memory of the received telecommands.
|
||||
pub struct TcSourceTaskStatic {
|
||||
shared_tc_pool: SharedPacketPool,
|
||||
tc_receiver: mpsc::Receiver<PacketInPool>,
|
||||
tc_buf: [u8; 4096],
|
||||
pus_distributor: PusTcDistributor<PacketSenderWithSharedPool>,
|
||||
}
|
||||
|
||||
impl TcSourceTaskStatic {
|
||||
pub fn new(
|
||||
shared_tc_pool: SharedPacketPool,
|
||||
tc_receiver: mpsc::Receiver<PacketInPool>,
|
||||
pus_receiver: PusTcDistributor<PacketSenderWithSharedPool>,
|
||||
) -> Self {
|
||||
Self {
|
||||
shared_tc_pool,
|
||||
tc_receiver,
|
||||
tc_buf: [0; 4096],
|
||||
pus_distributor: pus_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.poll_tc();
|
||||
}
|
||||
|
||||
pub fn poll_tc(&mut self) -> HandlingStatus {
|
||||
// Right now, we only expect ECSS PUS packets.
|
||||
// If packets like CFDP are expected, we might have to check the APID first.
|
||||
match self.tc_receiver.try_recv() {
|
||||
Ok(packet_in_pool) => {
|
||||
let pool = self
|
||||
.shared_tc_pool
|
||||
.0
|
||||
.read()
|
||||
.expect("locking tc pool failed");
|
||||
pool.read(&packet_in_pool.store_addr, &mut self.tc_buf)
|
||||
.expect("reading pool failed");
|
||||
drop(pool);
|
||||
self.pus_distributor
|
||||
.handle_tc_packet_in_store(packet_in_pool, &self.tc_buf)
|
||||
.ok();
|
||||
HandlingStatus::HandledOne
|
||||
}
|
||||
Err(e) => match e {
|
||||
TryRecvError::Empty => HandlingStatus::Empty,
|
||||
TryRecvError::Disconnected => {
|
||||
log::warn!("tmtc thread: sender disconnected");
|
||||
HandlingStatus::Empty
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TC source components where the heap is the backing memory of the received telecommands.
|
||||
pub struct TcSourceTaskDynamic {
|
||||
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
|
||||
pus_distributor: PusTcDistributor<MpscTmAsVecSender>,
|
||||
}
|
||||
|
||||
impl TcSourceTaskDynamic {
|
||||
pub fn new(
|
||||
tc_receiver: mpsc::Receiver<PacketAsVec>,
|
||||
pus_receiver: PusTcDistributor<MpscTmAsVecSender>,
|
||||
) -> Self {
|
||||
Self {
|
||||
tc_receiver,
|
||||
pus_distributor: pus_receiver,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn periodic_operation(&mut self) {
|
||||
self.poll_tc();
|
||||
}
|
||||
|
||||
pub fn poll_tc(&mut self) -> HandlingStatus {
|
||||
// Right now, we only expect ECSS PUS packets.
|
||||
// If packets like CFDP are expected, we might have to check the APID first.
|
||||
match self.tc_receiver.try_recv() {
|
||||
Ok(packet_as_vec) => {
|
||||
self.pus_distributor
|
||||
.handle_tc_packet_vec(packet_as_vec)
|
||||
.ok();
|
||||
HandlingStatus::HandledOne
|
||||
}
|
||||
Err(e) => match e {
|
||||
TryRecvError::Empty => HandlingStatus::Empty,
|
||||
TryRecvError::Disconnected => {
|
||||
log::warn!("tmtc thread: sender disconnected");
|
||||
HandlingStatus::Empty
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
@ -1,13 +1,13 @@
|
||||
use core::fmt::Debug;
|
||||
use std::net::{SocketAddr, UdpSocket};
|
||||
use std::sync::mpsc;
|
||||
use std::{
|
||||
net::{SocketAddr, UdpSocket},
|
||||
sync::mpsc::Receiver,
|
||||
};
|
||||
|
||||
use log::{info, warn};
|
||||
use satrs::pus::HandlingStatus;
|
||||
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderRaw};
|
||||
use satrs::{
|
||||
hal::std::udp_server::{ReceiveResult, UdpTcServer},
|
||||
pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
|
||||
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr},
|
||||
tmtc::CcsdsError,
|
||||
};
|
||||
|
||||
pub trait UdpTmHandler {
|
||||
@ -15,20 +15,20 @@ pub trait UdpTmHandler {
|
||||
}
|
||||
|
||||
pub struct StaticUdpTmHandler {
|
||||
pub tm_rx: mpsc::Receiver<PacketInPool>,
|
||||
pub tm_rx: Receiver<StoreAddr>,
|
||||
pub tm_store: SharedStaticMemoryPool,
|
||||
}
|
||||
|
||||
impl UdpTmHandler for StaticUdpTmHandler {
|
||||
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) {
|
||||
while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() {
|
||||
while let Ok(addr) = self.tm_rx.try_recv() {
|
||||
let store_lock = self.tm_store.write();
|
||||
if store_lock.is_err() {
|
||||
warn!("Locking TM store failed");
|
||||
continue;
|
||||
}
|
||||
let mut store_lock = store_lock.unwrap();
|
||||
let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr);
|
||||
let pg = store_lock.read_with_guard(addr);
|
||||
let read_res = pg.read_as_vec();
|
||||
if read_res.is_err() {
|
||||
warn!("Error reading TM pool data");
|
||||
@ -44,20 +44,20 @@ impl UdpTmHandler for StaticUdpTmHandler {
|
||||
}
|
||||
|
||||
pub struct DynamicUdpTmHandler {
|
||||
pub tm_rx: mpsc::Receiver<PacketAsVec>,
|
||||
pub tm_rx: Receiver<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl UdpTmHandler for DynamicUdpTmHandler {
|
||||
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) {
|
||||
while let Ok(tm) = self.tm_rx.try_recv() {
|
||||
if tm.packet.len() > 9 {
|
||||
let service = tm.packet[7];
|
||||
let subservice = tm.packet[8];
|
||||
if tm.len() > 9 {
|
||||
let service = tm[7];
|
||||
let subservice = tm[8];
|
||||
info!("Sending PUS TM[{service},{subservice}]")
|
||||
} else {
|
||||
info!("Sending PUS TM");
|
||||
}
|
||||
let result = socket.send_to(&tm.packet, recv_addr);
|
||||
let result = socket.send_to(&tm, recv_addr);
|
||||
if let Err(e) = result {
|
||||
warn!("Sending TM with UDP socket failed: {e}")
|
||||
}
|
||||
@ -65,57 +65,49 @@ impl UdpTmHandler for DynamicUdpTmHandler {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct UdpTmtcServer<
|
||||
TcSender: PacketSenderRaw<Error = SendError>,
|
||||
TmHandler: UdpTmHandler,
|
||||
SendError,
|
||||
> {
|
||||
pub udp_tc_server: UdpTcServer<TcSender, SendError>,
|
||||
pub struct UdpTmtcServer<TmHandler: UdpTmHandler, SendError> {
|
||||
pub udp_tc_server: UdpTcServer<CcsdsError<SendError>>,
|
||||
pub tm_handler: TmHandler,
|
||||
}
|
||||
|
||||
impl<
|
||||
TcSender: PacketSenderRaw<Error = SendError>,
|
||||
TmHandler: UdpTmHandler,
|
||||
SendError: Debug + 'static,
|
||||
> UdpTmtcServer<TcSender, TmHandler, SendError>
|
||||
impl<TmHandler: UdpTmHandler, SendError: core::fmt::Debug + 'static>
|
||||
UdpTmtcServer<TmHandler, SendError>
|
||||
{
|
||||
pub fn periodic_operation(&mut self) {
|
||||
loop {
|
||||
if self.poll_tc_server() == HandlingStatus::Empty {
|
||||
break;
|
||||
}
|
||||
}
|
||||
while self.poll_tc_server() {}
|
||||
if let Some(recv_addr) = self.udp_tc_server.last_sender() {
|
||||
self.tm_handler
|
||||
.send_tm_to_udp_client(&self.udp_tc_server.socket, &recv_addr);
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_tc_server(&mut self) -> HandlingStatus {
|
||||
fn poll_tc_server(&mut self) -> bool {
|
||||
match self.udp_tc_server.try_recv_tc() {
|
||||
Ok(_) => HandlingStatus::HandledOne,
|
||||
Err(e) => {
|
||||
match e {
|
||||
ReceiveResult::NothingReceived => (),
|
||||
ReceiveResult::Io(e) => {
|
||||
warn!("IO error {e}");
|
||||
Ok(_) => true,
|
||||
Err(e) => match e {
|
||||
ReceiveResult::ReceiverError(e) => match e {
|
||||
CcsdsError::ByteConversionError(e) => {
|
||||
warn!("packet error: {e:?}");
|
||||
true
|
||||
}
|
||||
ReceiveResult::Send(send_error) => {
|
||||
warn!("send error {send_error:?}");
|
||||
CcsdsError::CustomError(e) => {
|
||||
warn!("mpsc custom error {e:?}");
|
||||
true
|
||||
}
|
||||
},
|
||||
ReceiveResult::IoError(e) => {
|
||||
warn!("IO error {e}");
|
||||
false
|
||||
}
|
||||
HandlingStatus::Empty
|
||||
}
|
||||
ReceiveResult::NothingReceived => false,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::net::Ipv4Addr;
|
||||
use std::{
|
||||
cell::RefCell,
|
||||
collections::VecDeque,
|
||||
net::IpAddr,
|
||||
sync::{Arc, Mutex},
|
||||
@ -126,26 +118,21 @@ mod tests {
|
||||
ecss::{tc::PusTcCreator, WritablePusPacket},
|
||||
SpHeader,
|
||||
},
|
||||
tmtc::PacketSenderRaw,
|
||||
ComponentId,
|
||||
tmtc::ReceivesTcCore,
|
||||
};
|
||||
use satrs_example::config::{components, OBSW_SERVER_ADDR};
|
||||
use satrs_example::config::{OBSW_SERVER_ADDR, PUS_APID};
|
||||
|
||||
use super::*;
|
||||
|
||||
const UDP_SERVER_ID: ComponentId = 0x05;
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct TestSender {
|
||||
tc_vec: RefCell<VecDeque<PacketAsVec>>,
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct TestReceiver {
|
||||
tc_vec: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
|
||||
impl PacketSenderRaw for TestSender {
|
||||
type Error = ();
|
||||
|
||||
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut mut_queue = self.tc_vec.borrow_mut();
|
||||
mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
|
||||
impl ReceivesTcCore for TestReceiver {
|
||||
type Error = CcsdsError<()>;
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.tc_vec.lock().unwrap().push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -164,10 +151,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
|
||||
let test_receiver = TestSender::default();
|
||||
// let tc_queue = test_receiver.tc_vec.clone();
|
||||
let udp_tc_server =
|
||||
UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
|
||||
let test_receiver = TestReceiver::default();
|
||||
let tc_queue = test_receiver.tc_vec.clone();
|
||||
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
|
||||
let tm_handler = TestTmHandler::default();
|
||||
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
|
||||
let mut udp_dyn_server = UdpTmtcServer {
|
||||
@ -175,18 +161,16 @@ mod tests {
|
||||
tm_handler,
|
||||
};
|
||||
udp_dyn_server.periodic_operation();
|
||||
let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
|
||||
assert!(queue.is_empty());
|
||||
assert!(tc_queue.lock().unwrap().is_empty());
|
||||
assert!(tm_handler_calls.lock().unwrap().is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_transactions() {
|
||||
let sock_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 0);
|
||||
let test_receiver = TestSender::default();
|
||||
// let tc_queue = test_receiver.tc_vec.clone();
|
||||
let udp_tc_server =
|
||||
UdpTcServer::new(UDP_SERVER_ID, sock_addr, 2048, test_receiver).unwrap();
|
||||
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), 0);
|
||||
let test_receiver = TestReceiver::default();
|
||||
let tc_queue = test_receiver.tc_vec.clone();
|
||||
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(test_receiver)).unwrap();
|
||||
let server_addr = udp_tc_server.socket.local_addr().unwrap();
|
||||
let tm_handler = TestTmHandler::default();
|
||||
let tm_handler_calls = tm_handler.addrs_to_send_to.clone();
|
||||
@ -194,21 +178,20 @@ mod tests {
|
||||
udp_tc_server,
|
||||
tm_handler,
|
||||
};
|
||||
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
|
||||
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
|
||||
let mut sph = SpHeader::tc_unseg(PUS_APID, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true)
|
||||
.to_vec()
|
||||
.unwrap();
|
||||
let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");
|
||||
let client_addr = client.local_addr().unwrap();
|
||||
println!("{}", server_addr);
|
||||
client.send_to(&ping_tc, server_addr).unwrap();
|
||||
client.connect(server_addr).unwrap();
|
||||
client.send(&ping_tc).unwrap();
|
||||
udp_dyn_server.periodic_operation();
|
||||
{
|
||||
let mut queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow_mut();
|
||||
assert!(!queue.is_empty());
|
||||
let packet_with_sender = queue.pop_front().unwrap();
|
||||
assert_eq!(packet_with_sender.packet, ping_tc);
|
||||
assert_eq!(packet_with_sender.sender_id, UDP_SERVER_ID);
|
||||
let mut tc_queue = tc_queue.lock().unwrap();
|
||||
assert!(!tc_queue.is_empty());
|
||||
let received_tc = tc_queue.pop_front().unwrap();
|
||||
assert_eq!(received_tc, ping_tc);
|
||||
}
|
||||
|
||||
{
|
||||
@ -219,9 +202,7 @@ mod tests {
|
||||
assert_eq!(received_addr, client_addr);
|
||||
}
|
||||
udp_dyn_server.periodic_operation();
|
||||
let queue = udp_dyn_server.udp_tc_server.tc_sender.tc_vec.borrow();
|
||||
assert!(queue.is_empty());
|
||||
drop(queue);
|
||||
assert!(tc_queue.lock().unwrap().is_empty());
|
||||
// Still tries to send to the same client.
|
||||
{
|
||||
let mut tm_handler_calls = tm_handler_calls.lock().unwrap();
|
@ -8,10 +8,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
# [unreleased]
|
||||
|
||||
# [v0.1.2] 2024-04-17
|
||||
|
||||
Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
|
||||
|
||||
# [v0.1.1] 2024-02-17
|
||||
|
||||
- Bumped `spacepackets` to v0.10.0
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "satrs-mib"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
edition = "2021"
|
||||
rust-version = "1.61"
|
||||
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
|
||||
@ -23,12 +23,13 @@ version = "1"
|
||||
optional = true
|
||||
|
||||
[dependencies.satrs-shared]
|
||||
version = ">=0.1.3, <0.2"
|
||||
path = "../satrs-shared"
|
||||
version = "0.1.2"
|
||||
features = ["serde"]
|
||||
|
||||
[dependencies.satrs-mib-codegen]
|
||||
path = "codegen"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
|
||||
[dependencies.serde]
|
||||
version = "1"
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "satrs-mib-codegen"
|
||||
version = "0.1.2"
|
||||
version = "0.1.1"
|
||||
edition = "2021"
|
||||
description = "satrs-mib proc macro implementation"
|
||||
homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
|
||||
@ -28,7 +28,8 @@ features = ["full"]
|
||||
trybuild = { version = "1", features = ["diff"] }
|
||||
|
||||
[dev-dependencies.satrs-shared]
|
||||
version = ">=0.1.3, <0.2"
|
||||
version = "0.1.2"
|
||||
path = "../../satrs-shared"
|
||||
|
||||
[dev-dependencies.satrs-mib]
|
||||
path = ".."
|
||||
|
@ -10,14 +10,9 @@ serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
log = "0.4"
|
||||
thiserror = "1"
|
||||
fern = "0.5"
|
||||
humantime = "2"
|
||||
|
||||
[dependencies.asynchronix]
|
||||
version = "0.2.1"
|
||||
git = "https://github.com/asynchronics/asynchronix.git"
|
||||
branch = "main"
|
||||
features = ["serde"]
|
||||
|
||||
[dependencies.satrs]
|
||||
path = "../satrs"
|
||||
|
@ -189,11 +189,11 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_basic_mgm_request() {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
let request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
|
||||
let request = SimRequest::new(MgmRequest::RequestSensorData);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply.is_some());
|
||||
@ -212,11 +212,11 @@ pub mod tests {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
switch_device_on(&mut sim_testbench, PcduSwitch::Mgm);
|
||||
|
||||
let mut request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
|
||||
let mut request = SimRequest::new(MgmRequest::RequestSensorData);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let mut sim_reply_res = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply_res.is_some());
|
||||
@ -226,11 +226,11 @@ pub mod tests {
|
||||
.expect("failed to deserialize MGM sensor values");
|
||||
sim_testbench.step_by(Duration::from_millis(50));
|
||||
|
||||
request = SimRequest::new_with_epoch_time(MgmRequest::RequestSensorData);
|
||||
request = SimRequest::new(MgmRequest::RequestSensorData);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
sim_reply_res = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply_res.is_some());
|
||||
@ -245,11 +245,11 @@ pub mod tests {
|
||||
#[test]
|
||||
fn test_basic_mgt_request_is_off() {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
|
||||
let request = SimRequest::new(MgtRequest::RequestHk);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply_res = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply_res.is_none());
|
||||
@ -259,12 +259,12 @@ pub mod tests {
|
||||
fn test_basic_mgt_request_is_on() {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
|
||||
let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
|
||||
let request = SimRequest::new(MgtRequest::RequestHk);
|
||||
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply_res = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply_res.is_some());
|
||||
@ -281,11 +281,11 @@ pub mod tests {
|
||||
}
|
||||
|
||||
fn check_mgt_hk(sim_testbench: &mut SimTestbench, expected_hk_set: MgtHkSet) {
|
||||
let request = SimRequest::new_with_epoch_time(MgtRequest::RequestHk);
|
||||
let request = SimRequest::new(MgtRequest::RequestHk);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply_res = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply_res.is_some());
|
||||
@ -309,14 +309,14 @@ pub mod tests {
|
||||
y: 200,
|
||||
z: 1000,
|
||||
};
|
||||
let request = SimRequest::new_with_epoch_time(MgtRequest::ApplyTorque {
|
||||
let request = SimRequest::new(MgtRequest::ApplyTorque {
|
||||
duration: Duration::from_millis(100),
|
||||
dipole: commanded_dipole,
|
||||
});
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step_by(Duration::from_millis(5));
|
||||
|
||||
check_mgt_hk(
|
||||
|
@ -49,27 +49,25 @@ impl SimController {
|
||||
}
|
||||
|
||||
pub fn run(&mut self, start_time: MonotonicTime, udp_polling_interval_ms: u64) {
|
||||
let mut t = start_time;
|
||||
let mut t = start_time + Duration::from_millis(udp_polling_interval_ms);
|
||||
self.sys_clock.synchronize(t);
|
||||
loop {
|
||||
let t_old = t;
|
||||
// Check for UDP requests every millisecond. Shift the simulator ahead here to prevent
|
||||
            // replies from lying in the past.
|
||||
t += Duration::from_millis(udp_polling_interval_ms);
|
||||
self.sys_clock.synchronize(t);
|
||||
self.handle_sim_requests(t_old);
|
||||
self.simulation
|
||||
.step_until(t)
|
||||
.expect("simulation step failed");
|
||||
self.handle_sim_requests();
|
||||
|
||||
self.sys_clock.synchronize(t);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_sim_requests(&mut self, old_timestamp: MonotonicTime) {
|
||||
pub fn handle_sim_requests(&mut self) {
|
||||
loop {
|
||||
match self.request_receiver.try_recv() {
|
||||
Ok(request) => {
|
||||
if request.timestamp < old_timestamp {
|
||||
log::warn!("stale data with timestamp {:?} received", request.timestamp);
|
||||
}
|
||||
if let Err(e) = match request.target() {
|
||||
SimTarget::SimCtrl => self.handle_ctrl_request(&request),
|
||||
SimTarget::Mgm => self.handle_mgm_request(&request),
|
||||
@ -174,11 +172,11 @@ mod tests {
|
||||
#[test]
|
||||
fn test_basic_ping() {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
let request = SimRequest::new_with_epoch_time(SimCtrlRequest::Ping);
|
||||
let request = SimRequest::new(SimCtrlRequest::Ping);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending sim ctrl request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply.is_some());
|
||||
|
@ -86,14 +86,14 @@ pub(crate) mod tests {
|
||||
switch: PcduSwitch,
|
||||
target: SwitchStateBinary,
|
||||
) {
|
||||
let request = SimRequest::new_with_epoch_time(PcduRequest::SwitchDevice {
|
||||
let request = SimRequest::new(PcduRequest::SwitchDevice {
|
||||
switch,
|
||||
state: target,
|
||||
});
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM switch request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
}
|
||||
|
||||
@ -113,11 +113,11 @@ pub(crate) mod tests {
|
||||
}
|
||||
|
||||
fn check_switch_state(sim_testbench: &mut SimTestbench, expected_switch_map: &SwitchMap) {
|
||||
let request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
|
||||
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step();
|
||||
let sim_reply = sim_testbench.try_receive_next_reply();
|
||||
assert!(sim_reply.is_some());
|
||||
@ -143,11 +143,11 @@ pub(crate) mod tests {
|
||||
#[test]
|
||||
fn test_pcdu_switcher_request() {
|
||||
let mut sim_testbench = SimTestbench::new();
|
||||
let request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
|
||||
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
|
||||
sim_testbench
|
||||
.send_request(request)
|
||||
.expect("sending MGM request failed");
|
||||
sim_testbench.handle_sim_requests_time_agnostic();
|
||||
sim_testbench.handle_sim_requests();
|
||||
sim_testbench.step_by(Duration::from_millis(1));
|
||||
|
||||
let sim_reply = sim_testbench.try_receive_next_reply();
|
||||
|
@ -1,8 +1,5 @@
|
||||
use asynchronix::time::MonotonicTime;
|
||||
use serde::{de::DeserializeOwned, Deserialize, Serialize};
|
||||
|
||||
pub const SIM_CTRL_UDP_PORT: u16 = 7303;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum SimTarget {
|
||||
SimCtrl,
|
||||
@ -22,7 +19,6 @@ pub struct SimMessage {
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub struct SimRequest {
|
||||
inner: SimMessage,
|
||||
pub timestamp: MonotonicTime,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
@ -57,22 +53,12 @@ pub trait SimMessageProvider: Serialize + DeserializeOwned + Clone + Sized {
|
||||
}
|
||||
|
||||
impl SimRequest {
|
||||
pub fn new_with_epoch_time<T: SerializableSimMsgPayload<SimRequest>>(
|
||||
serializable_request: T,
|
||||
) -> Self {
|
||||
Self::new(serializable_request, MonotonicTime::EPOCH)
|
||||
}
|
||||
|
||||
pub fn new<T: SerializableSimMsgPayload<SimRequest>>(
|
||||
serializable_request: T,
|
||||
timestamp: MonotonicTime,
|
||||
) -> Self {
|
||||
pub fn new<T: SerializableSimMsgPayload<SimRequest>>(serializable_request: T) -> Self {
|
||||
Self {
|
||||
inner: SimMessage {
|
||||
target: T::TARGET,
|
||||
payload: serde_json::to_string(&serializable_request).unwrap(),
|
||||
},
|
||||
timestamp,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -377,7 +363,7 @@ pub mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_basic_request() {
|
||||
let sim_request = SimRequest::new_with_epoch_time(DummyRequest::Ping);
|
||||
let sim_request = SimRequest::new(DummyRequest::Ping);
|
||||
assert_eq!(sim_request.target(), SimTarget::SimCtrl);
|
||||
assert_eq!(sim_request.msg_type(), SimMessageType::Request);
|
||||
let dummy_request =
|
||||
|
@ -3,7 +3,7 @@ use asynchronix::simulation::{Mailbox, SimInit};
|
||||
use asynchronix::time::{MonotonicTime, SystemClock};
|
||||
use controller::SimController;
|
||||
use eps::PcduModel;
|
||||
use satrs_minisim::{SimReply, SimRequest, SIM_CTRL_UDP_PORT};
|
||||
use satrs_minisim::{SimReply, SimRequest};
|
||||
use std::sync::mpsc;
|
||||
use std::thread;
|
||||
use std::time::{Duration, SystemTime};
|
||||
@ -83,38 +83,14 @@ fn main() {
|
||||
let t0 = MonotonicTime::EPOCH;
|
||||
let mut sim_ctrl =
|
||||
create_sim_controller(ThreadingModel::Default, t0, reply_sender, request_receiver);
|
||||
// Configure logger at runtime
|
||||
fern::Dispatch::new()
|
||||
// Perform allocation-free log formatting
|
||||
.format(|out, message, record| {
|
||||
out.finish(format_args!(
|
||||
"[{} {} {}] {}",
|
||||
humantime::format_rfc3339(std::time::SystemTime::now()),
|
||||
record.level(),
|
||||
record.target(),
|
||||
message
|
||||
))
|
||||
})
|
||||
// Add blanket level filter -
|
||||
.level(log::LevelFilter::Debug)
|
||||
// - and per-module overrides
|
||||
// Output to stdout, files, and other Dispatch configurations
|
||||
.chain(std::io::stdout())
|
||||
.chain(fern::log_file("output.log").expect("could not open log output file"))
|
||||
// Apply globally
|
||||
.apply()
|
||||
.expect("could not apply logger configuration");
|
||||
|
||||
log::info!("starting simulation thread");
|
||||
// This thread schedules the simulator.
|
||||
let sim_thread = thread::spawn(move || {
|
||||
sim_ctrl.run(t0, 1);
|
||||
});
|
||||
|
||||
let mut udp_server =
|
||||
SimUdpServer::new(SIM_CTRL_UDP_PORT, request_sender, reply_receiver, 200, None)
|
||||
.expect("could not create UDP request server");
|
||||
log::info!("starting UDP server on port {}", SIM_CTRL_UDP_PORT);
|
||||
let mut udp_server = SimUdpServer::new(0, request_sender, reply_receiver, 200, None)
|
||||
.expect("could not create UDP request server");
|
||||
// This thread manages the simulator UDP server.
|
||||
let udp_tc_thread = thread::spawn(move || {
|
||||
udp_server.run();
|
||||
|
@ -26,13 +26,10 @@ impl SimTestbench {
|
||||
request_sender,
|
||||
}
|
||||
}
|
||||
pub fn handle_sim_requests_time_agnostic(&mut self) {
|
||||
self.handle_sim_requests(MonotonicTime::EPOCH);
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.sim_controller {
|
||||
pub fn handle_sim_requests(&mut self, old_timestamp: MonotonicTime);
|
||||
pub fn handle_sim_requests(&mut self);
|
||||
}
|
||||
to self.sim_controller.simulation {
|
||||
pub fn step(&mut self);
|
||||
|
@ -270,7 +270,7 @@ mod tests {
|
||||
UdpTestbench::new(true, Some(SERVER_WAIT_TIME_MS), 10)
|
||||
.expect("could not create testbench");
|
||||
let server_thread = std::thread::spawn(move || udp_server.run());
|
||||
let sim_request = SimRequest::new_with_epoch_time(PcduRequest::RequestSwitchInfo);
|
||||
let sim_request = SimRequest::new(PcduRequest::RequestSwitchInfo);
|
||||
udp_testbench
|
||||
.send_request(&sim_request)
|
||||
.expect("sending request failed");
|
||||
@ -292,7 +292,7 @@ mod tests {
|
||||
.expect("could not create testbench");
|
||||
let server_thread = std::thread::spawn(move || udp_server.run());
|
||||
udp_testbench
|
||||
.send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
|
||||
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
|
||||
.expect("sending request failed");
|
||||
|
||||
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
|
||||
@ -316,7 +316,7 @@ mod tests {
|
||||
// Send a ping so that the server knows the address of the client.
|
||||
        // Do not check that the request arrives on the receiver side; that is covered by another test.
|
||||
udp_testbench
|
||||
.send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
|
||||
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
|
||||
.expect("sending request failed");
|
||||
|
||||
// Send a reply to the server, ensure it gets forwarded to the client.
|
||||
@ -347,7 +347,7 @@ mod tests {
|
||||
|
||||
// Connect by sending a ping.
|
||||
udp_testbench
|
||||
.send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
|
||||
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
|
||||
.expect("sending request failed");
|
||||
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
|
||||
|
||||
@ -376,7 +376,7 @@ mod tests {
|
||||
|
||||
// Connect by sending a ping.
|
||||
udp_testbench
|
||||
.send_request(&SimRequest::new_with_epoch_time(SimCtrlRequest::Ping))
|
||||
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
|
||||
.expect("sending request failed");
|
||||
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
|
||||
|
||||
|
@ -8,19 +8,6 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
# [unreleased]
|
||||
|
||||
# [v0.1.4] 2024-04-24
|
||||
|
||||
## Added
|
||||
|
||||
- `ResultU16::from_be_bytes`
|
||||
- `From<u16>` impl for `ResultU16`.
|
||||
- Optional `defmt` support: `defmt::Format` impl on `ResultU16` if the `defmt` feature is
|
||||
activated.
|
||||
|
||||
# [v0.1.3] 2024-04-16
|
||||
|
||||
Allow `spacepackets` range starting with v0.10 and v0.11.
|
||||
|
||||
# [v0.1.2] 2024-02-17
|
||||
|
||||
- Bumped `spacepackets` to v0.10.0 for `UnsignedEnum` trait change.
|
||||
|
@ -1,7 +1,7 @@
|
||||
[package]
|
||||
name = "satrs-shared"
|
||||
description = "Components shared by multiple sat-rs crates"
|
||||
version = "0.1.4"
|
||||
version = "0.1.2"
|
||||
edition = "2021"
|
||||
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
|
||||
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
|
||||
@ -17,17 +17,14 @@ version = "1"
|
||||
default-features = false
|
||||
optional = true
|
||||
|
||||
[dependencies.defmt]
|
||||
version = "0.3"
|
||||
optional = true
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = ">0.9, <=0.11"
|
||||
git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
|
||||
version = "0.11.0-rc.0"
|
||||
branch = "main"
|
||||
default-features = false
|
||||
|
||||
[features]
|
||||
serde = ["dep:serde", "spacepackets/serde"]
|
||||
spacepackets = ["dep:defmt", "spacepackets/defmt"]
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]
|
||||
rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
|
||||
|
@ -1,4 +1,3 @@
|
||||
//! This crate contains modules shared among other sat-rs framework crates.
|
||||
#![no_std]
|
||||
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
|
||||
pub mod res_code;
|
||||
|
@ -7,7 +7,6 @@ use spacepackets::ByteConversionError;
|
||||
/// Simple [u16]-based result code type which also allows grouping related result codes.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
pub struct ResultU16 {
|
||||
group_id: u8,
|
||||
unique_id: u8,
|
||||
@ -20,28 +19,15 @@ impl ResultU16 {
|
||||
unique_id,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn raw(&self) -> u16 {
|
||||
((self.group_id as u16) << 8) | self.unique_id as u16
|
||||
}
|
||||
|
||||
pub fn group_id(&self) -> u8 {
|
||||
self.group_id
|
||||
}
|
||||
|
||||
pub fn unique_id(&self) -> u8 {
|
||||
self.unique_id
|
||||
}
|
||||
|
||||
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
|
||||
Self::from(u16::from_be_bytes(bytes))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u16> for ResultU16 {
|
||||
fn from(value: u16) -> Self {
|
||||
Self::new(((value >> 8) & 0xff) as u8, (value & 0xff) as u8)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ResultU16> for EcssEnumU16 {
|
||||
@ -98,14 +84,5 @@ mod tests {
|
||||
assert_eq!(written, 2);
|
||||
assert_eq!(buf[0], 1);
|
||||
assert_eq!(buf[1], 1);
|
||||
let read_back = ResultU16::from_be_bytes(buf);
|
||||
assert_eq!(read_back, result_code);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_u16() {
|
||||
let result_code = ResultU16::new(1, 1);
|
||||
let result_code_2 = ResultU16::from(result_code.raw());
|
||||
assert_eq!(result_code, result_code_2);
|
||||
}
|
||||
}
|
||||
|
@ -8,104 +8,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
|
||||
|
||||
# [unreleased]
|
||||
|
||||
# [v0.2.1] 2024-05-19
|
||||
|
||||
## Changed
|
||||
|
||||
- The HAL TCP server `ServerConfig::new` method now sets the `reuse_port` and `reuse_addr`
|
||||
fields to `true`.
|
||||
|
||||
## Fixed
|
||||
|
||||
- Possibly subtly broken v0.2.0 build artifact.
|
||||
|
||||
# [v0.2.0] 2024-05-02
|
||||
|
||||
## Changed
|
||||
|
||||
- Various improvements for the PUS stack components.
|
||||
|
||||
## Added
|
||||
|
||||
- Added `HandlingStatus` enumeration.
|
||||
|
||||
# [v0.2.0-rc.5] 2024-04-24
|
||||
|
||||
## Added
|
||||
|
||||
- Optional `defmt::Format` support for the event types, if the `defmt` feature is activated.
|
||||
|
||||
## Changed
|
||||
|
||||
- Removed `MpscEventReceiver`; the `EventReceiveProvider` trait is now implemented directly
|
||||
  on `mpsc::Receiver<EventMessage<Event>>`.
|
||||
- Renamed `PusEventDispatcher` to `PusEventTmCreatorWithMap`.
|
||||
- Renamed `DefaultPusEventU32Dispatcher` to `DefaultPusEventU32EventCreator`.
|
||||
- Renamed `PusEventMgmtBackendProvider` to `PusEventReportingMap`.
|
||||
- Renamed Event `const_new` methods to `new` and the former `new` methods to `new_checked`.
|
||||
|
||||
# [v0.2.0-rc.4] 2024-04-23
|
||||
|
||||
## Changed
|
||||
|
||||
- The `parse_for_ccsds_space_packets` method now expects a non-mutable slice and does not copy
|
||||
broken tail packets anymore. It also does not expect a mutable `next_write_idx` argument anymore.
|
||||
Instead, a `ParseResult` structure is returned which contains the `packets_found` and an
|
||||
optional `incomplete_tail_start` value.
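
  As a rough illustration of the chunked-parsing pattern this enables, the sketch below defines
  a stand-in `ParseResult` with just the two fields named above; it is not the crate's actual
  type, and `parse` is only a hypothetical placeholder for the real call:

  ```rust
  // Illustrative only: mirrors the two fields described above, not the real crate type.
  struct ParseResult {
      packets_found: usize,
      incomplete_tail_start: Option<usize>,
  }

  // Hypothetical stand-in for the actual parsing function.
  fn parse(_buf: &[u8]) -> ParseResult {
      ParseResult { packets_found: 0, incomplete_tail_start: None }
  }

  fn handle_chunk(recv_buf: &mut Vec<u8>) {
      let result = parse(recv_buf.as_slice());
      println!("found {} full packets", result.packets_found);
      if let Some(tail_start) = result.incomplete_tail_start {
          // Keep the incomplete tail so the next read can complete it.
          recv_buf.drain(..tail_start);
      } else {
          recv_buf.clear();
      }
  }

  fn main() {
      let mut buf = vec![0u8; 16];
      handle_chunk(&mut buf);
  }
  ```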
|
||||
|
||||
## Fixed
|
||||
|
||||
- `parse_for_ccsds_space_packets` did not detect CCSDS space packets at the buffer end with the
|
||||
smallest possible size of 7 bytes.
|
||||
- TCP server component now re-registers the internal `mio::Poll` object if the client reset
|
||||
the connection unexpectedly. Not doing so prevented the server from functioning properly
|
||||
after a re-connect.
|
||||
|
||||
# [v0.2.0-rc.3] 2024-04-17
|
||||
|
||||
docs-rs hotfix 2
|
||||
|
||||
# [v0.2.0-rc.2] 2024-04-17
|
||||
|
||||
docs-rs hotfix
|
||||
|
||||
# [v0.2.0-rc.1] 2024-04-17
|
||||
|
||||
- `spacepackets` v0.11
|
||||
|
||||
## Added
|
||||
|
||||
- Added `params::WritableToBeBytes::to_vec`.
|
||||
- New `ComponentId` (`u64` typedef for now) which replaces the former `TargetId` as a generic
|
||||
way to identify components.
|
||||
- Various abstractions and objects for targeted requests. This includes mode request/reply
|
||||
types for actions, HK and modes.
|
||||
- `VerificationReportingProvider::owner_id` method.
|
||||
- Introduced generic `EventMessage` which is generic over the event type and the additional
|
||||
parameter type. This message also contains the sender ID which can be useful for debugging
|
||||
or application layer / FDIR logic.
|
||||
- Stop signal handling for the TCP servers.
|
||||
- TCP server now uses `mio` crate to allow non-blocking operation. The server can now handle
|
||||
multiple connections at once, and the context information about handled transfers is
|
||||
passed via a callback which is inserted as a generic as well.
|
||||
|
||||
## Changed
|
||||
|
||||
- Renamed `ReceivesTcCore` to `PacketSenderRaw` to better show its primary purpose. It now contains
|
||||
a `send_raw_tc` method which is not mutable anymore.
|
||||
- Renamed `TmPacketSourceCore` to `TmPacketSource`.
|
||||
- Renamed `EcssTmSenderCore` to `EcssTmSender`.
|
||||
- Renamed `StoreAddr` to `PoolAddr`.
|
||||
- Renamed `StoreError` to `PoolError`.
|
||||
- Changed the TCP server generics order; the error generics now come last.
|
||||
- `encoding::ccsds::PacketIdValidator` renamed to `ValidatorU16Id`, which lives in the crate root.
|
||||
It can be used for both CCSDS packet ID and CCSDS APID validation.
|
||||
- `EventManager::try_event_handling` now expects a mutable error handling closure instead of
|
||||
  returning the occurred errors; a small sketch of the pattern follows below.
|
||||
- Renamed `EventManagerBase` to `EventReportCreator`.
|
||||
- Renamed `VerificationReporterCore` to `VerificationReportCreator`.
|
||||
- Removed `VerificationReporterCore`. The high-level API exposed by `VerificationReporter` and
|
||||
the low level API exposed by `VerificationReportCreator` should be sufficient for all use-cases.
|
||||
- Refactored `EventManager` to heavily use generics instead of trait objects.
|
||||
- `SendEventProvider` -> `EventSendProvider`. `id` trait method renamed to `channel_id`.
|
||||
- `ListenerTable` -> `ListenerMapProvider`
|
||||
@ -114,40 +18,16 @@ docs-rs hotfix
|
||||
- Refactored ECSS TM sender abstractions to be generic over different message queue backends.
|
||||
- Refactored Verification Reporter abstractions and implementation to be generic over the sender
|
||||
instead of using trait objects.
|
||||
- Renamed `WritableToBeBytes::raw_len` to `WritableToBeBytes::written_len` for consistency.
|
||||
- `PusServiceProvider` renamed to `PusServiceDistributor` to make the purpose of the object
|
||||
  clearer.
|
||||
- `PusServiceProvider::handle_pus_tc_packet` renamed to `PusServiceDistributor::distribute_packet`.
|
||||
- `PusServiceDistributor` and `CcsdsDistributor` now use generics instead of trait objects.
|
||||
  This also makes it easier to access the concrete trait implementations.
|
||||
- Major overhaul of the PUS handling module.
|
||||
- Replace `TargetId` by `ComponentId`.
|
||||
- Replace most usages of `ChannelId` by `ComponentId`. A dedicated channel ID has limited usage
|
||||
due to the nature of typed channels in Rust.
|
||||
- `CheckTimer` renamed to `CountdownProvider`.
|
||||
- Renamed `TargetId` to `ComponentId`.
|
||||
- Replaced most `ChannelId` occurrences with `ComponentId`. For typed channels, there is generally
|
||||
no need for dedicated channel IDs.
|
||||
- Changed `params::WritableToBeBytes::raw_len` to `written_len` for consistency.
|
||||
- `EventReporter` caches component ID.
|
||||
- Renamed `PusService11SchedHandler` to `PusSchedServiceHandler`.
|
||||
- Fixed general naming of PUS handlers from `handle_one_tc` to `poll_and_handle_next_tc`.
|
||||
- Reworked verification module: The sender (`impl EcssTmSenderCore`)
|
||||
  now needs to be passed explicitly to the `VerificationReportingProvider` abstraction. This
|
||||
allows easier sharing of the TM sender component.
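
To illustrate the closure-based error handling mentioned for `EventManager::try_event_handling`
above, here is a minimal, self-contained sketch of the pattern; the type and signature are
invented for the example and do not mirror the actual satrs API:

```rust
// Hypothetical event manager used purely to illustrate the closure-based
// error handling pattern; this is not the satrs API.
struct DemoEventManager {
    pending: Vec<Result<u32, String>>,
}

impl DemoEventManager {
    // Instead of returning a list of errors, the caller passes a mutable
    // closure which is invoked for every error that occurs during handling.
    fn try_event_handling<F: FnMut(&String)>(&mut self, mut error_handler: F) {
        for event in self.pending.drain(..) {
            match event {
                Ok(ev) => println!("handled event {ev}"),
                Err(e) => error_handler(&e),
            }
        }
    }
}

fn main() {
    let mut mgr = DemoEventManager {
        pending: vec![Ok(1), Err("routing failed".to_string()), Ok(2)],
    };
    mgr.try_event_handling(|e| eprintln!("event handling error: {e}"));
}
```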
|
||||
|
||||
## Fixed
|
||||
|
||||
- Update deprecated API for `PusScheduler::insert_wrapped_tc_cds_short`
|
||||
and `PusScheduler::insert_wrapped_tc_cds_long`.
|
||||
- `EventReporter` uses the interior mutability pattern to allow a non-mutable API.
|
||||
|
||||
## Removed
|
||||
|
||||
- Remove `objects` module.
|
||||
- Removed CCSDS and PUS distributor modules. Their worth is questionable in an architecture
|
||||
where routing traits are sufficient and the core logic to demultiplex and distribute packets
|
||||
is simple enough to be application code.
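
  As a rough sketch of what such application-level demultiplexing can look like, the example
  below extracts the APID from the CCSDS primary header (the 11-bit APID in the first two
  header bytes is standard CCSDS) and routes raw packets over plain mpsc channels; the APID
  values and routing map are made up for illustration:

  ```rust
  use std::collections::HashMap;
  use std::sync::mpsc;

  // The APID lives in the lower 11 bits of the first two bytes of the CCSDS primary header.
  fn ccsds_apid(raw: &[u8]) -> Option<u16> {
      if raw.len() < 6 {
          return None; // shorter than a CCSDS primary header
      }
      Some(u16::from_be_bytes([raw[0], raw[1]]) & 0x7FF)
  }

  // Minimal application-level demultiplexer: forward raw packets to per-APID queues.
  fn demux(raw: &[u8], routes: &HashMap<u16, mpsc::Sender<Vec<u8>>>) {
      match ccsds_apid(raw) {
          Some(apid) => match routes.get(&apid) {
              Some(sender) => {
                  sender.send(raw.to_vec()).ok();
              }
              None => eprintln!("no route for APID {apid}"),
          },
          None => eprintln!("packet too short for a CCSDS primary header"),
      }
  }

  fn main() {
      let (tx, rx) = mpsc::channel();
      let mut routes = HashMap::new();
      routes.insert(0x02u16, tx);
      // A minimal 6-byte primary header carrying APID 0x02, with no user data shown here.
      demux(&[0x00, 0x02, 0xC0, 0x00, 0x00, 0x00], &routes);
      assert_eq!(rx.try_recv().unwrap().len(), 6);
  }
  ```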
|
||||
|
||||
# [v0.2.0-rc.0] 2024-02-21
|
||||
|
||||
|
@ -1,8 +1,8 @@
|
||||
[package]
|
||||
name = "satrs"
|
||||
version = "0.2.1"
|
||||
version = "0.2.0-rc.0"
|
||||
edition = "2021"
|
||||
rust-version = "1.71.1"
|
||||
rust-version = "1.61"
|
||||
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
|
||||
description = "A framework to build software for remote systems"
|
||||
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
|
||||
@ -15,31 +15,17 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
|
||||
[dependencies]
|
||||
delegate = ">0.7, <=0.10"
|
||||
paste = "1"
|
||||
derive-new = "0.6"
|
||||
smallvec = "1"
|
||||
crc = "3"
|
||||
|
||||
[dependencies.satrs-shared]
|
||||
version = ">=0.1.3, <0.2"
|
||||
version = "0.1.2"
|
||||
path = "../satrs-shared"
|
||||
|
||||
[dependencies.num_enum]
|
||||
version = ">0.5, <=0.7"
|
||||
default-features = false
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = "0.11"
|
||||
default-features = false
|
||||
|
||||
[dependencies.cobs]
|
||||
git = "https://github.com/robamu/cobs.rs.git"
|
||||
version = "0.2.3"
|
||||
branch = "all_features"
|
||||
default-features = false
|
||||
|
||||
[dependencies.num-traits]
|
||||
version = "0.2"
|
||||
default-features = false
|
||||
|
||||
[dependencies.dyn-clone]
|
||||
version = "1"
|
||||
optional = true
|
||||
@ -52,6 +38,10 @@ optional = true
|
||||
version = "0.7"
|
||||
optional = true
|
||||
|
||||
[dependencies.num-traits]
|
||||
version = "0.2"
|
||||
default-features = false
|
||||
|
||||
[dependencies.downcast-rs]
|
||||
version = "1.2"
|
||||
default-features = false
|
||||
@ -80,19 +70,22 @@ version = "0.5.4"
|
||||
features = ["all"]
|
||||
optional = true
|
||||
|
||||
[dependencies.mio]
|
||||
version = "0.8"
|
||||
features = ["os-poll", "net"]
|
||||
optional = true
|
||||
[dependencies.spacepackets]
|
||||
git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
|
||||
version = "0.11.0-rc.0"
|
||||
branch = "main"
|
||||
default-features = false
|
||||
|
||||
[dependencies.defmt]
|
||||
version = "0.3"
|
||||
optional = true
|
||||
[dependencies.cobs]
|
||||
git = "https://github.com/robamu/cobs.rs.git"
|
||||
version = "0.2.3"
|
||||
branch = "all_features"
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies]
|
||||
serde = "1"
|
||||
zerocopy = "0.7"
|
||||
once_cell = "1"
|
||||
once_cell = "1.13"
|
||||
serde_json = "1"
|
||||
rand = "0.8"
|
||||
tempfile = "3"
|
||||
@ -112,8 +105,7 @@ std = [
|
||||
"spacepackets/std",
|
||||
"num_enum/std",
|
||||
"thiserror",
|
||||
"socket2",
|
||||
"mio"
|
||||
"socket2"
|
||||
]
|
||||
alloc = [
|
||||
"serde/alloc",
|
||||
@ -125,10 +117,8 @@ alloc = [
|
||||
serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
|
||||
crossbeam = ["crossbeam-channel"]
|
||||
heapless = ["dep:heapless"]
|
||||
defmt = ["dep:defmt", "spacepackets/defmt"]
|
||||
test_util = []
|
||||
doc-images = []
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docs_rs", "--generate-link-to-definition"]
|
||||
rustdoc-args = ["--cfg", "doc_cfg", "--generate-link-to-definition"]
|
||||
|
@ -4,11 +4,11 @@ Checklist for new releases
|
||||
# Pre-Release
|
||||
|
||||
1. Make sure any new modules are sufficiently documented and check the docs with
|
||||
`cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
|
||||
`cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
|
||||
2. Bump version specifier in `Cargo.toml`.
|
||||
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
|
||||
`unreleased` section.
|
||||
4. Run `cargo test --all-features` or `cargo nextest r --all-features` and `cargo test --doc`.
|
||||
4. Run `cargo test --all-features`.
|
||||
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
|
||||
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
|
||||
targets.
|
||||
|
@ -1,68 +1,63 @@
|
||||
use crate::{params::Params, pool::PoolAddr};
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
use crate::{pool::StoreAddr, TargetId};
|
||||
|
||||
pub type ActionId = u32;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Clone)]
|
||||
pub struct ActionRequest {
|
||||
pub action_id: ActionId,
|
||||
pub variant: ActionRequestVariant,
|
||||
}
|
||||
|
||||
impl ActionRequest {
|
||||
pub fn new(action_id: ActionId, variant: ActionRequestVariant) -> Self {
|
||||
Self { action_id, variant }
|
||||
}
|
||||
}
|
||||
|
||||
#[non_exhaustive]
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub enum ActionRequestVariant {
|
||||
NoData,
|
||||
StoreData(PoolAddr),
|
||||
pub enum ActionRequest {
|
||||
UnsignedIdAndStoreData {
|
||||
action_id: ActionId,
|
||||
data_addr: StoreAddr,
|
||||
},
|
||||
#[cfg(feature = "alloc")]
|
||||
VecData(alloc::vec::Vec<u8>),
|
||||
UnsignedIdAndVecData {
|
||||
action_id: ActionId,
|
||||
data: alloc::vec::Vec<u8>,
|
||||
},
|
||||
#[cfg(feature = "alloc")]
|
||||
StringIdAndVecData {
|
||||
action_id: alloc::string::String,
|
||||
data: alloc::vec::Vec<u8>,
|
||||
},
|
||||
#[cfg(feature = "alloc")]
|
||||
StringIdAndStoreData {
|
||||
action_id: alloc::string::String,
|
||||
data: StoreAddr,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct ActionReply {
|
||||
pub action_id: ActionId,
|
||||
pub variant: ActionReplyVariant,
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct TargetedActionRequest {
|
||||
target: TargetId,
|
||||
action_request: ActionRequest,
|
||||
}
|
||||
|
||||
impl TargetedActionRequest {
|
||||
pub fn new(target: TargetId, action_request: ActionRequest) -> Self {
|
||||
Self {
|
||||
target,
|
||||
action_request,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A reply to an action request.
|
||||
#[non_exhaustive]
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
pub enum ActionReplyVariant {
|
||||
CompletionFailed(Params),
|
||||
StepFailed { step: u32, reason: Params },
|
||||
Completed,
|
||||
#[derive(Clone, Eq, PartialEq, Debug)]
|
||||
pub enum ActionReply {
|
||||
CompletionFailed(ActionId),
|
||||
StepFailed {
|
||||
id: ActionId,
|
||||
step: u32,
|
||||
},
|
||||
Completed(ActionId),
|
||||
#[cfg(feature = "alloc")]
|
||||
CompletedStringId(alloc::string::String),
|
||||
#[cfg(feature = "alloc")]
|
||||
CompletionFailedStringId(alloc::string::String),
|
||||
#[cfg(feature = "alloc")]
|
||||
StepFailedStringId {
|
||||
id: alloc::string::String,
|
||||
step: u32,
|
||||
},
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Clone)]
|
||||
pub struct ActionRequestStringId {
|
||||
pub action_id: alloc::string::String,
|
||||
pub variant: ActionRequestVariant,
|
||||
}
|
||||
|
||||
impl ActionRequestStringId {
|
||||
pub fn new(action_id: alloc::string::String, variant: ActionRequestVariant) -> Self {
|
||||
Self { action_id, variant }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct ActionReplyStringId {
|
||||
pub action_id: alloc::string::String,
|
||||
pub variant: ActionReplyVariant,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
||||
|
@ -5,7 +5,7 @@ use std::path::{Path, PathBuf};
|
||||
use super::{
|
||||
filestore::{FilestoreError, VirtualFilestore},
|
||||
user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams},
|
||||
CheckTimerCreator, CountdownProvider, EntityType, LocalEntityConfig, PacketInfo, PacketTarget,
|
||||
CheckTimer, CheckTimerCreator, EntityType, LocalEntityConfig, PacketInfo, PacketTarget,
|
||||
RemoteEntityConfig, RemoteEntityConfigProvider, State, TimerContext, TransactionId,
|
||||
TransactionStep,
|
||||
};
|
||||
@ -54,7 +54,7 @@ struct TransferState {
|
||||
completion_disposition: CompletionDisposition,
|
||||
checksum: u32,
|
||||
current_check_count: u32,
|
||||
current_check_timer: Option<Box<dyn CountdownProvider>>,
|
||||
current_check_timer: Option<Box<dyn CheckTimer>>,
|
||||
}
|
||||
|
||||
impl Default for TransferState {
|
||||
@ -799,9 +799,9 @@ mod tests {
|
||||
};
|
||||
|
||||
use crate::cfdp::{
|
||||
filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimerCreator,
|
||||
CountdownProvider, DefaultFaultHandler, IndicationConfig, RemoteEntityConfig,
|
||||
StdRemoteEntityConfigProvider, UserFaultHandler, CRC_32,
|
||||
filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimer, CheckTimerCreator,
|
||||
DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, StdRemoteEntityConfigProvider,
|
||||
UserFaultHandler, CRC_32,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
@ -1057,7 +1057,7 @@ mod tests {
|
||||
expired: Arc<AtomicBool>,
|
||||
}
|
||||
|
||||
impl CountdownProvider for TestCheckTimer {
|
||||
impl CheckTimer for TestCheckTimer {
|
||||
fn has_expired(&self) -> bool {
|
||||
self.expired.load(core::sync::atomic::Ordering::Relaxed)
|
||||
}
|
||||
@ -1088,10 +1088,7 @@ mod tests {
|
||||
}
|
||||
|
||||
impl CheckTimerCreator for TestCheckTimerCreator {
|
||||
fn get_check_timer_provider(
|
||||
&self,
|
||||
timer_context: TimerContext,
|
||||
) -> Box<dyn CountdownProvider> {
|
||||
fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer> {
|
||||
match timer_context {
|
||||
TimerContext::CheckLimit { .. } => {
|
||||
Box::new(TestCheckTimer::new(self.check_limit_expired_flag.clone()))
|
||||
|
@ -17,8 +17,6 @@ use alloc::boxed::Box;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};

use crate::time::CountdownProvider;

#[cfg(feature = "std")]
pub mod dest;
#[cfg(feature = "alloc")]
@ -47,15 +45,7 @@ pub enum TimerContext {
    },
}

/// A generic trait which allows CFDP entities to create check timers which are required to
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
/// and 4.6.3.3.
///
/// This trait also allows the creation of different check timers depending on context and purpose
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using an RTC) or
/// other factors.
///
/// The countdown timer is used by 3 mechanisms of the CFDP protocol.
/// Generic abstraction for a check timer which is used by 3 mechanisms of the CFDP protocol.
///
/// ## 1. Check limit handling
///
@ -84,9 +74,22 @@ pub enum TimerContext {
/// The timer will be used to perform the Positive Acknowledgement Procedures as specified in
/// 4.7.1 of the CFDP standard. The expiration period will be provided by the Positive ACK timer
/// interval of the remote entity configuration.
pub trait CheckTimer: Debug {
    fn has_expired(&self) -> bool;
    fn reset(&mut self);
}

/// A generic trait which allows CFDP entities to create check timers which are required to
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
/// and 4.6.3.3. The [CheckTimer] documentation provides more information about the purpose of the
/// check timer in the context of CFDP.
///
/// This trait also allows the creation of different check timers depending on context and purpose
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using an RTC) or
/// other factors.
#[cfg(feature = "alloc")]
pub trait CheckTimerCreator {
    fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CountdownProvider>;
    fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer>;
}

/// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime.
@ -109,7 +112,7 @@ impl StdCheckTimer {
}

#[cfg(feature = "std")]
impl CountdownProvider for StdCheckTimer {
impl CheckTimer for StdCheckTimer {
    fn has_expired(&self) -> bool {
        let elapsed_time = self.start_time.elapsed();
        if elapsed_time.as_secs() > self.expiry_time_seconds {
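
For orientation, the `CheckTimer` trait on the right-hand side of this hunk only requires `has_expired` and `reset` plus the `Debug` supertrait, so user code can supply its own timer. A minimal sketch of such an implementation (not part of this diff; `FixedCheckTimer` is a made-up name):

```rust
use std::time::{Duration, Instant};

// Hypothetical check timer with a fixed expiry period; `CheckTimer` is the
// trait introduced in the hunk above.
#[derive(Debug)]
pub struct FixedCheckTimer {
    start: Instant,
    period: Duration,
}

impl CheckTimer for FixedCheckTimer {
    fn has_expired(&self) -> bool {
        // Expired once the configured period has elapsed since the last reset.
        self.start.elapsed() >= self.period
    }

    fn reset(&mut self) {
        self.start = Instant::now();
    }
}
```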
@ -1,163 +1,163 @@
|
||||
use spacepackets::{CcsdsPacket, SpHeader};
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec::Vec;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashSet;
|
||||
use spacepackets::PacketId;
|
||||
|
||||
use crate::{tmtc::PacketSenderRaw, ComponentId};
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum SpValidity {
|
||||
Valid,
|
||||
/// The space packet can be assumed to have a valid format, but the packet should
|
||||
/// be skipped.
|
||||
Skip,
|
||||
/// The space packet or space packet header has an invalid format, for example a CRC check
|
||||
/// failed. In that case, the parser loses the packet synchronization and needs to check for
|
||||
/// the start of a new space packet header start again. The space packet header
|
||||
/// [spacepackets::PacketId] can be used as a synchronization marker to detect the start
|
||||
/// of a possible valid packet again.
|
||||
Invalid,
|
||||
pub trait PacketIdLookup {
|
||||
fn validate(&self, packet_id: u16) -> bool;
|
||||
}
|
||||
|
||||
/// Simple trait to allow user code to check the validity of a space packet.
|
||||
pub trait SpacePacketValidator {
|
||||
fn validate(&self, sp_header: &SpHeader, raw_buf: &[u8]) -> SpValidity;
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, PartialEq, Eq)]
|
||||
pub struct ParseResult {
|
||||
pub packets_found: u32,
|
||||
/// If an incomplete space packet was found, its start index is indicated by this value.
|
||||
pub incomplete_tail_start: Option<usize>,
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [u16] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&packet_id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for &[u16] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&packet_id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for &[PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
/// [spacepackets::SpHeader] of the CCSDS packets and a user provided [SpacePacketValidator]
/// to check whether a received space packet is relevant for processing.
/// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then
/// uses the length field of the packet to extract CCSDS packets.
///
/// This function is also able to deal with broken tail packets at the end as long as the parser
/// can read the full 7 bytes which constitute a space packet header plus one byte minimal size.
/// If broken tail packets are detected, they are moved to the front of the buffer, and the write
/// index for future write operations will be written to the `next_write_idx` argument.
///
/// The parser will behave differently based on the [SpValidity] returned from the user provided
/// [SpacePacketValidator]:
///
/// 1. [SpValidity::Valid]: The parser will forward all packets to the given `packet_sender` and
///    return the number of packets found. If the [PacketSenderRaw::send_packet] call fails, the
///    error will be returned.
/// 2. [SpValidity::Invalid]: The parser assumes that the synchronization is lost and tries to
///    find the start of a new space packet header by scanning all the following bytes.
/// 3. [SpValidity::Skip]: The parser skips the packet using the packet length determined from the
///    space packet header.
pub fn parse_buffer_for_ccsds_space_packets<SendError>(
    buf: &[u8],
    packet_validator: &(impl SpacePacketValidator + ?Sized),
    sender_id: ComponentId,
    packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
) -> Result<ParseResult, SendError> {
    let mut parse_result = ParseResult::default();
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`
/// and return the number of packets found. If the [ReceivesTcCore::pass_tc] call fails, the
/// error will be returned.
pub fn parse_buffer_for_ccsds_space_packets<E>(
    buf: &mut [u8],
    packet_id_lookup: &(impl PacketIdLookup + ?Sized),
    tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized),
    next_write_idx: &mut usize,
) -> Result<u32, E> {
*next_write_idx = 0;
|
||||
let mut packets_found = 0;
|
||||
let mut current_idx = 0;
|
||||
let buf_len = buf.len();
|
||||
loop {
|
||||
if current_idx + 7 > buf.len() {
|
||||
if current_idx + 7 >= buf.len() {
|
||||
break;
|
||||
}
|
||||
let sp_header = SpHeader::from_be_bytes(&buf[current_idx..]).unwrap().0;
|
||||
match packet_validator.validate(&sp_header, &buf[current_idx..]) {
|
||||
SpValidity::Valid => {
|
||||
let packet_size = sp_header.total_len();
|
||||
if (current_idx + packet_size) <= buf_len {
|
||||
packet_sender
|
||||
.send_packet(sender_id, &buf[current_idx..current_idx + packet_size])?;
|
||||
parse_result.packets_found += 1;
|
||||
} else {
|
||||
// Move packet to start of buffer if applicable.
|
||||
parse_result.incomplete_tail_start = Some(current_idx);
|
||||
let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap());
|
||||
if packet_id_lookup.validate(packet_id) {
|
||||
let length_field =
|
||||
u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
|
||||
let packet_size = length_field + 7;
|
||||
if (current_idx + packet_size as usize) <= buf_len {
|
||||
tc_receiver.pass_tc(&buf[current_idx..current_idx + packet_size as usize])?;
|
||||
packets_found += 1;
|
||||
} else {
|
||||
// Move packet to start of buffer if applicable.
|
||||
if current_idx > 0 {
|
||||
buf.copy_within(current_idx.., 0);
|
||||
*next_write_idx = buf.len() - current_idx;
|
||||
}
|
||||
current_idx += packet_size;
|
||||
continue;
|
||||
}
|
||||
SpValidity::Skip => {
|
||||
current_idx += sp_header.total_len();
|
||||
}
|
||||
// We might have lost sync. Try to find the start of a new space packet header.
|
||||
SpValidity::Invalid => {
|
||||
current_idx += 1;
|
||||
}
|
||||
current_idx += packet_size as usize;
|
||||
continue;
|
||||
}
|
||||
current_idx += 1;
|
||||
}
|
||||
Ok(parse_result)
|
||||
Ok(packets_found)
|
||||
}
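
As a side note, not part of this diff: the `PacketIdLookup` trait on the right-hand side only requires a single `validate` method, so custom containers can be plugged in next to the provided slice and `Vec`/`HashSet` implementations. A minimal sketch (the `PacketIdRange` type is made up for illustration):

```rust
// Hypothetical lookup accepting any raw packet ID inside an inclusive range;
// `PacketIdLookup` is the trait defined earlier in this file.
pub struct PacketIdRange {
    pub first: u16,
    pub last: u16,
}

impl PacketIdLookup for PacketIdRange {
    fn validate(&self, packet_id: u16) -> bool {
        packet_id >= self.first && packet_id <= self.last
    }
}
```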
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, WritablePusPacket},
|
||||
CcsdsPacket, PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader,
|
||||
PacketId, SpHeader,
|
||||
};
|
||||
|
||||
use crate::{encoding::tests::TcCacher, ComponentId};
|
||||
use crate::encoding::tests::TcCacher;
|
||||
|
||||
use super::{parse_buffer_for_ccsds_space_packets, SpValidity, SpacePacketValidator};
|
||||
use super::parse_buffer_for_ccsds_space_packets;
|
||||
|
||||
const PARSER_ID: ComponentId = 0x05;
|
||||
const TEST_APID_0: u16 = 0x02;
|
||||
const TEST_APID_1: u16 = 0x10;
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
|
||||
|
||||
#[derive(Default)]
|
||||
struct SimpleVerificator {
|
||||
pub enable_second_id: bool,
|
||||
}
|
||||
|
||||
impl SimpleVerificator {
|
||||
pub fn new_with_second_id() -> Self {
|
||||
Self {
|
||||
enable_second_id: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SpacePacketValidator for SimpleVerificator {
|
||||
fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> super::SpValidity {
|
||||
if sp_header.packet_id() == TEST_PACKET_ID_0
|
||||
|| (self.enable_second_id && sp_header.packet_id() == TEST_PACKET_ID_1)
|
||||
{
|
||||
return SpValidity::Valid;
|
||||
}
|
||||
SpValidity::Skip
|
||||
}
|
||||
}
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let sph = SpHeader::new_from_apid(TEST_APID_0);
|
||||
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let tc_cacher = TcCacher::default();
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&buffer,
|
||||
&SimpleVerificator::default(),
|
||||
PARSER_ID,
|
||||
&tc_cacher,
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 1);
|
||||
let mut queue = tc_cacher.tc_queue.borrow_mut();
|
||||
assert_eq!(queue.len(), 1);
|
||||
let packet_with_sender = queue.pop_front().unwrap();
|
||||
assert_eq!(packet_with_sender.packet, buffer[..packet_len]);
|
||||
assert_eq!(packet_with_sender.sender_id, PARSER_ID);
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_packet() {
|
||||
let sph = SpHeader::new_from_apid(TEST_APID_0);
|
||||
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
|
||||
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
@ -165,35 +165,35 @@ mod tests {
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let tc_cacher = TcCacher::default();
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&buffer,
|
||||
&SimpleVerificator::default(),
|
||||
PARSER_ID,
|
||||
&tc_cacher,
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 2);
|
||||
let mut queue = tc_cacher.tc_queue.borrow_mut();
|
||||
assert_eq!(queue.len(), 2);
|
||||
let packet_with_addr = queue.pop_front().unwrap();
|
||||
assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
|
||||
assert_eq!(packet_with_addr.sender_id, PARSER_ID);
|
||||
let packet_with_addr = queue.pop_front().unwrap();
|
||||
assert_eq!(packet_with_addr.sender_id, PARSER_ID);
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
packet_with_addr.packet,
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_apid() {
|
||||
let sph = SpHeader::new_from_apid(TEST_APID_0);
|
||||
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
|
||||
let sph = SpHeader::new_from_apid(TEST_APID_1);
|
||||
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
@ -201,30 +201,35 @@ mod tests {
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let tc_cacher = TcCacher::default();
|
||||
let verificator = SimpleVerificator::new_with_second_id();
|
||||
let parse_result =
|
||||
parse_buffer_for_ccsds_space_packets(&buffer, &verificator, PARSER_ID, &tc_cacher);
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 2);
|
||||
let mut queue = tc_cacher.tc_queue.borrow_mut();
|
||||
assert_eq!(queue.len(), 2);
|
||||
let packet_with_addr = queue.pop_front().unwrap();
|
||||
assert_eq!(packet_with_addr.packet, buffer[..packet_len_ping]);
|
||||
let packet_with_addr = queue.pop_front().unwrap();
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
packet_with_addr.packet,
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_packet_multi() {
|
||||
let ping_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
|
||||
let action_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
@ -232,68 +237,45 @@ mod tests {
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let tc_cacher = TcCacher::default();
|
||||
let verificator = SimpleVerificator::new_with_second_id();
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&buffer[..packet_len_ping + packet_len_action - 4],
|
||||
&verificator,
|
||||
PARSER_ID,
|
||||
&tc_cacher,
|
||||
&mut buffer[..packet_len_ping + packet_len_action - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 1);
|
||||
assert!(parse_result.incomplete_tail_start.is_some());
|
||||
let incomplete_tail_idx = parse_result.incomplete_tail_start.unwrap();
|
||||
assert_eq!(incomplete_tail_idx, packet_len_ping);
|
||||
|
||||
let queue = tc_cacher.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 1);
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
// The broken packet was moved to the start, so the next write index should be after the
|
||||
// last segment missing 4 bytes.
|
||||
assert_eq!(next_write_idx, packet_len_action - 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_split_packet() {
|
||||
let ping_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let tc_cacher = TcCacher::default();
|
||||
|
||||
let verificator = SimpleVerificator::new_with_second_id();
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&buffer[..packet_len_ping - 4],
|
||||
&verificator,
|
||||
PARSER_ID,
|
||||
&tc_cacher,
|
||||
&mut buffer[..packet_len_ping - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert_eq!(next_write_idx, 0);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 0);
|
||||
let queue = tc_cacher.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_smallest_packet() {
|
||||
let ccsds_header_only = SpHeader::new(
|
||||
PacketId::new(PacketType::Tc, true, TEST_APID_0),
|
||||
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
|
||||
0,
|
||||
);
|
||||
let mut buf: [u8; 7] = [0; 7];
|
||||
ccsds_header_only
|
||||
.write_to_be_bytes(&mut buf)
|
||||
.expect("writing failed");
|
||||
let verificator = SimpleVerificator::default();
|
||||
let tc_cacher = TcCacher::default();
|
||||
let parse_result =
|
||||
parse_buffer_for_ccsds_space_packets(&buf, &verificator, PARSER_ID, &tc_cacher);
|
||||
assert!(parse_result.is_ok());
|
||||
let parse_result = parse_result.unwrap();
|
||||
assert_eq!(parse_result.packets_found, 1);
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 0);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 0);
|
||||
}
|
||||
}
|
||||
|
@ -1,4 +1,4 @@
use crate::{tmtc::PacketSenderRaw, ComponentId};
use crate::tmtc::ReceivesTcCore;
use cobs::{decode_in_place, encode, max_encoding_length};

/// This function encodes the given packet with COBS and also wraps the encoded packet with
@ -55,12 +55,11 @@ pub fn encode_packet_with_cobs(
/// future write operations will be written to the `next_write_idx` argument.
///
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`.
pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
pub fn parse_buffer_for_cobs_encoded_packets<E>(
    buf: &mut [u8],
    sender_id: ComponentId,
    packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
    tc_receiver: &mut dyn ReceivesTcCore<Error = E>,
    next_write_idx: &mut usize,
) -> Result<u32, SendError> {
) -> Result<u32, E> {
let mut start_index_packet = 0;
|
||||
let mut start_found = false;
|
||||
let mut last_byte = false;
|
||||
@ -79,10 +78,8 @@ pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
|
||||
let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
|
||||
if let Ok(packet_len) = decode_result {
|
||||
packets_found += 1;
|
||||
packet_sender.send_packet(
|
||||
sender_id,
|
||||
&buf[start_index_packet..start_index_packet + packet_len],
|
||||
)?;
|
||||
tc_receiver
|
||||
.pass_tc(&buf[start_index_packet..start_index_packet + packet_len])?;
|
||||
}
|
||||
start_found = false;
|
||||
} else {
|
||||
@ -103,39 +100,32 @@ pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
|
||||
pub(crate) mod tests {
|
||||
use cobs::encode;
|
||||
|
||||
use crate::{
|
||||
encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET},
|
||||
ComponentId,
|
||||
};
|
||||
use crate::encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET};
|
||||
|
||||
use super::parse_buffer_for_cobs_encoded_packets;
|
||||
|
||||
const PARSER_ID: ComponentId = 0x05;
|
||||
|
||||
#[test]
|
||||
fn test_parsing_simple_packet() {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
let queue = test_sender.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 1);
|
||||
let packet = &queue[0];
|
||||
assert_eq!(packet.packet, &SIMPLE_PACKET);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
let packet = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet, &SIMPLE_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parsing_consecutive_packets() {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
@ -149,23 +139,21 @@ pub(crate) mod tests {
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 2);
|
||||
let queue = test_sender.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 2);
|
||||
let packet0 = &queue[0];
|
||||
assert_eq!(packet0.packet, &SIMPLE_PACKET);
|
||||
let packet1 = &queue[1];
|
||||
assert_eq!(packet1.packet, &INVERTED_PACKET);
|
||||
assert_eq!(test_sender.tc_queue.len(), 2);
|
||||
let packet0 = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet0, &SIMPLE_PACKET);
|
||||
let packet1 = &test_sender.tc_queue[1];
|
||||
assert_eq!(packet1, &INVERTED_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_tail_packet_only() {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
@ -173,19 +161,17 @@ pub(crate) mod tests {
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - 1],
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
let queue = test_sender.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 0);
|
||||
assert_eq!(test_sender.tc_queue.len(), 0);
|
||||
assert_eq!(next_read_idx, 0);
|
||||
}
|
||||
|
||||
fn generic_test_split_packet(cut_off: usize) {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
assert!(cut_off < INVERTED_PACKET.len() + 1);
|
||||
let mut current_idx = 0;
|
||||
@ -207,15 +193,13 @@ pub(crate) mod tests {
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - cut_off],
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
let queue = test_sender.tc_queue.borrow();
|
||||
assert_eq!(queue.len(), 1);
|
||||
assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, next_expected_write_idx);
|
||||
assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start);
|
||||
}
|
||||
@ -237,7 +221,7 @@ pub(crate) mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_zero_at_end() {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut next_write_idx = 0;
|
||||
let mut current_idx = 0;
|
||||
@ -249,35 +233,31 @@ pub(crate) mod tests {
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx],
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
let queue = test_sender.tc_queue.borrow_mut();
|
||||
assert_eq!(queue.len(), 1);
|
||||
assert_eq!(&queue[0].packet, &SIMPLE_PACKET);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, 1);
|
||||
assert_eq!(encoded_buf[0], 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_zeroes() {
|
||||
let test_sender = TcCacher::default();
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut all_zeroes: [u8; 5] = [0; 5];
|
||||
let mut next_write_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut all_zeroes,
|
||||
PARSER_ID,
|
||||
&test_sender,
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
let queue = test_sender.tc_queue.borrow();
|
||||
assert!(queue.is_empty());
|
||||
assert!(test_sender.tc_queue.is_empty());
|
||||
assert_eq!(next_write_idx, 0);
|
||||
}
|
||||
}
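
The framing exercised by these tests is plain COBS with a 0x00 sentinel byte on both ends of each packet. A rough sketch of that wrapping using only the `cobs` crate (this helper is illustrative and is not the crate's own `encode_packet_with_cobs`):

```rust
use cobs::{encode, max_encoding_length};

// Illustrative 0-delimited COBS framing: [0x00] [COBS-encoded packet] [0x00].
fn frame_with_cobs(packet: &[u8], out: &mut [u8]) -> usize {
    assert!(out.len() >= max_encoding_length(packet.len()) + 2);
    let mut idx = 0;
    out[idx] = 0; // leading sentinel byte
    idx += 1;
    idx += encode(packet, &mut out[idx..]);
    out[idx] = 0; // trailing sentinel byte
    idx + 1
}
```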
|
||||
|
@ -6,14 +6,9 @@ pub use crate::encoding::cobs::{encode_packet_with_cobs, parse_buffer_for_cobs_e
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use core::cell::RefCell;
|
||||
use alloc::{collections::VecDeque, vec::Vec};
|
||||
|
||||
use alloc::collections::VecDeque;
|
||||
|
||||
use crate::{
|
||||
tmtc::{PacketAsVec, PacketSenderRaw},
|
||||
ComponentId,
|
||||
};
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
use super::cobs::encode_packet_with_cobs;
|
||||
|
||||
@ -22,15 +17,14 @@ pub(crate) mod tests {
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct TcCacher {
|
||||
pub(crate) tc_queue: RefCell<VecDeque<PacketAsVec>>,
|
||||
pub(crate) tc_queue: VecDeque<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl PacketSenderRaw for TcCacher {
|
||||
impl ReceivesTcCore for TcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut mut_queue = self.tc_queue.borrow_mut();
|
||||
mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
File diff suppressed because it is too large
@ -20,12 +20,12 @@
//! ```
//! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::const_new(Severity::LOW, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::const_new(2, 0);
//!
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! let small_event = EventU16::new(Severity::INFO, 3, 0);
//! ```
use core::fmt::Debug;
|
||||
use core::hash::Hash;
|
||||
@ -40,17 +40,12 @@ pub type LargestEventRaw = u32;
|
||||
/// Using a type definition allows to change this to u32 in the future more easily
|
||||
pub type LargestGroupIdRaw = u16;
|
||||
|
||||
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
|
||||
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
pub enum Severity {
|
||||
Info = 0,
|
||||
Low = 1,
|
||||
Medium = 2,
|
||||
High = 3,
|
||||
INFO = 0,
|
||||
LOW = 1,
|
||||
MEDIUM = 2,
|
||||
HIGH = 3,
|
||||
}
|
||||
|
||||
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
|
||||
@ -61,31 +56,31 @@ pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityInfo {}
|
||||
impl HasSeverity for SeverityInfo {
|
||||
const SEVERITY: Severity = Severity::Info;
|
||||
const SEVERITY: Severity = Severity::INFO;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityLow {}
|
||||
impl HasSeverity for SeverityLow {
|
||||
const SEVERITY: Severity = Severity::Low;
|
||||
const SEVERITY: Severity = Severity::LOW;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityMedium {}
|
||||
impl HasSeverity for SeverityMedium {
|
||||
const SEVERITY: Severity = Severity::Medium;
|
||||
const SEVERITY: Severity = Severity::MEDIUM;
|
||||
}
|
||||
|
||||
/// Type level support struct
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
pub struct SeverityHigh {}
|
||||
impl HasSeverity for SeverityHigh {
|
||||
const SEVERITY: Severity = Severity::High;
|
||||
const SEVERITY: Severity = Severity::HIGH;
|
||||
}
|
||||
|
||||
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
|
||||
pub trait GenericEvent: EcssEnumeration {
|
||||
type Raw;
|
||||
type GroupId;
|
||||
type UniqueId;
|
||||
@ -104,29 +99,27 @@ impl TryFrom<u8> for Severity {
|
||||
|
||||
fn try_from(value: u8) -> Result<Self, Self::Error> {
|
||||
match value {
|
||||
x if x == Severity::Info as u8 => Ok(Severity::Info),
|
||||
x if x == Severity::Low as u8 => Ok(Severity::Low),
|
||||
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
|
||||
x if x == Severity::High as u8 => Ok(Severity::High),
|
||||
x if x == Severity::INFO as u8 => Ok(Severity::INFO),
|
||||
x if x == Severity::LOW as u8 => Ok(Severity::LOW),
|
||||
x if x == Severity::MEDIUM as u8 => Ok(Severity::MEDIUM),
|
||||
x if x == Severity::HIGH as u8 => Ok(Severity::HIGH),
|
||||
_ => Err(()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
struct EventBase<Raw, GroupId, UniqueId> {
|
||||
struct EventBase<RAW, GID, UID> {
|
||||
severity: Severity,
|
||||
group_id: GroupId,
|
||||
unique_id: UniqueId,
|
||||
phantom: PhantomData<Raw>,
|
||||
group_id: GID,
|
||||
unique_id: UID,
|
||||
phantom: PhantomData<RAW>,
|
||||
}
|
||||
|
||||
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
|
||||
impl<RAW: ToBeBytes, GID, UID> EventBase<RAW, GID, UID> {
|
||||
fn write_to_bytes(
|
||||
&self,
|
||||
raw: Raw,
|
||||
raw: RAW,
|
||||
buf: &mut [u8],
|
||||
width: usize,
|
||||
) -> Result<usize, ByteConversionError> {
|
||||
@ -274,7 +267,6 @@ macro_rules! const_from_fn {
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
pub struct EventU32 {
|
||||
base: EventBase<u32, u16, u16>,
|
||||
}
|
||||
@ -317,12 +309,12 @@ impl EventU32 {
|
||||
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
|
||||
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
|
||||
/// raw event ID
|
||||
pub fn new_checked(
|
||||
pub fn new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
if group_id > MAX_GROUP_ID_U32_EVENT {
|
||||
if group_id > (2u16.pow(14) - 1) {
|
||||
return None;
|
||||
}
|
||||
Some(Self {
|
||||
@ -334,14 +326,12 @@ impl EventU32 {
|
||||
},
|
||||
})
|
||||
}
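
To make the raw layout described in the constructor docs above concrete — severity in the two most significant bits, a 14-bit group ID, and a 16-bit unique ID in the lower 16 bits — the encoding can be reproduced with plain bit arithmetic. This is a sketch for illustration, not crate code:

```rust
// Illustrative reconstruction of the EventU32 raw value layout:
// bits 31..30 = severity, bits 29..16 = group ID, bits 15..0 = unique ID.
fn raw_event_u32(severity: u8, group_id: u16, unique_id: u16) -> u32 {
    debug_assert!(severity <= 3);
    debug_assert!(group_id <= 0x3FFF);
    ((severity as u32) << 30) | ((group_id as u32) << 16) | unique_id as u32
}

// Example: a LOW severity event (1) with group ID 1 and unique ID 1 yields
// raw_event_u32(1, 1, 1) == 0x4001_0001.
```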
|
||||
|
||||
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
|
||||
pub const fn new(
|
||||
pub const fn const_new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
if group_id > MAX_GROUP_ID_U32_EVENT {
|
||||
if group_id > (2u16.pow(14) - 1) {
|
||||
panic!("Group ID too large");
|
||||
}
|
||||
Self {
|
||||
@ -354,16 +344,50 @@ impl EventU32 {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
|
||||
Self::from(u32::from_be_bytes(bytes))
|
||||
}
|
||||
|
||||
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
|
||||
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
|
||||
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
|
||||
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
|
||||
/// have distinct types for events with different severities
|
||||
pub fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU32::const_new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::const_new(
|
||||
((raw >> 16) & 0x3FFF) as u16,
|
||||
(raw & 0xFFFF) as u16,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<u32> for EventU32 {
|
||||
fn from(raw: u32) -> Self {
|
||||
// Severity conversion from u8 should never fail
|
||||
@ -371,10 +395,15 @@ impl From<u32> for EventU32 {
|
||||
let group_id = ((raw >> 16) & 0x3FFF) as u16;
|
||||
let unique_id = (raw & 0xFFFF) as u16;
|
||||
// Sanitized input, should never fail
|
||||
Self::new(severity, group_id, unique_id)
|
||||
Self::const_new(severity, group_id, unique_id)
|
||||
}
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::INFO, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::LOW, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::MEDIUM, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::HIGH, u32, EventU32TypedSev);
|
||||
|
||||
impl UnsignedEnum for EventU32 {
|
||||
fn size(&self) -> usize {
|
||||
core::mem::size_of::<u32>()
|
||||
@ -395,49 +424,6 @@ impl EcssEnumeration for EventU32 {
|
||||
}
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
|
||||
/// have distinct types for events with different severities
|
||||
pub fn new_checked(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
|
||||
pub const fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::new(
|
||||
((raw >> 16) & 0x3FFF) as u16,
|
||||
(raw & 0xFFFF) as u16,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
@ -455,8 +441,6 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
pub struct EventU16 {
|
||||
base: EventBase<u16, u8, u8>,
|
||||
}
|
||||
@ -491,7 +475,7 @@ impl EventU16 {
|
||||
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
|
||||
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
|
||||
/// raw event ID
|
||||
pub fn new_checked(
|
||||
pub fn new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
@ -509,8 +493,8 @@ impl EventU16 {
|
||||
})
|
||||
}
|
||||
|
||||
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
|
||||
pub const fn new(
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
severity: Severity,
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
@ -527,26 +511,52 @@ impl EventU16 {
|
||||
},
|
||||
}
|
||||
}
|
||||
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
|
||||
Self::from(u16::from_be_bytes(bytes))
|
||||
}
|
||||
|
||||
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
|
||||
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
|
||||
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
|
||||
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
|
||||
}
|
||||
|
||||
impl From<u16> for EventU16 {
|
||||
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
|
||||
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
|
||||
/// have distinct types for events with different severities
|
||||
pub fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Const version of [Self::new], but panics on invalid group ID input values.
|
||||
pub const fn const_new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU16::const_new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
|
||||
let group_id = ((raw >> 8) & 0x3F) as u8;
|
||||
let unique_id = (raw & 0xFF) as u8;
|
||||
// Sanitized input, new call should never fail
|
||||
Self::new(severity, group_id, unique_id)
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::const_new(
|
||||
((raw >> 8) & 0x3F) as u8,
|
||||
(raw & 0xFF) as u8,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
|
||||
|
||||
impl UnsignedEnum for EventU16 {
|
||||
fn size(&self) -> usize {
|
||||
core::mem::size_of::<u16>()
|
||||
@ -567,43 +577,6 @@ impl EcssEnumeration for EventU16 {
|
||||
}
|
||||
}
|
||||
|
||||
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
|
||||
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
|
||||
/// have distinct types for events with different severities
|
||||
pub fn new_checked(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Option<Self> {
|
||||
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
|
||||
Some(Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
|
||||
pub const fn new(
|
||||
group_id: <Self as GenericEvent>::GroupId,
|
||||
unique_id: <Self as GenericEvent>::UniqueId,
|
||||
) -> Self {
|
||||
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
|
||||
Self {
|
||||
event,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
|
||||
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
|
||||
if severity != expected {
|
||||
return Err(severity);
|
||||
}
|
||||
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
|
||||
}
|
||||
}
|
||||
|
||||
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
|
||||
|
||||
//noinspection RsTraitImplementation
|
||||
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
|
||||
delegate!(to self.event {
|
||||
@ -620,10 +593,20 @@ impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
|
||||
});
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
|
||||
impl From<u16> for EventU16 {
|
||||
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
|
||||
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
|
||||
let group_id = ((raw >> 8) & 0x3F) as u8;
|
||||
let unique_id = (raw & 0xFF) as u8;
|
||||
// Sanitized input, new call should never fail
|
||||
Self::const_new(severity, group_id, unique_id)
|
||||
}
|
||||
}
|
||||
|
||||
try_from_impls!(SeverityInfo, Severity::INFO, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityLow, Severity::LOW, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityMedium, Severity::MEDIUM, u16, EventU16TypedSev);
|
||||
try_from_impls!(SeverityHigh, Severity::HIGH, u16, EventU16TypedSev);
|
||||
|
||||
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
|
||||
#[inline]
|
||||
@ -664,10 +647,12 @@ mod tests {
|
||||
assert_eq!(size_of::<T>(), val);
|
||||
}
|
||||
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
|
||||
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
|
||||
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
|
||||
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::const_new(0, 0);
|
||||
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::const_new(0, 0);
|
||||
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> =
|
||||
EventU32TypedSev::const_new(0x3FFF, 0xFFFF);
|
||||
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> =
|
||||
EventU16TypedSev::const_new(0x3F, 0xff);
|
||||
|
||||
/// This working is a test in itself.
|
||||
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
|
||||
@ -698,7 +683,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_normal_event_getters() {
|
||||
assert_eq!(INFO_EVENT.severity(), Severity::Info);
|
||||
assert_eq!(INFO_EVENT.severity(), Severity::INFO);
|
||||
assert_eq!(INFO_EVENT.unique_id(), 0);
|
||||
assert_eq!(INFO_EVENT.group_id(), 0);
|
||||
let raw_event = INFO_EVENT.raw();
|
||||
@ -707,7 +692,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_small_event_getters() {
|
||||
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
|
||||
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::INFO);
|
||||
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
|
||||
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
|
||||
let raw_event = INFO_EVENT_SMALL.raw();
|
||||
@ -716,7 +701,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn all_ones_event_regular() {
|
||||
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
|
||||
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::HIGH);
|
||||
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
|
||||
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
|
||||
let raw_event = HIGH_SEV_EVENT.raw();
|
||||
@ -725,7 +710,7 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn all_ones_event_small() {
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::HIGH);
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
|
||||
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
|
||||
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
|
||||
@ -734,19 +719,18 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn invalid_group_id_normal() {
|
||||
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
|
||||
assert!(EventU32TypedSev::<SeverityMedium>::new(2_u16.pow(14), 0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_group_id_small() {
|
||||
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
|
||||
assert!(EventU16TypedSev::<SeverityMedium>::new(2_u8.pow(6), 0).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn regular_new() {
|
||||
assert_eq!(
|
||||
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
|
||||
.expect("Creating regular event failed"),
|
||||
EventU32TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
|
||||
INFO_EVENT
|
||||
);
|
||||
}
|
||||
@ -754,8 +738,7 @@ mod tests {
|
||||
#[test]
|
||||
fn small_new() {
|
||||
assert_eq!(
|
||||
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
|
||||
.expect("Creating regular event failed"),
|
||||
EventU16TypedSev::<SeverityInfo>::new(0, 0).expect("Creating regular event failed"),
|
||||
INFO_EVENT_SMALL
|
||||
);
|
||||
}
|
||||
@ -794,8 +777,6 @@ mod tests {
|
||||
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
|
||||
let val_from_raw = u32::from_be_bytes(buf);
|
||||
assert_eq!(val_from_raw, 0xFFFFFFFF);
|
||||
let event_read_back = EventU32::from_be_bytes(buf);
|
||||
assert_eq!(event_read_back, HIGH_SEV_EVENT);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -804,8 +785,6 @@ mod tests {
|
||||
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
|
||||
let val_from_raw = u16::from_be_bytes(buf);
|
||||
assert_eq!(val_from_raw, 0xFFFF);
|
||||
let event_read_back = EventU16::from_be_bytes(buf);
|
||||
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -836,13 +815,13 @@ mod tests {
|
||||
fn severity_from_invalid_raw_val() {
|
||||
let invalid = 0xFF;
|
||||
assert!(Severity::try_from(invalid).is_err());
|
||||
let invalid = Severity::High as u8 + 1;
|
||||
let invalid = Severity::HIGH as u8 + 1;
|
||||
assert!(Severity::try_from(invalid).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reduction() {
|
||||
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
|
||||
let event = EventU32TypedSev::<SeverityInfo>::const_new(1, 1);
|
||||
let raw = event.raw();
|
||||
let reduced: EventU32 = event.into();
|
||||
assert_eq!(reduced.group_id(), 1);
|
||||
|
@ -1,3 +1,4 @@
//! # Hardware Abstraction Layer module
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std;
@ -1,25 +1,19 @@
|
||||
use alloc::sync::Arc;
|
||||
use alloc::vec;
|
||||
use cobs::encode;
|
||||
use core::sync::atomic::AtomicBool;
|
||||
use core::time::Duration;
|
||||
use delegate::delegate;
|
||||
use mio::net::{TcpListener, TcpStream};
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::TcpListener;
|
||||
use std::net::TcpStream;
|
||||
use std::vec::Vec;
|
||||
|
||||
use crate::encoding::parse_buffer_for_cobs_encoded_packets;
|
||||
use crate::tmtc::PacketSenderRaw;
|
||||
use crate::tmtc::PacketSource;
|
||||
use crate::tmtc::ReceivesTc;
|
||||
use crate::tmtc::TmPacketSource;
|
||||
|
||||
use crate::hal::std::tcp_server::{
|
||||
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
|
||||
};
|
||||
use crate::ComponentId;
|
||||
|
||||
use super::tcp_server::HandledConnectionHandler;
|
||||
use super::tcp_server::HandledConnectionInfo;
|
||||
|
||||
/// Concrete [TcpTcParser] implementation for the [TcpTmtcInCobsServer].
|
||||
#[derive(Default)]
|
||||
@ -29,16 +23,14 @@ impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
sender_id: ComponentId,
|
||||
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>> {
|
||||
conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
|
||||
&mut tc_buffer[..current_write_idx],
|
||||
sender_id,
|
||||
tc_sender,
|
||||
tc_receiver.upcast_mut(),
|
||||
next_write_idx,
|
||||
)
|
||||
.map_err(|e| TcpTmtcError::TcError(e))?;
|
||||
@ -65,8 +57,8 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
|
||||
let mut tm_was_sent = false;
|
||||
@ -104,7 +96,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
|
||||
/// Telemetry will be encoded with the COBS protocol using [cobs::encode] in addition to being
|
||||
/// wrapped with the sentinel value 0 as the packet delimiter before being sent back to
|
||||
/// the client. Please note that the server will send as much data as it can retrieve from the
|
||||
/// [PacketSource] in its current implementation.
|
||||
/// [TmPacketSource] in its current implementation.
|
||||
///
|
||||
/// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
|
||||
/// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
|
||||
@ -118,30 +110,21 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
|
||||
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
|
||||
/// test also serves as the example application for this module.
|
||||
pub struct TcpTmtcInCobsServer<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcSender: PacketSenderRaw<Error = SendError>,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError,
|
||||
SendError: 'static,
|
||||
TcError: 'static,
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
> {
|
||||
pub generic_server: TcpTmtcGenericServer<
|
||||
TmSource,
|
||||
TcSender,
|
||||
CobsTmSender,
|
||||
CobsTcParser,
|
||||
HandledConnection,
|
||||
TmError,
|
||||
SendError,
|
||||
>,
|
||||
generic_server:
|
||||
TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, CobsTmSender, CobsTcParser>,
|
||||
}
|
||||
|
||||
impl<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcReceiver: PacketSenderRaw<Error = TcError>,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError: 'static,
|
||||
TcError: 'static,
|
||||
> TcpTmtcInCobsServer<TmSource, TcReceiver, HandledConnection, TmError, TcError>
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
> TcpTmtcInCobsServer<TmError, TcError, TmSource, TcReceiver>
|
||||
{
|
||||
/// Create a new TCP TMTC server which exchanges TMTC packets encoded with
|
||||
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
|
||||
@ -157,8 +140,6 @@ impl<
|
||||
cfg: ServerConfig,
|
||||
tm_source: TmSource,
|
||||
tc_receiver: TcReceiver,
|
||||
handled_connection: HandledConnection,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
Ok(Self {
|
||||
generic_server: TcpTmtcGenericServer::new(
|
||||
@ -167,8 +148,6 @@ impl<
|
||||
CobsTmSender::new(cfg.tm_buffer_size),
|
||||
tm_source,
|
||||
tc_receiver,
|
||||
handled_connection,
|
||||
stop_signal,
|
||||
)?,
|
||||
})
|
||||
}
|
||||
@ -181,10 +160,9 @@ impl<
|
||||
/// useful if using the port number 0 for OS auto-assignment.
|
||||
pub fn local_addr(&self) -> std::io::Result<SocketAddr>;
|
||||
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_all_connections] call.
|
||||
pub fn handle_all_connections(
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
poll_duration: Option<Duration>,
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
}
|
||||
@ -199,29 +177,21 @@ mod tests {
|
||||
use std::{
|
||||
io::{Read, Write},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
|
||||
panic,
|
||||
sync::mpsc,
|
||||
thread,
|
||||
time::Instant,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
|
||||
hal::std::tcp_server::{
|
||||
tests::{ConnectionFinishedHandler, SyncTmSource},
|
||||
ConnectionResult, ServerConfig,
|
||||
tests::{SyncTcCacher, SyncTmSource},
|
||||
ServerConfig,
|
||||
},
|
||||
queue::GenericSendError,
|
||||
tmtc::PacketAsVec,
|
||||
ComponentId,
|
||||
};
|
||||
use alloc::sync::Arc;
|
||||
use cobs::encode;
|
||||
|
||||
use super::TcpTmtcInCobsServer;
|
||||
|
||||
const TCP_SERVER_ID: ComponentId = 0x05;
|
||||
|
||||
fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx)
|
||||
}
|
||||
@ -240,22 +210,13 @@ mod tests {
|
||||
|
||||
fn generic_tmtc_server(
|
||||
addr: &SocketAddr,
|
||||
tc_sender: mpsc::Sender<PacketAsVec>,
|
||||
tc_receiver: SyncTcCacher,
|
||||
tm_source: SyncTmSource,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
) -> TcpTmtcInCobsServer<
|
||||
SyncTmSource,
|
||||
mpsc::Sender<PacketAsVec>,
|
||||
ConnectionFinishedHandler,
|
||||
(),
|
||||
GenericSendError,
|
||||
> {
|
||||
) -> TcpTmtcInCobsServer<(), (), SyncTmSource, SyncTcCacher> {
|
||||
TcpTmtcInCobsServer::new(
|
||||
ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
|
||||
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
|
||||
tm_source,
|
||||
tc_sender,
|
||||
ConnectionFinishedHandler::default(),
|
||||
stop_signal,
|
||||
tc_receiver,
|
||||
)
|
||||
.expect("TCP server generation failed")
|
||||
}
|
||||
@ -263,10 +224,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_server_basic_no_tm() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, tc_receiver) = mpsc::channel();
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let tm_source = SyncTmSource::default();
|
||||
let mut tcp_server =
|
||||
generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
|
||||
let mut tcp_server = generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source);
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
.expect("retrieving dest addr failed");
|
||||
@ -274,20 +234,13 @@ mod tests {
|
||||
let set_if_done = conn_handled.clone();
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let result = result.unwrap();
|
||||
assert_eq!(result, ConnectionResult::HandledConnections(1));
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_last_connection(0, 1);
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_no_connections_left();
|
||||
let conn_result = result.unwrap();
|
||||
assert_eq!(conn_result.num_received_tcs, 1);
|
||||
assert_eq!(conn_result.num_sent_tms, 0);
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
// Send TC to server now.
|
||||
@ -309,20 +262,24 @@ mod tests {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
// Check that the packet was received and decoded successfully.
|
||||
let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
|
||||
assert_eq!(packet_with_sender.packet, &SIMPLE_PACKET);
|
||||
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
|
||||
let mut tc_queue = tc_receiver
|
||||
.tc_queue
|
||||
.lock()
|
||||
.expect("locking tc queue failed");
|
||||
assert_eq!(tc_queue.len(), 1);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
|
||||
drop(tc_queue);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_basic_multi_tm_multi_tc() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, tc_receiver) = mpsc::channel();
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let mut tm_source = SyncTmSource::default();
|
||||
tm_source.add_tm(&INVERTED_PACKET);
|
||||
tm_source.add_tm(&SIMPLE_PACKET);
|
||||
let mut tcp_server =
|
||||
generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source.clone(), None);
|
||||
generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source.clone());
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
.expect("retrieving dest addr failed");
|
||||
@ -330,20 +287,13 @@ mod tests {
|
||||
let set_if_done = conn_handled.clone();
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let result = result.unwrap();
|
||||
assert_eq!(result, ConnectionResult::HandledConnections(1));
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_last_connection(2, 2);
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_no_connections_left();
|
||||
let conn_result = result.unwrap();
|
||||
assert_eq!(conn_result.num_received_tcs, 2, "Not enough TCs received");
|
||||
assert_eq!(conn_result.num_sent_tms, 2, "Not enough TMs received");
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
// Send TC to server now.
|
||||
@ -417,78 +367,13 @@ mod tests {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
// Check that the packet was received and decoded successfully.
|
||||
let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
|
||||
let packet = &packet_with_sender.packet;
|
||||
assert_eq!(packet, &SIMPLE_PACKET);
|
||||
let packet_with_sender = tc_receiver.recv().expect("receiving TC failed");
|
||||
let packet = &packet_with_sender.packet;
|
||||
assert_eq!(packet, &INVERTED_PACKET);
|
||||
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_accept_timeout() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, _tc_receiver) = mpsc::channel();
|
||||
let tm_source = SyncTmSource::default();
|
||||
let mut tcp_server =
|
||||
generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
|
||||
let start = Instant::now();
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
let thread_jh = thread::spawn(move || loop {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let result = result.unwrap();
|
||||
if result == ConnectionResult::AcceptTimeout {
|
||||
break;
|
||||
}
|
||||
if Instant::now() - start > Duration::from_millis(100) {
|
||||
panic!("regular stop signal handling failed");
|
||||
}
|
||||
});
|
||||
thread_jh.join().expect("thread join failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_stop_signal() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, _tc_receiver) = mpsc::channel();
|
||||
let tm_source = SyncTmSource::default();
|
||||
let stop_signal = Arc::new(AtomicBool::new(false));
|
||||
let mut tcp_server = generic_tmtc_server(
|
||||
&auto_port_addr,
|
||||
tc_sender.clone(),
|
||||
tm_source,
|
||||
Some(stop_signal.clone()),
|
||||
);
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
.expect("retrieving dest addr failed");
|
||||
let stop_signal_copy = stop_signal.clone();
|
||||
let start = Instant::now();
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
let thread_jh = thread::spawn(move || loop {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let result = result.unwrap();
|
||||
if result == ConnectionResult::AcceptTimeout {
|
||||
panic!("unexpected accept timeout");
|
||||
}
|
||||
if stop_signal_copy.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
if Instant::now() - start > Duration::from_millis(100) {
|
||||
panic!("regular stop signal handling failed");
|
||||
}
|
||||
});
|
||||
// We connect but do not do anything.
|
||||
let _stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
|
||||
stop_signal.store(true, Ordering::Relaxed);
|
||||
// No need to drop the connection, the stop signal should take care of everything.
|
||||
thread_jh.join().expect("thread join failed");
|
||||
let mut tc_queue = tc_receiver
|
||||
.tc_queue
|
||||
.lock()
|
||||
.expect("locking tc queue failed");
|
||||
assert_eq!(tc_queue.len(), 2);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &INVERTED_PACKET);
|
||||
drop(tc_queue);
|
||||
}
|
||||
}
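
As a usage note for the constructor changes above, this is a minimal sketch of standing up the COBS TCP server on the side of the diff that takes a component ID, an mpsc packet sender and a connection handler. `SyncTmSource` and `ConnectionFinishedHandler` are the test helpers from this file and merely stand in for user-provided TM source and connection-handler implementations; all paths and signatures are taken from the test code above and should be treated as assumptions:

    use std::{net::{IpAddr, Ipv4Addr, SocketAddr}, sync::mpsc, time::Duration};
    use satrs::tmtc::PacketAsVec;

    const TCP_SERVER_ID: satrs::ComponentId = 0x05;

    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
    let (tc_sender, _tc_receiver) = mpsc::channel::<PacketAsVec>();
    let tm_source = SyncTmSource::default();
    let mut server = TcpTmtcInCobsServer::new(
        ServerConfig::new(TCP_SERVER_ID, addr, Duration::from_millis(2), 1024, 1024),
        tm_source,
        tc_sender,
        ConnectionFinishedHandler::default(),
        None, // optional stop signal
    )
    .expect("creating TCP COBS server failed");
    // Poll for incoming client connections with a 100 ms accept timeout.
    let _result = server.handle_all_connections(Some(Duration::from_millis(100)));
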
@ -1,23 +1,21 @@
|
||||
//! Generic TCP TMTC servers with different TMTC format flavours.
|
||||
use alloc::sync::Arc;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use core::sync::atomic::AtomicBool;
|
||||
use core::time::Duration;
|
||||
use mio::net::{TcpListener, TcpStream};
|
||||
use mio::{Events, Interest, Poll, Token};
|
||||
use socket2::{Domain, Socket, Type};
|
||||
use std::io::{self, Read};
|
||||
use std::net::SocketAddr;
|
||||
use std::io::Read;
|
||||
use std::net::TcpListener;
|
||||
use std::net::{SocketAddr, TcpStream};
|
||||
use std::thread;
|
||||
|
||||
use crate::tmtc::{PacketSenderRaw, PacketSource};
|
||||
use crate::ComponentId;
|
||||
use crate::tmtc::{ReceivesTc, TmPacketSource};
|
||||
use thiserror::Error;
|
||||
|
||||
// Re-export the TMTC in COBS server.
|
||||
pub use crate::hal::std::tcp_cobs_server::{CobsTcParser, CobsTmSender, TcpTmtcInCobsServer};
|
||||
pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpacepacketsServer};
|
||||
pub use crate::hal::std::tcp_spacepackets_server::{
|
||||
SpacepacketsTcParser, SpacepacketsTmSender, TcpSpacepacketsServer,
|
||||
};
|
||||
|
||||
/// Configuration struct for the generic TCP TMTC server
|
||||
///
|
||||
@ -27,7 +25,7 @@ pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpac
|
||||
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
|
||||
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
|
||||
/// to reduce CPU load.
|
||||
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
|
||||
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [TmPacketSource] and
|
||||
/// encoding of that data. This buffer should be large enough to hold the maximum expected
|
||||
/// TM size read from the packet source.
|
||||
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
|
||||
@ -43,7 +41,6 @@ pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpac
|
||||
/// default.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ServerConfig {
|
||||
pub id: ComponentId,
|
||||
pub addr: SocketAddr,
|
||||
pub inner_loop_delay: Duration,
|
||||
pub tm_buffer_size: usize,
|
||||
@ -54,20 +51,18 @@ pub struct ServerConfig {
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn new(
|
||||
id: ComponentId,
|
||||
addr: SocketAddr,
|
||||
inner_loop_delay: Duration,
|
||||
tm_buffer_size: usize,
|
||||
tc_buffer_size: usize,
|
||||
) -> Self {
|
||||
Self {
|
||||
id,
|
||||
addr,
|
||||
inner_loop_delay,
|
||||
tm_buffer_size,
|
||||
tc_buffer_size,
|
||||
reuse_addr: true,
|
||||
reuse_port: true,
|
||||
reuse_addr: false,
|
||||
reuse_port: false,
|
||||
}
|
||||
}
|
||||
}
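
A small usage sketch for the constructor above (shown with the component ID parameter of the newer signature): the two versions in this hunk disagree on the `reuse_addr`/`reuse_port` defaults (false vs. true), so setting the flags explicitly avoids surprises. The public visibility of the two flag fields and the `TCP_SERVER_ID`/`addr` values are assumptions for illustration only:

    let mut cfg = ServerConfig::new(TCP_SERVER_ID, addr, Duration::from_millis(2), 2048, 2048);
    cfg.reuse_addr = true;
    cfg.reuse_port = true;
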
@ -84,62 +79,37 @@ pub enum TcpTmtcError<TmError, TcError> {
|
||||
|
||||
/// Result of one connection attempt. Contains the client address if a connection was established,
|
||||
/// in addition to the number of telecommands and telemetry packets exchanged.
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum ConnectionResult {
|
||||
AcceptTimeout,
|
||||
HandledConnections(u32),
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct HandledConnectionInfo {
|
||||
pub addr: SocketAddr,
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ConnectionResult {
|
||||
pub addr: Option<SocketAddr>,
|
||||
pub num_received_tcs: u32,
|
||||
pub num_sent_tms: u32,
|
||||
/// The generic TCP server can be stopped using an external signal. If this happened, this
|
||||
/// boolean will be set to true.
|
||||
pub stopped_by_signal: bool,
|
||||
}
|
||||
|
||||
impl HandledConnectionInfo {
|
||||
pub fn new(addr: SocketAddr) -> Self {
|
||||
Self {
|
||||
addr,
|
||||
num_received_tcs: 0,
|
||||
num_sent_tms: 0,
|
||||
stopped_by_signal: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait HandledConnectionHandler {
|
||||
fn handled_connection(&mut self, info: HandledConnectionInfo);
|
||||
}
|
||||
|
||||
/// Generic parser abstraction for an object which can parse for telecommands given a raw
|
||||
/// bytestream received from a TCP socket and send them using a generic [PacketSenderRaw]
|
||||
/// implementation. This allows different encoding schemes for telecommands.
|
||||
pub trait TcpTcParser<TmError, SendError> {
|
||||
/// bytestream received from a TCP socket and send them to a generic [ReceivesTc] telecommand
|
||||
/// receiver. This allows different encoding schemes for telecommands.
|
||||
pub trait TcpTcParser<TmError, TcError> {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
sender_id: ComponentId,
|
||||
tc_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, SendError>>;
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
|
||||
/// Generic sender abstraction for an object which can pull telemetry from a given TM source
|
||||
/// using a [PacketSource] and then send them back to a client using a given [TcpStream].
|
||||
/// using a [TmPacketSource] and then send them back to a client using a given [TcpStream].
|
||||
/// The concrete implementation can also perform any encoding steps which are necessary before
|
||||
/// sending back the data to a client.
|
||||
pub trait TcpTmSender<TmError, TcError> {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
@ -151,9 +121,9 @@ pub trait TcpTmSender<TmError, TcError> {
|
||||
/// through the following 4 core abstractions:
|
||||
///
|
||||
/// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
|
||||
/// 2. Parsed telecommands will be sent using the [PacketSenderRaw] object.
|
||||
/// 2. Parsed telecommands will be sent to the [ReceivesTc] telecommand receiver.
|
||||
/// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
|
||||
/// 4. [PacketSource] as a generic TM source used by the [TcpTmSender].
|
||||
/// 4. [TmPacketSource] as a generic TM source used by the [TcpTmSender].
|
||||
///
|
||||
/// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without
|
||||
/// having to re-implement common logic.
|
||||
@ -161,49 +131,32 @@ pub trait TcpTmSender<TmError, TcError> {
|
||||
/// Currently, this framework offers the following concrete implementations:
|
||||
///
|
||||
/// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
|
||||
/// 2. [TcpSpacepacketsServer] to exchange space packets via TCP.
|
||||
pub struct TcpTmtcGenericServer<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcSender: PacketSenderRaw<Error = TcSendError>,
|
||||
TmSender: TcpTmSender<TmError, TcSendError>,
|
||||
TcParser: TcpTcParser<TmError, TcSendError>,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError,
|
||||
TcSendError,
|
||||
TcError,
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
TmSender: TcpTmSender<TmError, TcError>,
|
||||
TcParser: TcpTcParser<TmError, TcError>,
|
||||
> {
|
||||
pub id: ComponentId,
|
||||
pub finished_handler: HandledConnection,
|
||||
pub(crate) listener: TcpListener,
|
||||
pub(crate) inner_loop_delay: Duration,
|
||||
pub(crate) tm_source: TmSource,
|
||||
pub(crate) tm_buffer: Vec<u8>,
|
||||
pub(crate) tc_sender: TcSender,
|
||||
pub(crate) tc_receiver: TcReceiver,
|
||||
pub(crate) tc_buffer: Vec<u8>,
|
||||
poll: Poll,
|
||||
events: Events,
|
||||
pub tc_handler: TcParser,
|
||||
pub tm_handler: TmSender,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
tc_handler: TcParser,
|
||||
tm_handler: TmSender,
|
||||
}
|
||||
|
||||
impl<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcSender: PacketSenderRaw<Error = TcSendError>,
|
||||
TmSender: TcpTmSender<TmError, TcSendError>,
|
||||
TcParser: TcpTcParser<TmError, TcSendError>,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError: 'static,
|
||||
TcSendError: 'static,
|
||||
>
|
||||
TcpTmtcGenericServer<
|
||||
TmSource,
|
||||
TcSender,
|
||||
TmSender,
|
||||
TcParser,
|
||||
HandledConnection,
|
||||
TmError,
|
||||
TcSendError,
|
||||
>
|
||||
TcError: 'static,
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
TmSender: TcpTmSender<TmError, TcError>,
|
||||
TcParser: TcpTcParser<TmError, TcError>,
|
||||
> TcpTmtcGenericServer<TmError, TcError, TmSource, TcReceiver, TmSender, TcParser>
|
||||
{
|
||||
/// Create a new generic TMTC server instance.
|
||||
///
|
||||
@ -215,58 +168,32 @@ impl<
|
||||
/// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
|
||||
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
|
||||
/// then sent back to the client.
|
||||
/// * `tc_sender` - Any received telecommand which was decoded successfully will be forwarded
|
||||
/// using this TC sender.
|
||||
/// * `stop_signal` - Can be used to stop the server even if a connection is ongoing.
|
||||
/// * `tc_receiver` - Any received telecommand which was decoded successfully will be forwarded
|
||||
/// to this TC receiver.
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tc_parser: TcParser,
|
||||
tm_sender: TmSender,
|
||||
tm_source: TmSource,
|
||||
tc_receiver: TcSender,
|
||||
finished_handler: HandledConnection,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
tc_receiver: TcReceiver,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
// Create a TCP listener bound to the configured address.
|
||||
let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
|
||||
|
||||
socket.set_reuse_address(cfg.reuse_addr)?;
|
||||
#[cfg(unix)]
|
||||
socket.set_reuse_port(cfg.reuse_port)?;
|
||||
// MIO does not do this for us. We want the accept calls to be non-blocking.
|
||||
socket.set_nonblocking(true)?;
|
||||
let addr = (cfg.addr).into();
|
||||
socket.bind(&addr)?;
|
||||
socket.listen(128)?;
|
||||
|
||||
// Create a poll instance.
|
||||
let poll = Poll::new()?;
|
||||
// Create storage for events.
|
||||
let events = Events::with_capacity(32);
|
||||
let listener: std::net::TcpListener = socket.into();
|
||||
let mut mio_listener = TcpListener::from_std(listener);
|
||||
|
||||
// Start listening for incoming connections.
|
||||
poll.registry().register(
|
||||
&mut mio_listener,
|
||||
Token(0),
|
||||
Interest::READABLE | Interest::WRITABLE,
|
||||
)?;
|
||||
|
||||
Ok(Self {
|
||||
id: cfg.id,
|
||||
tc_handler: tc_parser,
|
||||
tm_handler: tm_sender,
|
||||
poll,
|
||||
events,
|
||||
listener: mio_listener,
|
||||
listener: socket.into(),
|
||||
inner_loop_delay: cfg.inner_loop_delay,
|
||||
tm_source,
|
||||
tm_buffer: vec![0; cfg.tm_buffer_size],
|
||||
tc_sender: tc_receiver,
|
||||
tc_receiver,
|
||||
tc_buffer: vec![0; cfg.tc_buffer_size],
|
||||
stop_signal,
|
||||
finished_handler,
|
||||
})
|
||||
}
|
||||
|
||||
@ -281,11 +208,11 @@ impl<
|
||||
self.listener.local_addr()
|
||||
}
|
||||
|
||||
/// This call is used to handle all connection from clients. Right now, it performs
|
||||
/// This call is used to handle the next connection to a client. Right now, it performs
|
||||
/// the following steps:
|
||||
///
|
||||
/// 1. It calls the [std::net::TcpListener::accept] method until a client connects. An optional
|
||||
/// timeout can be specified for non-blocking acceptance.
|
||||
/// 1. It calls the [std::net::TcpListener::accept] method internally using the blocking API
|
||||
/// until a client connects.
|
||||
/// 2. It reads all the telecommands from the client and parses all received data using the
|
||||
/// user specified [TcpTcParser].
|
||||
/// 3. After reading and parsing all telecommands, it sends back all telemetry using the
|
||||
@ -294,66 +221,15 @@ impl<
|
||||
/// The server will delay for a user-specified period if the client connects to the server
|
||||
/// for prolonged periods and there is no traffic for the server. This is the case if the
|
||||
/// client does not send any telecommands and no telemetry needs to be sent back to the client.
|
||||
pub fn handle_all_connections(
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
poll_timeout: Option<Duration>,
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcSendError>> {
|
||||
let mut handled_connections = 0;
|
||||
// Poll Mio for events.
|
||||
self.poll.poll(&mut self.events, poll_timeout)?;
|
||||
let mut acceptable_connection = false;
|
||||
// Process each event.
|
||||
for event in self.events.iter() {
|
||||
if event.token() == Token(0) {
|
||||
acceptable_connection = true;
|
||||
} else {
|
||||
// Should never happen.
|
||||
panic!("unexpected TCP event token");
|
||||
}
|
||||
}
|
||||
// I'd love to do this in the loop above, but there are issues with multiple borrows.
|
||||
if acceptable_connection {
|
||||
// There might be multiple connections available. Accept until all of them have
|
||||
// been handled.
|
||||
loop {
|
||||
match self.listener.accept() {
|
||||
Ok((stream, addr)) => {
|
||||
if let Err(e) = self.handle_accepted_connection(stream, addr) {
|
||||
self.reregister_poll_interest()?;
|
||||
return Err(e);
|
||||
}
|
||||
handled_connections += 1;
|
||||
}
|
||||
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break,
|
||||
Err(err) => {
|
||||
self.reregister_poll_interest()?;
|
||||
return Err(TcpTmtcError::Io(err));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if handled_connections > 0 {
|
||||
return Ok(ConnectionResult::HandledConnections(handled_connections));
|
||||
}
|
||||
Ok(ConnectionResult::AcceptTimeout)
|
||||
}
|
||||
|
||||
fn reregister_poll_interest(&mut self) -> io::Result<()> {
|
||||
self.poll.registry().reregister(
|
||||
&mut self.listener,
|
||||
Token(0),
|
||||
Interest::READABLE | Interest::WRITABLE,
|
||||
)
|
||||
}
|
||||
|
||||
fn handle_accepted_connection(
|
||||
&mut self,
|
||||
mut stream: TcpStream,
|
||||
addr: SocketAddr,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcSendError>> {
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>> {
|
||||
let mut connection_result = ConnectionResult::default();
|
||||
let mut current_write_idx;
|
||||
let mut next_write_idx = 0;
|
||||
let mut connection_result = HandledConnectionInfo::new(addr);
|
||||
let (mut stream, addr) = self.listener.accept()?;
|
||||
stream.set_nonblocking(true)?;
|
||||
connection_result.addr = Some(addr);
|
||||
current_write_idx = next_write_idx;
|
||||
loop {
|
||||
let read_result = stream.read(&mut self.tc_buffer[current_write_idx..]);
|
||||
@ -364,8 +240,7 @@ impl<
|
||||
if current_write_idx > 0 {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.tc_buffer,
|
||||
self.id,
|
||||
&self.tc_sender,
|
||||
&mut self.tc_receiver,
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
@ -379,8 +254,7 @@ impl<
|
||||
if current_write_idx == self.tc_buffer.capacity() {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.tc_buffer,
|
||||
self.id,
|
||||
&self.tc_sender,
|
||||
&mut self.tc_receiver,
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
@ -394,8 +268,7 @@ impl<
|
||||
std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.tc_buffer,
|
||||
self.id,
|
||||
&self.tc_sender,
|
||||
&mut self.tc_receiver,
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
@ -411,18 +284,6 @@ impl<
|
||||
// No TC read, no TM was sent, but the client has not disconnected.
|
||||
// Perform an inner delay to avoid burning CPU time.
|
||||
thread::sleep(self.inner_loop_delay);
|
||||
// Optional stop signal handling.
|
||||
if self.stop_signal.is_some()
|
||||
&& self
|
||||
.stop_signal
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
{
|
||||
connection_result.stopped_by_signal = true;
|
||||
self.finished_handler.handled_connection(connection_result);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
@ -437,8 +298,7 @@ impl<
|
||||
&mut connection_result,
|
||||
&mut stream,
|
||||
)?;
|
||||
self.finished_handler.handled_connection(connection_result);
|
||||
Ok(())
|
||||
Ok(connection_result)
|
||||
}
|
||||
}
|
||||
|
||||
@ -448,9 +308,21 @@ pub(crate) mod tests {
|
||||
|
||||
use alloc::{collections::VecDeque, sync::Arc, vec::Vec};
|
||||
|
||||
use crate::tmtc::PacketSource;
|
||||
use crate::tmtc::{ReceivesTcCore, TmPacketSourceCore};
|
||||
|
||||
use super::*;
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct SyncTcCacher {
|
||||
pub(crate) tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
impl ReceivesTcCore for SyncTcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
|
||||
tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct SyncTmSource {
|
||||
@ -464,7 +336,7 @@ pub(crate) mod tests {
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketSource for SyncTmSource {
|
||||
impl TmPacketSourceCore for SyncTmSource {
|
||||
type Error = ();
|
||||
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
@ -484,30 +356,4 @@ pub(crate) mod tests {
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct ConnectionFinishedHandler {
|
||||
connection_info: VecDeque<HandledConnectionInfo>,
|
||||
}
|
||||
|
||||
impl HandledConnectionHandler for ConnectionFinishedHandler {
|
||||
fn handled_connection(&mut self, info: HandledConnectionInfo) {
|
||||
self.connection_info.push_back(info);
|
||||
}
|
||||
}
|
||||
|
||||
impl ConnectionFinishedHandler {
|
||||
pub fn check_last_connection(&mut self, num_tms: u32, num_tcs: u32) {
|
||||
let last_conn_result = self
|
||||
.connection_info
|
||||
.pop_back()
|
||||
.expect("no connection info available");
|
||||
assert_eq!(last_conn_result.num_received_tcs, num_tcs);
|
||||
assert_eq!(last_conn_result.num_sent_tms, num_tms);
|
||||
}
|
||||
|
||||
pub fn check_no_connections_left(&self) {
|
||||
assert!(self.connection_info.is_empty());
|
||||
}
|
||||
}
|
||||
}
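
The `HandledConnectionHandler`/`HandledConnectionInfo` pair introduced above is the hook a user implements to be notified after each handled client connection. A sketch modeled on the `ConnectionFinishedHandler` test helper in this hunk; the module path and the logging call are illustrative assumptions:

    use std::collections::VecDeque;
    use satrs::hal::std::tcp_server::{HandledConnectionHandler, HandledConnectionInfo};

    #[derive(Default)]
    pub struct ConnectionLogger {
        history: VecDeque<HandledConnectionInfo>,
    }

    impl HandledConnectionHandler for ConnectionLogger {
        fn handled_connection(&mut self, info: HandledConnectionInfo) {
            // The info struct exposes the peer address and the TC/TM counters as public fields.
            println!(
                "handled connection from {}: {} TCs received, {} TMs sent",
                info.addr, info.num_received_tcs, info.num_sent_tms
            );
            self.history.push_back(info);
        }
    }
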
@ -1,44 +1,48 @@
|
||||
use alloc::sync::Arc;
|
||||
use core::{sync::atomic::AtomicBool, time::Duration};
|
||||
use delegate::delegate;
|
||||
use mio::net::{TcpListener, TcpStream};
|
||||
use std::{io::Write, net::SocketAddr};
|
||||
use std::{
|
||||
io::Write,
|
||||
net::{SocketAddr, TcpListener, TcpStream},
|
||||
};
|
||||
|
||||
use alloc::boxed::Box;
|
||||
|
||||
use crate::{
|
||||
encoding::{ccsds::SpacePacketValidator, parse_buffer_for_ccsds_space_packets},
|
||||
tmtc::{PacketSenderRaw, PacketSource},
|
||||
ComponentId,
|
||||
encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets},
|
||||
tmtc::{ReceivesTc, TmPacketSource},
|
||||
};
|
||||
|
||||
use super::tcp_server::{
|
||||
ConnectionResult, HandledConnectionHandler, HandledConnectionInfo, ServerConfig, TcpTcParser,
|
||||
TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
|
||||
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
|
||||
};
|
||||
|
||||
impl<T: SpacePacketValidator, TmError, TcError: 'static> TcpTcParser<TmError, TcError> for T {
|
||||
/// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer].
|
||||
pub struct SpacepacketsTcParser {
|
||||
packet_id_lookup: Box<dyn PacketIdLookup + Send>,
|
||||
}
|
||||
|
||||
impl SpacepacketsTcParser {
|
||||
pub fn new(packet_id_lookup: Box<dyn PacketIdLookup + Send>) -> Self {
|
||||
Self { packet_id_lookup }
|
||||
}
|
||||
}
|
||||
|
||||
impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTcParser {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
sender_id: ComponentId,
|
||||
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>> {
|
||||
// Reader vec full, need to parse for packets.
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&tc_buffer[..current_write_idx],
|
||||
self,
|
||||
sender_id,
|
||||
tc_sender,
|
||||
conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
|
||||
&mut tc_buffer[..current_write_idx],
|
||||
self.packet_id_lookup.as_ref(),
|
||||
tc_receiver.upcast_mut(),
|
||||
next_write_idx,
|
||||
)
|
||||
.map_err(|e| TcpTmtcError::TcError(e))?;
|
||||
if let Some(broken_tail_start) = parse_result.incomplete_tail_start {
|
||||
// Copy broken tail to front of buffer.
|
||||
tc_buffer.copy_within(broken_tail_start..current_write_idx, 0);
|
||||
*next_write_idx = current_write_idx - broken_tail_start;
|
||||
}
|
||||
conn_result.num_received_tcs += parse_result.packets_found;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -51,8 +55,8 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut HandledConnectionInfo,
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
|
||||
let mut tm_was_sent = false;
|
||||
@ -79,41 +83,35 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
|
||||
///
|
||||
/// This server only works if
|
||||
/// [CCSDS 133.0-B-2 space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf) are the only
|
||||
/// packet type being exchanged. It uses the CCSDS space packet header [spacepackets::SpHeader] and
|
||||
/// a user specified [SpacePacketValidator] to determine the space packets relevant for further
|
||||
/// processing.
|
||||
/// packet type being exchanged. It uses the CCSDS [spacepackets::PacketId] as the packet delimiter
|
||||
/// and start marker when parsing for packets. The user specifies a set of expected
|
||||
/// [spacepackets::PacketId]s as part of the server configuration for that purpose.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
|
||||
/// also serves as the example application for this module.
|
||||
pub struct TcpSpacepacketsServer<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcSender: PacketSenderRaw<Error = SendError>,
|
||||
Validator: SpacePacketValidator,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError,
|
||||
SendError: 'static,
|
||||
TcError: 'static,
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
> {
|
||||
pub generic_server: TcpTmtcGenericServer<
|
||||
TmSource,
|
||||
TcSender,
|
||||
SpacepacketsTmSender,
|
||||
Validator,
|
||||
HandledConnection,
|
||||
generic_server: TcpTmtcGenericServer<
|
||||
TmError,
|
||||
SendError,
|
||||
TcError,
|
||||
TmSource,
|
||||
TcReceiver,
|
||||
SpacepacketsTmSender,
|
||||
SpacepacketsTcParser,
|
||||
>,
|
||||
}
|
||||
|
||||
impl<
|
||||
TmSource: PacketSource<Error = TmError>,
|
||||
TcSender: PacketSenderRaw<Error = TcError>,
|
||||
Validator: SpacePacketValidator,
|
||||
HandledConnection: HandledConnectionHandler,
|
||||
TmError: 'static,
|
||||
TcError: 'static,
|
||||
> TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection, TmError, TcError>
|
||||
TmSource: TmPacketSource<Error = TmError>,
|
||||
TcReceiver: ReceivesTc<Error = TcError>,
|
||||
> TcpSpacepacketsServer<TmError, TcError, TmSource, TcReceiver>
|
||||
{
|
||||
///
|
||||
/// ## Parameter
|
||||
@ -121,31 +119,23 @@ impl<
|
||||
/// * `cfg` - Configuration of the server.
|
||||
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
|
||||
/// then sent back to the client.
|
||||
/// * `tc_sender` - Any received telecommands which were decoded successfully will be
|
||||
/// forwarded using this [PacketSenderRaw].
|
||||
/// * `validator` - Used to determine the space packets relevant for further processing and
|
||||
/// to detect broken space packets.
|
||||
/// * `handled_connection_hook` - Called to notify the user about a successfully handled
|
||||
/// connection.
|
||||
/// * `stop_signal` - Can be used to shut down the TCP server even for longer running
|
||||
/// connections.
|
||||
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
|
||||
/// forwarded to this TC receiver.
|
||||
/// * `packet_id_lookup` - This lookup table contains the relevant packets IDs for packet
|
||||
/// parsing. This mechanism is used to have a start marker for finding CCSDS packets.
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: TmSource,
|
||||
tc_sender: TcSender,
|
||||
validator: Validator,
|
||||
handled_connection_hook: HandledConnection,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
tc_receiver: TcReceiver,
|
||||
packet_id_lookup: Box<dyn PacketIdLookup + Send>,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
Ok(Self {
|
||||
generic_server: TcpTmtcGenericServer::new(
|
||||
cfg,
|
||||
validator,
|
||||
SpacepacketsTcParser::new(packet_id_lookup),
|
||||
SpacepacketsTmSender::default(),
|
||||
tm_source,
|
||||
tc_sender,
|
||||
handled_connection_hook,
|
||||
stop_signal,
|
||||
tc_receiver,
|
||||
)?,
|
||||
})
|
||||
}
|
||||
@ -158,10 +148,9 @@ impl<
|
||||
/// useful if using the port number 0 for OS auto-assignment.
|
||||
pub fn local_addr(&self) -> std::io::Result<SocketAddr>;
|
||||
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_all_connections] call.
|
||||
pub fn handle_all_connections(
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
poll_timeout: Option<Duration>
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
}
|
||||
@ -178,70 +167,39 @@ mod tests {
|
||||
use std::{
|
||||
io::{Read, Write},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
|
||||
sync::mpsc,
|
||||
thread,
|
||||
};
|
||||
|
||||
use alloc::sync::Arc;
|
||||
use alloc::{boxed::Box, sync::Arc};
|
||||
use hashbrown::HashSet;
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, WritablePusPacket},
|
||||
CcsdsPacket, PacketId, SpHeader,
|
||||
PacketId, SpHeader,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
encoding::ccsds::{SpValidity, SpacePacketValidator},
|
||||
hal::std::tcp_server::{
|
||||
tests::{ConnectionFinishedHandler, SyncTmSource},
|
||||
ConnectionResult, ServerConfig,
|
||||
},
|
||||
queue::GenericSendError,
|
||||
tmtc::PacketAsVec,
|
||||
ComponentId,
|
||||
use crate::hal::std::tcp_server::{
|
||||
tests::{SyncTcCacher, SyncTmSource},
|
||||
ServerConfig,
|
||||
};
|
||||
|
||||
use super::TcpSpacepacketsServer;
|
||||
|
||||
const TCP_SERVER_ID: ComponentId = 0x05;
|
||||
const TEST_APID_0: u16 = 0x02;
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
|
||||
const TEST_APID_1: u16 = 0x10;
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct SimpleValidator(pub HashSet<PacketId>);
|
||||
|
||||
impl SpacePacketValidator for SimpleValidator {
|
||||
fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> SpValidity {
|
||||
if self.0.contains(&sp_header.packet_id()) {
|
||||
return SpValidity::Valid;
|
||||
}
|
||||
// Simple case: Assume that the interface always contains valid space packets.
|
||||
SpValidity::Skip
|
||||
}
|
||||
}
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
|
||||
|
||||
fn generic_tmtc_server(
|
||||
addr: &SocketAddr,
|
||||
tc_sender: mpsc::Sender<PacketAsVec>,
|
||||
tc_receiver: SyncTcCacher,
|
||||
tm_source: SyncTmSource,
|
||||
validator: SimpleValidator,
|
||||
stop_signal: Option<Arc<AtomicBool>>,
|
||||
) -> TcpSpacepacketsServer<
|
||||
SyncTmSource,
|
||||
mpsc::Sender<PacketAsVec>,
|
||||
SimpleValidator,
|
||||
ConnectionFinishedHandler,
|
||||
(),
|
||||
GenericSendError,
|
||||
> {
|
||||
packet_id_lookup: HashSet<PacketId>,
|
||||
) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher> {
|
||||
TcpSpacepacketsServer::new(
|
||||
ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
|
||||
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
|
||||
tm_source,
|
||||
tc_sender,
|
||||
validator,
|
||||
ConnectionFinishedHandler::default(),
|
||||
stop_signal,
|
||||
tc_receiver,
|
||||
Box::new(packet_id_lookup),
|
||||
)
|
||||
.expect("TCP server generation failed")
|
||||
}
|
||||
@ -249,16 +207,15 @@ mod tests {
|
||||
#[test]
|
||||
fn test_basic_tc_only() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, tc_receiver) = mpsc::channel();
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let tm_source = SyncTmSource::default();
|
||||
let mut validator = SimpleValidator::default();
|
||||
validator.0.insert(TEST_PACKET_ID_0);
|
||||
let mut packet_id_lookup = HashSet::new();
|
||||
packet_id_lookup.insert(TEST_PACKET_ID_0);
|
||||
let mut tcp_server = generic_tmtc_server(
|
||||
&auto_port_addr,
|
||||
tc_sender.clone(),
|
||||
tc_receiver.clone(),
|
||||
tm_source,
|
||||
validator,
|
||||
None,
|
||||
packet_id_lookup,
|
||||
);
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
@ -267,24 +224,17 @@ mod tests {
|
||||
let set_if_done = conn_handled.clone();
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let conn_result = result.unwrap();
|
||||
matches!(conn_result, ConnectionResult::HandledConnections(1));
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_last_connection(0, 1);
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_no_connections_left();
|
||||
assert_eq!(conn_result.num_received_tcs, 1);
|
||||
assert_eq!(conn_result.num_sent_tms, 0);
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
let ping_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let tc_0 = ping_tc.to_vec().expect("packet generation failed");
|
||||
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
|
||||
stream
|
||||
@ -301,40 +251,40 @@ mod tests {
|
||||
if !conn_handled.load(Ordering::Relaxed) {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
let packet = tc_receiver.try_recv().expect("receiving TC failed");
|
||||
assert_eq!(packet.packet, tc_0);
|
||||
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
|
||||
// Check that TC has arrived.
|
||||
let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
|
||||
assert_eq!(tc_queue.len(), 1);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_tc_multi_tm() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let (tc_sender, tc_receiver) = mpsc::channel();
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let mut tm_source = SyncTmSource::default();
|
||||
|
||||
// Add telemetry
|
||||
let mut total_tm_len = 0;
|
||||
let verif_tm =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
|
||||
let tm_0 = verif_tm.to_vec().expect("writing packet failed");
|
||||
total_tm_len += tm_0.len();
|
||||
tm_source.add_tm(&tm_0);
|
||||
let verif_tm =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true);
|
||||
let tm_1 = verif_tm.to_vec().expect("writing packet failed");
|
||||
total_tm_len += tm_1.len();
|
||||
tm_source.add_tm(&tm_1);
|
||||
|
||||
// Set up server
|
||||
let mut validator = SimpleValidator::default();
|
||||
validator.0.insert(TEST_PACKET_ID_0);
|
||||
validator.0.insert(TEST_PACKET_ID_1);
|
||||
let mut packet_id_lookup = HashSet::new();
|
||||
packet_id_lookup.insert(TEST_PACKET_ID_0);
|
||||
packet_id_lookup.insert(TEST_PACKET_ID_1);
|
||||
let mut tcp_server = generic_tmtc_server(
|
||||
&auto_port_addr,
|
||||
tc_sender.clone(),
|
||||
tc_receiver.clone(),
|
||||
tm_source,
|
||||
validator,
|
||||
None,
|
||||
packet_id_lookup,
|
||||
);
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
@ -344,20 +294,16 @@ mod tests {
|
||||
|
||||
// Call the connection handler in a separate thread; this call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(100)));
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let conn_result = result.unwrap();
|
||||
matches!(conn_result, ConnectionResult::HandledConnections(1));
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_last_connection(2, 2);
|
||||
tcp_server
|
||||
.generic_server
|
||||
.finished_handler
|
||||
.check_no_connections_left();
|
||||
assert_eq!(
|
||||
conn_result.num_received_tcs, 2,
|
||||
"wrong number of received TCs"
|
||||
);
|
||||
assert_eq!(conn_result.num_sent_tms, 2, "wrong number of sent TMs");
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
|
||||
@ -366,14 +312,14 @@ mod tests {
|
||||
.expect("setting reas timeout failed");
|
||||
|
||||
// Send telecommands
|
||||
let ping_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
|
||||
stream
|
||||
.write_all(&tc_0)
|
||||
.expect("writing to TCP server failed");
|
||||
let action_tc =
|
||||
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let tc_1 = action_tc.to_vec().expect("action tc creation failed");
|
||||
stream
|
||||
.write_all(&tc_1)
|
||||
@ -408,10 +354,9 @@ mod tests {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
// Check that TC has arrived.
|
||||
let packet_0 = tc_receiver.try_recv().expect("receiving TC failed");
|
||||
assert_eq!(packet_0.packet, tc_0);
|
||||
let packet_1 = tc_receiver.try_recv().expect("receiving TC failed");
|
||||
assert_eq!(packet_1.packet, tc_1);
|
||||
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
|
||||
let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
|
||||
assert_eq!(tc_queue.len(), 2);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), tc_1);
|
||||
}
|
||||
}
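
The spacepackets server variant above delegates packet framing either to a `SpacePacketValidator` (one side of this diff) or to a `PacketIdLookup` trait object (the other side). A sketch of a custom validator that accepts a single APID, modeled on the `SimpleValidator` test helper in this hunk; the import paths are assumptions:

    use satrs::encoding::ccsds::{SpValidity, SpacePacketValidator};
    use spacepackets::{CcsdsPacket, SpHeader};

    pub struct SingleApidValidator {
        pub apid: u16,
    }

    impl SpacePacketValidator for SingleApidValidator {
        fn validate(&self, sp_header: &SpHeader, _raw_buf: &[u8]) -> SpValidity {
            if sp_header.apid() == self.apid {
                return SpValidity::Valid;
            }
            // Unknown packets are skipped rather than treated as an error.
            SpValidity::Skip
        }
    }
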
@ -1,8 +1,7 @@
|
||||
//! Generic UDP TC server.
|
||||
use crate::tmtc::PacketSenderRaw;
|
||||
use crate::ComponentId;
|
||||
use core::fmt::Debug;
|
||||
use std::io::{self, ErrorKind};
|
||||
use crate::tmtc::{ReceivesTc, ReceivesTcCore};
|
||||
use std::boxed::Box;
|
||||
use std::io::{Error, ErrorKind};
|
||||
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
|
||||
use std::vec;
|
||||
use std::vec::Vec;
|
||||
@ -12,46 +11,45 @@ use std::vec::Vec;
|
||||
///
|
||||
/// It caches all received telecommands into a vector. The maximum expected telecommand size should
|
||||
/// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
|
||||
/// sender in form of a special trait object which implements [PacketSenderRaw]. For example, this
|
||||
/// can be used to send the telecommands to a centralized TC source component for further
|
||||
/// processing and routing.
|
||||
/// receiver in form of a special trait object which implements [ReceivesTc]. Please note that the
|
||||
/// receiver should copy out the received data if the data is required past the
|
||||
/// [ReceivesTcCore::pass_tc] call.
|
||||
///
|
||||
/// # Examples
|
||||
///
|
||||
/// ```
|
||||
/// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
/// use std::sync::mpsc;
|
||||
/// use spacepackets::ecss::WritablePusPacket;
|
||||
/// use satrs::hal::std::udp_server::UdpTcServer;
|
||||
/// use satrs::ComponentId;
|
||||
/// use satrs::tmtc::PacketSenderRaw;
|
||||
/// use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
|
||||
/// use spacepackets::SpHeader;
|
||||
/// use spacepackets::ecss::tc::PusTcCreator;
|
||||
///
|
||||
/// const UDP_SERVER_ID: ComponentId = 0x05;
|
||||
/// #[derive (Default)]
|
||||
/// struct PingReceiver {}
|
||||
/// impl ReceivesTcCore for PingReceiver {
|
||||
/// type Error = ();
|
||||
/// fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
/// assert_eq!(tc_raw.len(), 13);
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// }
|
||||
///
|
||||
/// let (packet_sender, packet_receiver) = mpsc::channel();
|
||||
/// let mut buf = [0; 32];
|
||||
/// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
|
||||
/// let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, packet_sender)
|
||||
/// let ping_receiver = PingReceiver::default();
|
||||
/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
|
||||
/// .expect("Creating UDP TMTC server failed");
|
||||
/// let sph = SpHeader::new_from_apid(0x02);
|
||||
/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
|
||||
/// // Can not fail.
|
||||
/// let ping_tc_raw = pus_tc.to_vec().unwrap();
|
||||
///
|
||||
/// // Now create a UDP client and send the ping telecommand to the server.
|
||||
/// let client = UdpSocket::bind("127.0.0.1:0").expect("creating UDP client failed");
|
||||
/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
|
||||
/// let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
/// let len = pus_tc
|
||||
/// .write_to_bytes(&mut buf)
|
||||
/// .expect("Error writing PUS TC packet");
|
||||
/// assert_eq!(len, 13);
|
||||
/// let client = UdpSocket::bind("127.0.0.1:7778").expect("Connecting to UDP server failed");
|
||||
/// client
|
||||
/// .send_to(&ping_tc_raw, dest_addr)
|
||||
/// .send_to(&buf[0..len], dest_addr)
|
||||
/// .expect("Error sending PUS TC via UDP");
|
||||
/// let recv_result = udp_tc_server.try_recv_tc();
|
||||
/// assert!(recv_result.is_ok());
|
||||
/// // The packet is received by the UDP TC server and sent via the mpsc channel.
|
||||
/// let sent_packet_with_sender = packet_receiver.try_recv().expect("expected telecommand");
|
||||
/// assert_eq!(sent_packet_with_sender.packet, ping_tc_raw);
|
||||
/// assert_eq!(sent_packet_with_sender.sender_id, UDP_SERVER_ID);
|
||||
/// // No more packets received.
|
||||
/// matches!(packet_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
|
||||
/// ```
|
||||
///
|
||||
/// The [satrs-example crate](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/satrs-example)
|
||||
@ -59,45 +57,65 @@ use std::vec::Vec;
|
||||
/// [example code](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/tmtc.rs#L67)
|
||||
/// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
|
||||
/// and then forwards them to a generic CCSDS packet receiver.
|
||||
pub struct UdpTcServer<TcSender: PacketSenderRaw<Error = SendError>, SendError> {
|
||||
pub id: ComponentId,
|
||||
pub struct UdpTcServer<E> {
|
||||
pub socket: UdpSocket,
|
||||
recv_buf: Vec<u8>,
|
||||
sender_addr: Option<SocketAddr>,
|
||||
pub tc_sender: TcSender,
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = E>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ReceiveResult<SendError: Debug + 'static> {
|
||||
#[error("nothing was received")]
|
||||
#[derive(Debug)]
|
||||
pub enum ReceiveResult<E> {
|
||||
NothingReceived,
|
||||
#[error(transparent)]
|
||||
Io(#[from] io::Error),
|
||||
#[error(transparent)]
|
||||
Send(SendError),
|
||||
IoError(Error),
|
||||
ReceiverError(E),
|
||||
}
|
||||
|
||||
impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
|
||||
UdpTcServer<TcSender, SendError>
|
||||
{
|
||||
impl<E> From<Error> for ReceiveResult<E> {
|
||||
fn from(e: Error) -> Self {
|
||||
ReceiveResult::IoError(e)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: PartialEq> PartialEq for ReceiveResult<E> {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
use ReceiveResult::*;
|
||||
match (self, other) {
|
||||
(IoError(ref e), IoError(ref other_e)) => e.kind() == other_e.kind(),
|
||||
(NothingReceived, NothingReceived) => true,
|
||||
(ReceiverError(e), ReceiverError(other_e)) => e == other_e,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: Eq + PartialEq> Eq for ReceiveResult<E> {}
|
||||
|
||||
impl<E: 'static> ReceivesTcCore for UdpTcServer<E> {
|
||||
type Error = E;
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.tc_receiver.pass_tc(tc_raw)
|
||||
}
|
||||
}
|
||||
|
||||
impl<E: 'static> UdpTcServer<E> {
|
||||
pub fn new<A: ToSocketAddrs>(
|
||||
id: ComponentId,
|
||||
addr: A,
|
||||
max_recv_size: usize,
|
||||
tc_sender: TcSender,
|
||||
) -> Result<Self, io::Error> {
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = E>>,
|
||||
) -> Result<Self, Error> {
|
||||
let server = Self {
|
||||
id,
|
||||
socket: UdpSocket::bind(addr)?,
|
||||
recv_buf: vec![0; max_recv_size],
|
||||
sender_addr: None,
|
||||
tc_sender,
|
||||
tc_receiver,
|
||||
};
|
||||
server.socket.set_nonblocking(true)?;
|
||||
Ok(server)
|
||||
}
|
||||
|
||||
pub fn try_recv_tc(&mut self) -> Result<(usize, SocketAddr), ReceiveResult<SendError>> {
|
||||
pub fn try_recv_tc(&mut self) -> Result<(usize, SocketAddr), ReceiveResult<E>> {
|
||||
let res = match self.socket.recv_from(&mut self.recv_buf) {
|
||||
Ok(res) => res,
|
||||
Err(e) => {
|
||||
@ -110,9 +128,9 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
|
||||
};
|
||||
let (num_bytes, from) = res;
|
||||
self.sender_addr = Some(from);
|
||||
self.tc_sender
|
||||
.send_packet(self.id, &self.recv_buf[0..num_bytes])
|
||||
.map_err(ReceiveResult::Send)?;
|
||||
self.tc_receiver
|
||||
.pass_tc(&self.recv_buf[0..num_bytes])
|
||||
.map_err(|e| ReceiveResult::ReceiverError(e))?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
@ -124,35 +142,29 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
|
||||
use crate::queue::GenericSendError;
|
||||
use crate::tmtc::PacketSenderRaw;
|
||||
use crate::ComponentId;
|
||||
use core::cell::RefCell;
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
use spacepackets::ecss::tc::PusTcCreator;
|
||||
use spacepackets::ecss::WritablePusPacket;
|
||||
use spacepackets::SpHeader;
|
||||
use std::boxed::Box;
|
||||
use std::collections::VecDeque;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
|
||||
use std::vec::Vec;
|
||||
|
||||
fn is_send<T: Send>(_: &T) {}
|
||||
|
||||
const UDP_SERVER_ID: ComponentId = 0x05;
|
||||
|
||||
#[derive(Default)]
|
||||
struct PingReceiver {
|
||||
pub sent_cmds: RefCell<VecDeque<Vec<u8>>>,
|
||||
pub sent_cmds: VecDeque<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl PacketSenderRaw for PingReceiver {
|
||||
type Error = GenericSendError;
|
||||
impl ReceivesTcCore for PingReceiver {
|
||||
type Error = ();
|
||||
|
||||
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
assert_eq!(sender_id, UDP_SERVER_ID);
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut sent_data = Vec::new();
|
||||
sent_data.extend_from_slice(tc_raw);
|
||||
let mut queue = self.sent_cmds.borrow_mut();
|
||||
queue.push_back(sent_data);
|
||||
self.sent_cmds.push_back(sent_data);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -163,11 +175,11 @@ mod tests {
|
||||
let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
|
||||
let ping_receiver = PingReceiver::default();
|
||||
is_send(&ping_receiver);
|
||||
let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
|
||||
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
|
||||
.expect("Creating UDP TMTC server failed");
|
||||
is_send(&udp_tc_server);
|
||||
let sph = SpHeader::new_from_apid(0x02);
|
||||
let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
|
||||
let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
|
||||
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let len = pus_tc
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("Error writing PUS TC packet");
|
||||
@ -183,10 +195,9 @@ mod tests {
|
||||
udp_tc_server.last_sender().expect("No sender set"),
|
||||
local_addr
|
||||
);
|
||||
let ping_receiver = &mut udp_tc_server.tc_sender;
|
||||
let mut queue = ping_receiver.sent_cmds.borrow_mut();
|
||||
assert_eq!(queue.len(), 1);
|
||||
let sent_cmd = queue.pop_front().unwrap();
|
||||
let ping_receiver: &mut PingReceiver = udp_tc_server.tc_receiver.downcast_mut().unwrap();
|
||||
assert_eq!(ping_receiver.sent_cmds.len(), 1);
|
||||
let sent_cmd = ping_receiver.sent_cmds.pop_front().unwrap();
|
||||
assert_eq!(sent_cmd, buf[0..len]);
|
||||
}
|
||||
|
||||
@ -194,11 +205,11 @@ mod tests {
|
||||
fn test_nothing_received() {
|
||||
let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7779);
|
||||
let ping_receiver = PingReceiver::default();
|
||||
let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
|
||||
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
|
||||
.expect("Creating UDP TMTC server failed");
|
||||
let res = udp_tc_server.try_recv_tc();
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
matches!(err, ReceiveResult::NothingReceived);
|
||||
assert_eq!(err, ReceiveResult::NothingReceived);
|
||||
}
|
||||
}
|
||||
|
@ -1,40 +1,40 @@
|
||||
use crate::ComponentId;
|
||||
use crate::{
|
||||
pus::verification::{TcStateAccepted, VerificationToken},
|
||||
TargetId,
|
||||
};
|
||||
|
||||
pub type CollectionIntervalFactor = u32;
|
||||
/// Unique identifier for a certain housekeeping dataset.
|
||||
pub type UniqueId = u32;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct HkRequest {
|
||||
pub unique_id: UniqueId,
|
||||
pub variant: HkRequestVariant,
|
||||
}
|
||||
|
||||
impl HkRequest {
|
||||
pub fn new(unique_id: UniqueId, variant: HkRequestVariant) -> Self {
|
||||
Self { unique_id, variant }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum HkRequestVariant {
|
||||
OneShot,
|
||||
EnablePeriodic,
|
||||
DisablePeriodic,
|
||||
ModifyCollectionInterval(CollectionIntervalFactor),
|
||||
pub enum HkRequest {
|
||||
OneShot(UniqueId),
|
||||
Enable(UniqueId),
|
||||
Disable(UniqueId),
|
||||
ModifyCollectionInterval(UniqueId, CollectionIntervalFactor),
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct TargetedHkRequest {
|
||||
pub target_id: ComponentId,
|
||||
pub hk_request: HkRequestVariant,
|
||||
pub target_id: TargetId,
|
||||
pub hk_request: HkRequest,
|
||||
}
|
||||
|
||||
impl TargetedHkRequest {
|
||||
pub fn new(target_id: ComponentId, hk_request: HkRequestVariant) -> Self {
|
||||
pub fn new(target_id: TargetId, hk_request: HkRequest) -> Self {
|
||||
Self {
|
||||
target_id,
|
||||
hk_request,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
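The housekeeping request types above appear in two flavours in this diff: a plain enum and a struct-plus-variant form. Below is a minimal dispatch sketch, assuming the struct form (`HkRequest { unique_id, variant }` with `HkRequestVariant`) and a `satrs::hk` module path; the set ID constant and log output are made up for illustration.

```rust
// Dispatch sketch only: the `satrs::hk` path and the set ID constant are assumptions.
use satrs::hk::{HkRequest, HkRequestVariant, UniqueId};

const EPS_HK_SET: UniqueId = 0x01;

fn handle_hk_request(request: &HkRequest) {
    // The unique ID selects the affected dataset, the variant selects the action.
    match request.variant {
        HkRequestVariant::OneShot => {
            println!("one-shot HK for set {}", request.unique_id)
        }
        HkRequestVariant::EnablePeriodic => {
            println!("enable periodic HK for set {}", request.unique_id)
        }
        HkRequestVariant::DisablePeriodic => {
            println!("disable periodic HK for set {}", request.unique_id)
        }
        HkRequestVariant::ModifyCollectionInterval(factor) => {
            println!(
                "collection interval factor {} for set {}",
                factor, request.unique_id
            )
        }
    }
}

fn main() {
    let request = HkRequest::new(EPS_HK_SET, HkRequestVariant::EnablePeriodic);
    handle_hk_request(&request);
}
```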
pub trait PusHkRequestRouter {
|
||||
type Error;
|
||||
fn route(
|
||||
&self,
|
||||
target_id: TargetId,
|
||||
hk_request: HkRequest,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
@ -14,7 +14,7 @@
|
||||
//! - The [pus] module which provides special support for projects using
|
||||
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
|
||||
#![no_std]
|
||||
#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(doc_cfg, feature(doc_cfg))]
|
||||
#[cfg(feature = "alloc")]
|
||||
extern crate alloc;
|
||||
#[cfg(feature = "alloc")]
|
||||
@ -23,15 +23,16 @@ extern crate downcast_rs;
|
||||
extern crate std;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub mod cfdp;
|
||||
pub mod encoding;
|
||||
pub mod event_man;
|
||||
pub mod events;
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub mod executable;
|
||||
pub mod hal;
|
||||
#[cfg(feature = "std")]
|
||||
pub mod mode_tree;
|
||||
pub mod objects;
|
||||
pub mod pool;
|
||||
pub mod power;
|
||||
pub mod pus;
|
||||
@ -39,7 +40,6 @@ pub mod queue;
|
||||
pub mod request;
|
||||
pub mod res_code;
|
||||
pub mod seq_count;
|
||||
pub mod time;
|
||||
pub mod tmtc;
|
||||
|
||||
pub mod action;
|
||||
@ -49,82 +49,8 @@ pub mod params;
|
||||
|
||||
pub use spacepackets;
|
||||
|
||||
use spacepackets::PacketId;
|
||||
/// Generic channel ID type.
|
||||
pub type ChannelId = u32;
|
||||
|
||||
/// Generic component ID type.
|
||||
pub type ComponentId = u64;
|
||||
|
||||
pub trait ValidatorU16Id {
|
||||
fn validate(&self, id: u16) -> bool;
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl ValidatorU16Id for alloc::vec::Vec<u16> {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
self.contains(&id)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl ValidatorU16Id for hashbrown::HashSet<u16> {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
self.contains(&id)
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for u16 {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
id == *self
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for &u16 {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
id == **self
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for [u16] {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
self.binary_search(&id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for &[u16] {
|
||||
fn validate(&self, id: u16) -> bool {
|
||||
self.binary_search(&id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl ValidatorU16Id for alloc::vec::Vec<spacepackets::PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl ValidatorU16Id for hashbrown::HashSet<spacepackets::PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl ValidatorU16Id for std::collections::HashSet<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for [PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
impl ValidatorU16Id for &[PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
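The `ValidatorU16Id` trait above is implemented for single IDs, vectors, hash sets and sorted slices. A short usage sketch, assuming the trait is exported from the crate root as shown on the side of this diff that defines it. Note that the slice implementations rely on `binary_search`, so slice-based allow lists must be sorted.

```rust
// Usage sketch: assumes `ValidatorU16Id` is exported from the crate root as shown above.
use satrs::ValidatorU16Id;

fn main() {
    // A single allowed ID only validates itself.
    let single_id = 0x1ff_u16;
    assert!(single_id.validate(0x1ff));
    assert!(!single_id.validate(0x200));

    // The slice implementations use a binary search, so the allow list must be sorted.
    let allowed_ids: [u16; 3] = [0x010, 0x1ff, 0x3e8];
    assert!(allowed_ids.as_slice().validate(0x1ff));
    assert!(!allowed_ids.as_slice().validate(0x042));

    // The vector implementation uses a linear `contains` check (requires `alloc`).
    let allowed_vec = vec![0x3e8_u16, 0x010, 0x1ff];
    assert!(allowed_vec.validate(0x010));
}
```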
/// Generic target ID type.
|
||||
pub type TargetId = u64;
|
||||
|
@ -1,95 +1,67 @@
|
||||
use core::mem::size_of;
|
||||
use satrs_shared::res_code::ResultU16;
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
use spacepackets::ByteConversionError;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub use std_mod::*;
|
||||
|
||||
use crate::{
|
||||
queue::GenericTargetedMessagingError,
|
||||
request::{GenericMessage, MessageMetadata, MessageReceiver, MessageReceiverWithId, RequestId},
|
||||
ComponentId,
|
||||
};
|
||||
|
||||
pub type Mode = u32;
|
||||
pub type Submode = u16;
|
||||
use crate::TargetId;
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct ModeAndSubmode {
|
||||
mode: Mode,
|
||||
submode: Submode,
|
||||
mode: u32,
|
||||
submode: u16,
|
||||
}
|
||||
|
||||
impl ModeAndSubmode {
|
||||
pub const RAW_LEN: usize = size_of::<Mode>() + size_of::<Submode>();
|
||||
|
||||
pub const fn new_mode_only(mode: Mode) -> Self {
|
||||
pub const fn new_mode_only(mode: u32) -> Self {
|
||||
Self { mode, submode: 0 }
|
||||
}
|
||||
|
||||
pub const fn new(mode: Mode, submode: Submode) -> Self {
|
||||
pub const fn new(mode: u32, submode: u16) -> Self {
|
||||
Self { mode, submode }
|
||||
}
|
||||
|
||||
pub fn raw_len() -> usize {
|
||||
size_of::<u32>() + size_of::<u16>()
|
||||
}
|
||||
|
||||
pub fn from_be_bytes(buf: &[u8]) -> Result<Self, ByteConversionError> {
|
||||
if buf.len() < 6 {
|
||||
return Err(ByteConversionError::FromSliceTooSmall {
|
||||
expected: Self::RAW_LEN,
|
||||
expected: 6,
|
||||
found: buf.len(),
|
||||
});
|
||||
}
|
||||
Ok(Self {
|
||||
mode: Mode::from_be_bytes(buf[0..size_of::<Mode>()].try_into().unwrap()),
|
||||
submode: Submode::from_be_bytes(
|
||||
buf[size_of::<Mode>()..size_of::<Mode>() + size_of::<Submode>()]
|
||||
.try_into()
|
||||
.unwrap(),
|
||||
),
|
||||
mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
|
||||
submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
if buf.len() < Self::RAW_LEN {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
expected: Self::RAW_LEN,
|
||||
found: buf.len(),
|
||||
});
|
||||
}
|
||||
buf[0..size_of::<Mode>()].copy_from_slice(&self.mode.to_be_bytes());
|
||||
buf[size_of::<Mode>()..Self::RAW_LEN].copy_from_slice(&self.submode.to_be_bytes());
|
||||
Ok(Self::RAW_LEN)
|
||||
}
|
||||
|
||||
pub fn mode(&self) -> Mode {
|
||||
pub fn mode(&self) -> u32 {
|
||||
self.mode
|
||||
}
|
||||
|
||||
pub fn submode(&self) -> Submode {
|
||||
pub fn submode(&self) -> u16 {
|
||||
self.submode
|
||||
}
|
||||
}
|
||||
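A round-trip sketch for the serialization helpers above, assuming the `RAW_LEN`/`write_to_be_bytes`/`from_be_bytes` flavour of the API shown in this hunk and a `satrs::mode` module path (both assumptions).

```rust
// Big-endian round trip for ModeAndSubmode; module path is an assumption.
use satrs::mode::ModeAndSubmode;

fn main() {
    let mode_and_submode = ModeAndSubmode::new(2, 1);

    // Serialize into a fixed-size buffer in network (big) endianness.
    let mut buf = [0u8; ModeAndSubmode::RAW_LEN];
    let written = mode_and_submode
        .write_to_be_bytes(&mut buf)
        .expect("buffer too small");
    assert_eq!(written, ModeAndSubmode::RAW_LEN);

    // Deserialize again and check the round trip.
    let read_back = ModeAndSubmode::from_be_bytes(&buf).expect("deserialization failed");
    assert_eq!(read_back.mode(), 2);
    assert_eq!(read_back.submode(), 1);
}
```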
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct TargetedModeCommand {
|
||||
pub address: ComponentId,
|
||||
pub address: TargetId,
|
||||
pub mode_submode: ModeAndSubmode,
|
||||
}
|
||||
|
||||
impl TargetedModeCommand {
|
||||
pub const fn new(address: ComponentId, mode_submode: ModeAndSubmode) -> Self {
|
||||
pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self {
|
||||
Self {
|
||||
address,
|
||||
mode_submode,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn address(&self) -> ComponentId {
|
||||
pub fn address(&self) -> TargetId {
|
||||
self.address
|
||||
}
|
||||
|
||||
@ -109,8 +81,6 @@ impl TargetedModeCommand {
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum ModeRequest {
|
||||
/// Mode information. Can be used to notify other components of changed modes.
|
||||
ModeInfo(ModeAndSubmode),
|
||||
SetMode(ModeAndSubmode),
|
||||
ReadMode,
|
||||
AnnounceMode,
|
||||
@ -120,471 +90,6 @@ pub enum ModeRequest {
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct TargetedModeRequest {
|
||||
target_id: ComponentId,
|
||||
target_id: TargetId,
|
||||
mode_request: ModeRequest,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum ModeReply {
|
||||
/// Reply to a mode request to confirm the commanded mode was reached.
|
||||
ModeReply(ModeAndSubmode),
|
||||
/// Cannot reach the commanded mode. Contains a reason as a [ResultU16].
|
||||
CantReachMode(ResultU16),
|
||||
/// We are in the wrong mode for unknown reasons. Contains the expected and reached mode.
|
||||
WrongMode {
|
||||
expected: ModeAndSubmode,
|
||||
reached: ModeAndSubmode,
|
||||
},
|
||||
}
|
||||
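A sketch of how a mode requestor might evaluate the reply variants above; the function name and log output are illustrative only, and the module path `satrs::mode` is an assumption.

```rust
// Sketch only: evaluates a received ModeReply against the commanded mode.
use satrs::mode::{ModeAndSubmode, ModeReply};

fn evaluate_reply(reply: &ModeReply, commanded: ModeAndSubmode) {
    match reply {
        // The target confirms it reached the commanded mode.
        ModeReply::ModeReply(reached) if *reached == commanded => {
            println!("target reached commanded mode {:?}", reached)
        }
        // The target reports a mode, but not the one that was commanded.
        ModeReply::ModeReply(reached) => {
            println!("target reports unexpected mode {:?}", reached)
        }
        // The target rejected the transition; the result code explains why.
        ModeReply::CantReachMode(reason) => println!("mode transition failed: {:?}", reason),
        // The target detected a mismatch between expected and reached mode on its own.
        ModeReply::WrongMode { expected, reached } => {
            println!("wrong mode: expected {:?}, reached {:?}", expected, reached)
        }
    }
}

fn main() {
    let commanded = ModeAndSubmode::new(2, 0);
    evaluate_reply(&ModeReply::ModeReply(commanded), commanded);
}
```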
|
||||
pub type GenericModeReply = GenericMessage<ModeReply>;
|
||||
|
||||
pub trait ModeRequestSender {
|
||||
fn local_channel_id(&self) -> ComponentId;
|
||||
fn send_mode_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError>;
|
||||
}
|
||||
|
||||
pub trait ModeRequestReceiver {
|
||||
fn try_recv_mode_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError>;
|
||||
}
|
||||
|
||||
impl<R: MessageReceiver<ModeRequest>> ModeRequestReceiver
|
||||
for MessageReceiverWithId<ModeRequest, R>
|
||||
{
|
||||
fn try_recv_mode_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ModeError {
|
||||
Messaging(GenericTargetedMessagingError),
|
||||
}
|
||||
|
||||
impl From<GenericTargetedMessagingError> for ModeError {
|
||||
fn from(value: GenericTargetedMessagingError) -> Self {
|
||||
Self::Messaging(value)
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ModeProvider {
|
||||
fn mode_and_submode(&self) -> ModeAndSubmode;
|
||||
|
||||
fn mode(&self) -> Mode {
|
||||
self.mode_and_submode().mode()
|
||||
}
|
||||
|
||||
fn submode(&self) -> Submode {
|
||||
self.mode_and_submode().submode()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ModeRequestHandler: ModeProvider {
|
||||
type Error;
|
||||
|
||||
fn start_transition(
|
||||
&mut self,
|
||||
requestor: MessageMetadata,
|
||||
mode_and_submode: ModeAndSubmode,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
fn announce_mode(&self, requestor_info: Option<MessageMetadata>, recursive: bool);
|
||||
|
||||
fn handle_mode_reached(
|
||||
&mut self,
|
||||
requestor_info: Option<MessageMetadata>,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
fn handle_mode_info(
|
||||
&mut self,
|
||||
requestor_info: MessageMetadata,
|
||||
info: ModeAndSubmode,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
reply: ModeReply,
|
||||
) -> Result<(), Self::Error>;
|
||||
|
||||
fn handle_mode_request(
|
||||
&mut self,
|
||||
request: GenericMessage<ModeRequest>,
|
||||
) -> Result<(), Self::Error> {
|
||||
match request.message {
|
||||
ModeRequest::SetMode(mode_and_submode) => {
|
||||
self.start_transition(request.requestor_info, mode_and_submode)
|
||||
}
|
||||
ModeRequest::ReadMode => self.send_mode_reply(
|
||||
request.requestor_info,
|
||||
ModeReply::ModeReply(self.mode_and_submode()),
|
||||
),
|
||||
ModeRequest::AnnounceMode => {
|
||||
self.announce_mode(Some(request.requestor_info), false);
|
||||
Ok(())
|
||||
}
|
||||
ModeRequest::AnnounceModeRecursive => {
|
||||
self.announce_mode(Some(request.requestor_info), true);
|
||||
Ok(())
|
||||
}
|
||||
ModeRequest::ModeInfo(info) => self.handle_mode_info(request.requestor_info, info),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ModeReplyReceiver {
|
||||
fn try_recv_mode_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError>;
|
||||
}
|
||||
|
||||
impl<R: MessageReceiver<ModeReply>> ModeReplyReceiver for MessageReceiverWithId<ModeReply, R> {
|
||||
fn try_recv_mode_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
}
|
||||
|
||||
pub trait ModeReplySender {
|
||||
fn local_channel_id(&self) -> ComponentId;
|
||||
|
||||
/// The requestor is assumed to be the target of the reply.
|
||||
fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
reply: ModeReply,
|
||||
) -> Result<(), GenericTargetedMessagingError>;
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use crate::request::{
|
||||
MessageSender, MessageSenderAndReceiver, MessageSenderMap, RequestAndReplySenderAndReceiver,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
impl<S: MessageSender<ModeReply>> MessageSenderMap<ModeReply, S> {
|
||||
pub fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
target_id: ComponentId,
|
||||
request: ModeReply,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(requestor_info, target_id, request)
|
||||
}
|
||||
|
||||
pub fn add_reply_target(&mut self, target_id: ComponentId, request_sender: S) {
|
||||
self.add_message_target(target_id, request_sender)
|
||||
}
|
||||
}
|
||||
|
||||
impl<FROM, S: MessageSender<ModeReply>, R: MessageReceiver<FROM>> ModeReplySender
|
||||
for MessageSenderAndReceiver<ModeReply, FROM, S, R>
|
||||
{
|
||||
fn local_channel_id(&self) -> ComponentId {
|
||||
self.local_channel_id_generic()
|
||||
}
|
||||
|
||||
fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
request: ModeReply,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.message_sender_map.send_mode_reply(
|
||||
MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()),
|
||||
requestor_info.sender_id(),
|
||||
request,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<TO, S: MessageSender<TO>, R: MessageReceiver<ModeReply>> ModeReplyReceiver
|
||||
for MessageSenderAndReceiver<TO, ModeReply, S, R>
|
||||
{
|
||||
fn try_recv_mode_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
|
||||
self.message_receiver.try_recv_message()
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REQUEST,
|
||||
S0: MessageSender<REQUEST>,
|
||||
R0: MessageReceiver<ModeReply>,
|
||||
S1: MessageSender<ModeReply>,
|
||||
R1: MessageReceiver<REQUEST>,
|
||||
> RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
|
||||
{
|
||||
pub fn add_reply_target(&mut self, target_id: ComponentId, reply_sender: S1) {
|
||||
self.reply_sender_map
|
||||
.add_message_target(target_id, reply_sender)
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REQUEST,
|
||||
S0: MessageSender<REQUEST>,
|
||||
R0: MessageReceiver<ModeReply>,
|
||||
S1: MessageSender<ModeReply>,
|
||||
R1: MessageReceiver<REQUEST>,
|
||||
> ModeReplySender for RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
|
||||
{
|
||||
fn local_channel_id(&self) -> ComponentId {
|
||||
self.local_channel_id_generic()
|
||||
}
|
||||
|
||||
fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
request: ModeReply,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.reply_sender_map.send_mode_reply(
|
||||
MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()),
|
||||
requestor_info.sender_id(),
|
||||
request,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REQUEST,
|
||||
S0: MessageSender<REQUEST>,
|
||||
R0: MessageReceiver<ModeReply>,
|
||||
S1: MessageSender<ModeReply>,
|
||||
R1: MessageReceiver<REQUEST>,
|
||||
> ModeReplyReceiver
|
||||
for RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
|
||||
{
|
||||
fn try_recv_mode_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
|
||||
self.reply_receiver.try_recv_message()
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper type definition for a mode handler which can handle mode requests.
|
||||
pub type ModeRequestHandlerInterface<S, R> =
|
||||
MessageSenderAndReceiver<ModeReply, ModeRequest, S, R>;
|
||||
|
||||
impl<S: MessageSender<ModeReply>, R: MessageReceiver<ModeRequest>>
|
||||
ModeRequestHandlerInterface<S, R>
|
||||
{
|
||||
pub fn try_recv_mode_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
|
||||
pub fn send_mode_reply(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
reply: ModeReply,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(
|
||||
requestor_info.request_id(),
|
||||
requestor_info.sender_id(),
|
||||
reply,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper type definition for a mode handler object which can send mode requests and receive
|
||||
/// mode replies.
|
||||
pub type ModeRequestorInterface<S, R> = MessageSenderAndReceiver<ModeRequest, ModeReply, S, R>;
|
||||
|
||||
impl<S: MessageSender<ModeRequest>, R: MessageReceiver<ModeReply>> ModeRequestorInterface<S, R> {
|
||||
pub fn try_recv_mode_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
|
||||
pub fn send_mode_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
reply: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(request_id, target_id, reply)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper type definition for a mode handler object which can both send mode requests and
|
||||
/// process mode requests.
|
||||
pub type ModeInterface<S0, R0, S1, R1> =
|
||||
RequestAndReplySenderAndReceiver<ModeRequest, ModeReply, S0, R0, S1, R1>;
|
||||
|
||||
impl<S: MessageSender<ModeRequest>> MessageSenderMap<ModeRequest, S> {
|
||||
pub fn send_mode_request(
|
||||
&self,
|
||||
requestor_info: MessageMetadata,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(requestor_info, target_id, request)
|
||||
}
|
||||
|
||||
pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S) {
|
||||
self.add_message_target(target_id, request_sender)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
impl<S: MessageSender<ModeRequest>> ModeRequestSender for MessageSenderMapWithId<ModeRequest, S> {
|
||||
fn local_channel_id(&self) -> ComponentId {
|
||||
self.local_channel_id
|
||||
}
|
||||
|
||||
fn send_mode_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(request_id, target_id, request)
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
impl<TO, S: MessageSender<TO>, R: MessageReceiver<ModeRequest>> ModeRequestReceiver
|
||||
for MessageSenderAndReceiver<TO, ModeRequest, S, R>
|
||||
{
|
||||
fn try_recv_mode_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
|
||||
self.message_receiver.try_recv_message()
|
||||
}
|
||||
}
|
||||
|
||||
impl<FROM, S: MessageSender<ModeRequest>, R: MessageReceiver<FROM>> ModeRequestSender
|
||||
for MessageSenderAndReceiver<ModeRequest, FROM, S, R>
|
||||
{
|
||||
fn local_channel_id(&self) -> ComponentId {
|
||||
self.local_channel_id_generic()
|
||||
}
|
||||
|
||||
fn send_mode_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.message_sender_map.send_mode_request(
|
||||
MessageMetadata::new(request_id, self.local_channel_id()),
|
||||
target_id,
|
||||
request,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REPLY,
|
||||
S0: MessageSender<ModeRequest>,
|
||||
R0: MessageReceiver<REPLY>,
|
||||
S1: MessageSender<REPLY>,
|
||||
R1: MessageReceiver<ModeRequest>,
|
||||
> RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
|
||||
{
|
||||
pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S0) {
|
||||
self.request_sender_map
|
||||
.add_message_target(target_id, request_sender)
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REPLY,
|
||||
S0: MessageSender<ModeRequest>,
|
||||
R0: MessageReceiver<REPLY>,
|
||||
S1: MessageSender<REPLY>,
|
||||
R1: MessageReceiver<ModeRequest>,
|
||||
> ModeRequestSender
|
||||
for RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
|
||||
{
|
||||
fn local_channel_id(&self) -> ComponentId {
|
||||
self.local_channel_id_generic()
|
||||
}
|
||||
|
||||
fn send_mode_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
request: ModeRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.request_sender_map.send_mode_request(
|
||||
MessageMetadata::new(request_id, self.local_channel_id()),
|
||||
target_id,
|
||||
request,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<
|
||||
REPLY,
|
||||
S0: MessageSender<ModeRequest>,
|
||||
R0: MessageReceiver<REPLY>,
|
||||
S1: MessageSender<REPLY>,
|
||||
R1: MessageReceiver<ModeRequest>,
|
||||
> ModeRequestReceiver
|
||||
for RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
|
||||
{
|
||||
fn try_recv_mode_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
|
||||
self.request_receiver.try_recv_message()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod std_mod {
|
||||
use std::sync::mpsc;
|
||||
|
||||
use super::*;
|
||||
|
||||
pub type ModeRequestHandlerMpsc = ModeRequestHandlerInterface<
|
||||
mpsc::Sender<GenericMessage<ModeReply>>,
|
||||
mpsc::Receiver<GenericMessage<ModeRequest>>,
|
||||
>;
|
||||
pub type ModeRequestHandlerMpscBounded = ModeRequestHandlerInterface<
|
||||
mpsc::SyncSender<GenericMessage<ModeReply>>,
|
||||
mpsc::Receiver<GenericMessage<ModeRequest>>,
|
||||
>;
|
||||
|
||||
pub type ModeRequestorMpsc = ModeRequestorInterface<
|
||||
mpsc::Sender<GenericMessage<ModeRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
>;
|
||||
pub type ModeRequestorBoundedMpsc = ModeRequestorInterface<
|
||||
mpsc::SyncSender<GenericMessage<ModeRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
>;
|
||||
|
||||
pub type ModeRequestorAndHandlerMpsc = ModeInterface<
|
||||
mpsc::Sender<GenericMessage<ModeRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
mpsc::Sender<GenericMessage<ModeReply>>,
|
||||
mpsc::Receiver<GenericMessage<ModeRequest>>,
|
||||
>;
|
||||
pub type ModeRequestorAndHandlerMpscBounded = ModeInterface<
|
||||
mpsc::SyncSender<GenericMessage<ModeRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ModeReply>>,
|
||||
mpsc::SyncSender<GenericMessage<ModeReply>>,
|
||||
mpsc::Receiver<GenericMessage<ModeRequest>>,
|
||||
>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
||||
|
@ -1,37 +0,0 @@
|
||||
use alloc::vec::Vec;
|
||||
use hashbrown::HashMap;
|
||||
|
||||
use crate::{
|
||||
mode::{Mode, ModeAndSubmode, Submode},
|
||||
ComponentId,
|
||||
};
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum TableEntryType {
|
||||
/// Target table containing information about the expected children modes for a given mode.
|
||||
Target,
|
||||
/// Sequence table which contains information about how to reach a target table, including
|
||||
/// the order of the sequences.
|
||||
Sequence,
|
||||
}
|
||||
|
||||
pub struct ModeTableEntry {
|
||||
/// Name of respective table entry.
|
||||
pub name: &'static str,
|
||||
/// Target channel ID.
|
||||
pub channel_id: ComponentId,
|
||||
pub mode_submode: ModeAndSubmode,
|
||||
pub allowed_submode_mask: Option<Submode>,
|
||||
pub check_success: bool,
|
||||
}
|
||||
|
||||
pub struct ModeTableMapValue {
|
||||
/// Name for a given mode table entry.
|
||||
pub name: &'static str,
|
||||
pub entries: Vec<ModeTableEntry>,
|
||||
}
|
||||
|
||||
pub type ModeTable = HashMap<Mode, ModeTableMapValue>;
|
||||
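A sketch of filling such a target table for a hypothetical SAFE mode. The `satrs::mode_tree` module path and the component IDs are assumptions made only for illustration.

```rust
// Sketch of a target mode table for a made-up SAFE mode.
use satrs::mode::ModeAndSubmode;
use satrs::mode_tree::{ModeTable, ModeTableEntry, ModeTableMapValue};
use satrs::ComponentId;

const PCDU_HANDLER: ComponentId = 0x01;
const ACS_HANDLER: ComponentId = 0x02;
const MODE_SAFE: u32 = 1;

fn safe_mode_table() -> ModeTable {
    let mut table = ModeTable::new();
    // In SAFE mode, both example handlers are expected to be in their own SAFE mode.
    table.insert(
        MODE_SAFE,
        ModeTableMapValue {
            name: "SAFE",
            entries: vec![
                ModeTableEntry {
                    name: "PCDU handler",
                    channel_id: PCDU_HANDLER,
                    mode_submode: ModeAndSubmode::new_mode_only(MODE_SAFE),
                    allowed_submode_mask: None,
                    check_success: true,
                },
                ModeTableEntry {
                    name: "ACS handler",
                    channel_id: ACS_HANDLER,
                    mode_submode: ModeAndSubmode::new_mode_only(MODE_SAFE),
                    allowed_submode_mask: None,
                    check_success: true,
                },
            ],
        },
    );
    table
}
```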
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
308
satrs/src/objects.rs
Normal file
@ -0,0 +1,308 @@
|
||||
//! # Module providing addressable object support and a manager for them
|
||||
//!
|
||||
//! Each addressable object can be identified using an [object ID][ObjectId].
|
||||
//! The [system object][ManagedSystemObject] trait also allows storing these objects into the
|
||||
//! [object manager][ObjectManager]. They can then be retrieved and cast back to a known type
|
||||
//! using the object ID.
|
||||
//!
|
||||
//! # Examples
|
||||
//!
|
||||
//! ```rust
|
||||
//! use std::any::Any;
|
||||
//! use std::error::Error;
|
||||
//! use satrs::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
|
||||
//!
|
||||
//! struct ExampleSysObj {
|
||||
//! id: ObjectId,
|
||||
//! dummy: u32,
|
||||
//! was_initialized: bool,
|
||||
//! }
|
||||
//!
|
||||
//! impl ExampleSysObj {
|
||||
//! fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
|
||||
//! ExampleSysObj {
|
||||
//! id,
|
||||
//! dummy,
|
||||
//! was_initialized: false,
|
||||
//! }
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl SystemObject for ExampleSysObj {
|
||||
//! type Error = ();
|
||||
//! fn get_object_id(&self) -> &ObjectId {
|
||||
//! &self.id
|
||||
//! }
|
||||
//!
|
||||
//! fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
//! self.was_initialized = true;
|
||||
//! Ok(())
|
||||
//! }
|
||||
//! }
|
||||
//!
|
||||
//! impl ManagedSystemObject for ExampleSysObj {}
|
||||
//!
|
||||
//! let mut obj_manager = ObjectManager::default();
|
||||
//! let obj_id = ObjectId { id: 0, name: "Example 0"};
|
||||
//! let example_obj = ExampleSysObj::new(obj_id, 42);
|
||||
//! obj_manager.insert(Box::new(example_obj));
|
||||
//! let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&obj_id);
|
||||
//! let example_obj = obj_back_casted.unwrap();
|
||||
//! assert_eq!(example_obj.id, obj_id);
|
||||
//! assert_eq!(example_obj.dummy, 42);
|
||||
//! ```
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
#[cfg(feature = "alloc")]
|
||||
use downcast_rs::Downcast;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashMap;
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
use crate::TargetId;
|
||||
|
||||
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
|
||||
pub struct ObjectId {
|
||||
pub id: TargetId,
|
||||
pub name: &'static str,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {
|
||||
use super::*;
|
||||
|
||||
/// Each object which is stored inside the [object manager][ObjectManager] needs to implement
|
||||
/// this trait
|
||||
pub trait SystemObject: Downcast {
|
||||
type Error;
|
||||
fn get_object_id(&self) -> &ObjectId;
|
||||
fn initialize(&mut self) -> Result<(), Self::Error>;
|
||||
}
|
||||
downcast_rs::impl_downcast!(SystemObject assoc Error);
|
||||
|
||||
pub trait ManagedSystemObject: SystemObject + Send {}
|
||||
downcast_rs::impl_downcast!(ManagedSystemObject assoc Error);
|
||||
|
||||
/// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them
|
||||
/// using an [object ID][ObjectId]
|
||||
#[cfg(feature = "alloc")]
|
||||
pub struct ObjectManager<E> {
|
||||
obj_map: HashMap<ObjectId, Box<dyn ManagedSystemObject<Error = E>>>,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<E: 'static> Default for ObjectManager<E> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl<E: 'static> ObjectManager<E> {
|
||||
pub fn new() -> Self {
|
||||
ObjectManager {
|
||||
obj_map: HashMap::new(),
|
||||
}
|
||||
}
|
||||
pub fn insert(&mut self, sys_obj: Box<dyn ManagedSystemObject<Error = E>>) -> bool {
|
||||
let obj_id = sys_obj.get_object_id();
|
||||
if self.obj_map.contains_key(obj_id) {
|
||||
return false;
|
||||
}
|
||||
self.obj_map.insert(*obj_id, sys_obj).is_none()
|
||||
}
|
||||
|
||||
/// Initializes all System Objects in the hash map and returns the number of successful
|
||||
/// initializations
|
||||
pub fn initialize(&mut self) -> Result<u32, Box<dyn Error>> {
|
||||
let mut init_success = 0;
|
||||
for val in self.obj_map.values_mut() {
|
||||
if val.initialize().is_ok() {
|
||||
init_success += 1
|
||||
}
|
||||
}
|
||||
Ok(init_success)
|
||||
}
|
||||
|
||||
/// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to
|
||||
/// be explicitly passed as a generic parameter or specified on the left hand side of the
|
||||
/// expression.
|
||||
pub fn get_ref<T: ManagedSystemObject<Error = E>>(&self, key: &ObjectId) -> Option<&T> {
|
||||
self.obj_map.get(key).and_then(|o| o.downcast_ref::<T>())
|
||||
}
|
||||
|
||||
/// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve
|
||||
/// needs to be explicitly passed as a generic parameter or specified on the left hand side
|
||||
/// of the expression.
|
||||
pub fn get_mut<T: ManagedSystemObject<Error = E>>(
|
||||
&mut self,
|
||||
key: &ObjectId,
|
||||
) -> Option<&mut T> {
|
||||
self.obj_map
|
||||
.get_mut(key)
|
||||
.and_then(|o| o.downcast_mut::<T>())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
|
||||
use std::boxed::Box;
|
||||
use std::string::String;
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::thread;
|
||||
|
||||
struct ExampleSysObj {
|
||||
id: ObjectId,
|
||||
dummy: u32,
|
||||
was_initialized: bool,
|
||||
}
|
||||
|
||||
impl ExampleSysObj {
|
||||
fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
|
||||
ExampleSysObj {
|
||||
id,
|
||||
dummy,
|
||||
was_initialized: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl SystemObject for ExampleSysObj {
|
||||
type Error = ();
|
||||
fn get_object_id(&self) -> &ObjectId {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
self.was_initialized = true;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagedSystemObject for ExampleSysObj {}
|
||||
|
||||
struct OtherExampleObject {
|
||||
id: ObjectId,
|
||||
string: String,
|
||||
was_initialized: bool,
|
||||
}
|
||||
|
||||
impl SystemObject for OtherExampleObject {
|
||||
type Error = ();
|
||||
fn get_object_id(&self) -> &ObjectId {
|
||||
&self.id
|
||||
}
|
||||
|
||||
fn initialize(&mut self) -> Result<(), Self::Error> {
|
||||
self.was_initialized = true;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl ManagedSystemObject for OtherExampleObject {}
|
||||
|
||||
#[test]
|
||||
fn test_obj_manager_simple() {
|
||||
let mut obj_manager = ObjectManager::default();
|
||||
let expl_obj_id = ObjectId {
|
||||
id: 0,
|
||||
name: "Example 0",
|
||||
};
|
||||
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
|
||||
assert!(obj_manager.insert(Box::new(example_obj)));
|
||||
let res = obj_manager.initialize();
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 1);
|
||||
let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&expl_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.dummy, 42);
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
|
||||
let second_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let second_example_obj = OtherExampleObject {
|
||||
id: second_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
assert!(obj_manager.insert(Box::new(second_example_obj)));
|
||||
let res = obj_manager.initialize();
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 2);
|
||||
let obj_back_casted: Option<&OtherExampleObject> = obj_manager.get_ref(&second_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
|
||||
let existing_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let invalid_obj = OtherExampleObject {
|
||||
id: existing_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
assert!(!obj_manager.insert(Box::new(invalid_obj)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn object_man_threaded() {
|
||||
let obj_manager = Arc::new(Mutex::new(ObjectManager::new()));
|
||||
let expl_obj_id = ObjectId {
|
||||
id: 0,
|
||||
name: "Example 0",
|
||||
};
|
||||
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
|
||||
let second_obj_id = ObjectId {
|
||||
id: 12,
|
||||
name: "Example 1",
|
||||
};
|
||||
let second_example_obj = OtherExampleObject {
|
||||
id: second_obj_id,
|
||||
string: String::from("Hello Test"),
|
||||
was_initialized: false,
|
||||
};
|
||||
|
||||
let mut obj_man_handle = obj_manager.lock().expect("Mutex lock failed");
|
||||
assert!(obj_man_handle.insert(Box::new(example_obj)));
|
||||
assert!(obj_man_handle.insert(Box::new(second_example_obj)));
|
||||
let res = obj_man_handle.initialize();
|
||||
std::mem::drop(obj_man_handle);
|
||||
assert!(res.is_ok());
|
||||
assert_eq!(res.unwrap(), 2);
|
||||
let obj_man_0 = obj_manager.clone();
|
||||
let jh0 = thread::spawn(move || {
|
||||
let locked_man = obj_man_0.lock().expect("Mutex lock failed");
|
||||
let obj_back_casted: Option<&ExampleSysObj> = locked_man.get_ref(&expl_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.dummy, 42);
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
std::mem::drop(locked_man)
|
||||
});
|
||||
|
||||
let jh1 = thread::spawn(move || {
|
||||
let locked_man = obj_manager.lock().expect("Mutex lock failed");
|
||||
let obj_back_casted: Option<&OtherExampleObject> = locked_man.get_ref(&second_obj_id);
|
||||
assert!(obj_back_casted.is_some());
|
||||
let expl_obj_back_casted = obj_back_casted.unwrap();
|
||||
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
|
||||
assert!(expl_obj_back_casted.was_initialized);
|
||||
std::mem::drop(locked_man)
|
||||
});
|
||||
jh0.join().expect("Joining thread 0 failed");
|
||||
jh1.join().expect("Joining thread 1 failed");
|
||||
}
|
||||
}
|
@ -43,7 +43,7 @@
|
||||
//! This includes the [ParamsHeapless] enumeration for contained values which do not require heap
|
||||
//! allocation, and the [Params] which enumerates [ParamsHeapless] and some additional types which
|
||||
//! require [alloc] support but allow for more flexibility.
|
||||
use crate::pool::PoolAddr;
|
||||
use crate::pool::StoreAddr;
|
||||
use core::fmt::Debug;
|
||||
use core::mem::size_of;
|
||||
use paste::paste;
|
||||
@ -60,28 +60,21 @@ use alloc::vec::Vec;
|
||||
/// Generic trait for objects which can be converted into a raw network (big) endian
|
||||
/// byte format.
|
||||
pub trait WritableToBeBytes {
|
||||
fn written_len(&self) -> usize;
|
||||
fn raw_len(&self) -> usize;
|
||||
/// Writes the object to a raw buffer in network endianness (big)
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
fn to_vec(&self) -> Result<Vec<u8>, ByteConversionError> {
|
||||
let mut vec = alloc::vec![0; self.written_len()];
|
||||
self.write_to_be_bytes(&mut vec)?;
|
||||
Ok(vec)
|
||||
}
|
||||
}
|
||||
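A usage sketch for the trait above, assuming the `written_len`/`to_vec` flavour shown in this hunk and that `U32Pair` and `WritableToBeBytes` are exported from `satrs::params` (module path is an assumption).

```rust
// Usage sketch: writing a parameter pair to a stack buffer and to an allocated vector.
use satrs::params::{U32Pair, WritableToBeBytes};

fn main() {
    let pair = U32Pair(500, 1000);

    // The written length is known up front, so a stack buffer can be sized accordingly.
    let mut buf = [0u8; 8];
    assert_eq!(pair.written_len(), 8);
    let written = pair.write_to_be_bytes(&mut buf).expect("buffer too small");
    assert_eq!(written, 8);
    assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
    assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);

    // With the `alloc` feature, the convenience method allocates the buffer itself.
    let vec = pair.to_vec().expect("conversion failed");
    assert_eq!(vec, buf);
}
```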
|
||||
macro_rules! param_to_be_bytes_impl {
|
||||
($Newtype: ident) => {
|
||||
impl WritableToBeBytes for $Newtype {
|
||||
#[inline]
|
||||
fn written_len(&self) -> usize {
|
||||
fn raw_len(&self) -> usize {
|
||||
size_of::<<Self as ToBeBytes>::ByteArray>()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
let raw_len = WritableToBeBytes::written_len(self);
|
||||
let raw_len = self.raw_len();
|
||||
if buf.len() < raw_len {
|
||||
return Err(ByteConversionError::ToSliceTooSmall {
|
||||
found: buf.len(),
|
||||
@ -389,32 +382,32 @@ pub enum ParamsRaw {
|
||||
}
|
||||
|
||||
impl WritableToBeBytes for ParamsRaw {
|
||||
fn written_len(&self) -> usize {
|
||||
fn raw_len(&self) -> usize {
|
||||
match self {
|
||||
ParamsRaw::U8(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U8Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U8Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I8(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I8Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I8Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U16(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U16Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U16Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I16(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I16Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I16Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U32(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U32Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U32Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I32(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I32Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I32Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::F32(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::F32Pair(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::F32Triplet(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U64(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::I64(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::F64(v) => WritableToBeBytes::written_len(v),
|
||||
ParamsRaw::U8(v) => v.raw_len(),
|
||||
ParamsRaw::U8Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U8Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I8(v) => v.raw_len(),
|
||||
ParamsRaw::I8Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I8Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U16(v) => v.raw_len(),
|
||||
ParamsRaw::U16Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U16Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I16(v) => v.raw_len(),
|
||||
ParamsRaw::I16Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I16Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U32(v) => v.raw_len(),
|
||||
ParamsRaw::U32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::U32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::I32(v) => v.raw_len(),
|
||||
ParamsRaw::I32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::I32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::F32(v) => v.raw_len(),
|
||||
ParamsRaw::F32Pair(v) => v.raw_len(),
|
||||
ParamsRaw::F32Triplet(v) => v.raw_len(),
|
||||
ParamsRaw::U64(v) => v.raw_len(),
|
||||
ParamsRaw::I64(v) => v.raw_len(),
|
||||
ParamsRaw::F64(v) => v.raw_len(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -467,7 +460,7 @@ params_raw_from_newtype!(
|
||||
);
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub enum ParamsEcssEnum {
|
||||
pub enum EcssEnumParams {
|
||||
U8(EcssEnumU8),
|
||||
U16(EcssEnumU16),
|
||||
U32(EcssEnumU32),
|
||||
@ -475,46 +468,40 @@ pub enum ParamsEcssEnum {
|
||||
}
|
||||
|
||||
macro_rules! writable_as_be_bytes_ecss_enum_impl {
|
||||
($EnumIdent: ident, $Ty: ident) => {
|
||||
impl From<$EnumIdent> for ParamsEcssEnum {
|
||||
fn from(e: $EnumIdent) -> Self {
|
||||
Self::$Ty(e)
|
||||
}
|
||||
}
|
||||
|
||||
($EnumIdent: ident) => {
|
||||
impl WritableToBeBytes for $EnumIdent {
|
||||
fn written_len(&self) -> usize {
|
||||
fn raw_len(&self) -> usize {
|
||||
self.size()
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.written_len())
|
||||
<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.raw_len())
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8, U8);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16, U16);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32, U32);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64, U64);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32);
|
||||
writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64);
|
||||
|
||||
impl WritableToBeBytes for ParamsEcssEnum {
|
||||
fn written_len(&self) -> usize {
|
||||
impl WritableToBeBytes for EcssEnumParams {
|
||||
fn raw_len(&self) -> usize {
|
||||
match self {
|
||||
ParamsEcssEnum::U8(e) => e.written_len(),
|
||||
ParamsEcssEnum::U16(e) => e.written_len(),
|
||||
ParamsEcssEnum::U32(e) => e.written_len(),
|
||||
ParamsEcssEnum::U64(e) => e.written_len(),
|
||||
EcssEnumParams::U8(e) => e.raw_len(),
|
||||
EcssEnumParams::U16(e) => e.raw_len(),
|
||||
EcssEnumParams::U32(e) => e.raw_len(),
|
||||
EcssEnumParams::U64(e) => e.raw_len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
match self {
|
||||
ParamsEcssEnum::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
ParamsEcssEnum::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
ParamsEcssEnum::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
ParamsEcssEnum::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -523,19 +510,7 @@ impl WritableToBeBytes for ParamsEcssEnum {
|
||||
#[derive(Debug, Copy, Clone, PartialEq)]
|
||||
pub enum ParamsHeapless {
|
||||
Raw(ParamsRaw),
|
||||
EcssEnum(ParamsEcssEnum),
|
||||
}
|
||||
|
||||
impl From<ParamsRaw> for ParamsHeapless {
|
||||
fn from(v: ParamsRaw) -> Self {
|
||||
Self::Raw(v)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParamsEcssEnum> for ParamsHeapless {
|
||||
fn from(v: ParamsEcssEnum) -> Self {
|
||||
Self::EcssEnum(v)
|
||||
}
|
||||
EcssEnum(EcssEnumParams),
|
||||
}
|
||||
|
||||
macro_rules! from_conversions_for_raw {
|
||||
@ -584,19 +559,21 @@ from_conversions_for_raw!(
|
||||
|
||||
/// Generic enumeration for additional parameters, including parameters which rely on heap
|
||||
/// allocations.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Clone)]
|
||||
#[non_exhaustive]
|
||||
pub enum Params {
|
||||
Heapless(ParamsHeapless),
|
||||
Store(PoolAddr),
|
||||
Store(StoreAddr),
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
Vec(Vec<u8>),
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
String(String),
|
||||
}
|
||||
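A sketch of the `From` conversions into `Params` and of matching on the resulting values, assuming the enum is exported from `satrs::params` (an assumption) and that the `alloc` feature is enabled.

```rust
// Sketch only: converts a few value types into Params and inspects them.
use satrs::params::{Params, ParamsHeapless, ParamsRaw};

fn describe(param: &Params) {
    match param {
        Params::Heapless(ParamsHeapless::Raw(raw)) => println!("raw parameter tuple: {:?}", raw),
        Params::Heapless(_) => println!("other heapless parameter"),
        Params::Vec(data) => println!("byte parameter with {} bytes", data.len()),
        Params::String(text) => println!("string parameter: {}", text),
        // Pool-backed and possible future variants.
        _ => println!("other parameter type"),
    }
}

fn main() {
    // Heapless parameters wrap plain value tuples, here a pair of u32 values.
    let heapless: Params = ParamsHeapless::Raw(ParamsRaw::from((500_u32, 1000_u32))).into();
    // Slices and string slices are copied into the owning variants.
    let from_bytes = Params::from([0x01_u8, 0x02, 0x03].as_slice());
    let from_str = Params::from("low battery voltage");

    describe(&heapless);
    describe(&from_bytes);
    describe(&from_str);
}
```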
|
||||
impl From<PoolAddr> for Params {
|
||||
fn from(x: PoolAddr) -> Self {
|
||||
impl From<StoreAddr> for Params {
|
||||
fn from(x: StoreAddr) -> Self {
|
||||
Self::Store(x)
|
||||
}
|
||||
}
|
||||
@ -607,13 +584,8 @@ impl From<ParamsHeapless> for Params {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ParamsRaw> for Params {
|
||||
fn from(x: ParamsRaw) -> Self {
|
||||
Self::Heapless(ParamsHeapless::Raw(x))
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
impl From<Vec<u8>> for Params {
|
||||
fn from(val: Vec<u8>) -> Self {
|
||||
Self::Vec(val)
|
||||
@ -622,6 +594,7 @@ impl From<Vec<u8>> for Params {
|
||||
|
||||
/// Converts a byte slice into the [Params::Vec] variant
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
impl From<&[u8]> for Params {
|
||||
fn from(val: &[u8]) -> Self {
|
||||
Self::Vec(val.to_vec())
|
||||
@ -629,6 +602,7 @@ impl From<&[u8]> for Params {
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
impl From<String> for Params {
|
||||
fn from(val: String) -> Self {
|
||||
Self::String(val)
|
||||
@ -636,6 +610,7 @@ impl From<String> for Params {
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
/// Converts a string slice into the [Params::String] variant
|
||||
impl From<&str> for Params {
|
||||
fn from(val: &str) -> Self {
|
||||
@ -643,56 +618,10 @@ impl From<&str> for Params {
|
||||
}
|
||||
}
|
||||
|
||||
impl WritableToBeBytes for ParamsHeapless {
|
||||
fn written_len(&self) -> usize {
|
||||
match self {
|
||||
ParamsHeapless::Raw(raw) => raw.written_len(),
|
||||
ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.written_len(),
|
||||
}
|
||||
}
|
||||
|
||||
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
|
||||
match self {
|
||||
ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
|
||||
ParamsHeapless::EcssEnum(ecss_enum) => ecss_enum.write_to_be_bytes(buf),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
fn test_cloning_works(param_raw: &impl WritableToBeBytes) {
|
||||
let _new_param = param_raw;
|
||||
}
|
||||
|
||||
fn test_writing_fails(param_raw: &(impl WritableToBeBytes + ToBeBytes)) {
|
||||
let pair_size = WritableToBeBytes::written_len(param_raw);
|
||||
assert_eq!(pair_size, ToBeBytes::written_len(param_raw));
|
||||
let mut vec = alloc::vec![0; pair_size - 1];
|
||||
let result = param_raw.write_to_be_bytes(&mut vec);
|
||||
if let Err(ByteConversionError::ToSliceTooSmall { found, expected }) = result {
|
||||
assert_eq!(found, pair_size - 1);
|
||||
assert_eq!(expected, pair_size);
|
||||
} else {
|
||||
panic!("Expected ByteConversionError::ToSliceTooSmall");
|
||||
}
|
||||
}
|
||||
|
||||
fn test_writing(params_raw: &ParamsRaw, writeable: &impl WritableToBeBytes) {
|
||||
assert_eq!(params_raw.written_len(), writeable.written_len());
|
||||
let mut vec = alloc::vec![0; writeable.written_len()];
|
||||
writeable
|
||||
.write_to_be_bytes(&mut vec)
|
||||
.expect("writing parameter to buffer failed");
|
||||
let mut other_vec = alloc::vec![0; writeable.written_len()];
|
||||
params_raw
|
||||
.write_to_be_bytes(&mut other_vec)
|
||||
.expect("writing parameter to buffer failed");
|
||||
assert_eq!(vec, other_vec);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic_u32_pair() {
|
||||
let u32_pair = U32Pair(4, 8);
|
||||
@ -703,32 +632,10 @@ mod tests {
|
||||
assert_eq!(u32_conv_back, 4);
|
||||
u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap());
|
||||
assert_eq!(u32_conv_back, 8);
|
||||
test_writing_fails(&u32_pair);
|
||||
test_cloning_works(&u32_pair);
|
||||
let u32_praw = ParamsRaw::from(u32_pair);
|
||||
test_writing(&u32_praw, &u32_pair);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u16_pair_writing_fails() {
|
||||
let u16_pair = U16Pair(4, 8);
|
||||
test_writing_fails(&u16_pair);
|
||||
test_cloning_works(&u16_pair);
|
||||
let u16_praw = ParamsRaw::from(u16_pair);
|
||||
test_writing(&u16_praw, &u16_pair);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_pair_writing_fails() {
|
||||
let u8_pair = U8Pair(4, 8);
|
||||
test_writing_fails(&u8_pair);
|
||||
test_cloning_works(&u8_pair);
|
||||
let u8_praw = ParamsRaw::from(u8_pair);
|
||||
test_writing(&u8_praw, &u8_pair);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_i8_test() {
|
||||
fn basic_signed_test_pair() {
|
||||
let i8_pair = I8Pair(-3, -16);
|
||||
assert_eq!(i8_pair.0, -3);
|
||||
assert_eq!(i8_pair.1, -16);
|
||||
@ -737,31 +644,10 @@ mod tests {
|
||||
assert_eq!(i8_conv_back, -3);
|
||||
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -16);
|
||||
test_writing_fails(&i8_pair);
|
||||
test_cloning_works(&i8_pair);
|
||||
let i8_praw = ParamsRaw::from(i8_pair);
|
||||
test_writing(&i8_praw, &i8_pair);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_from_u32_triplet() {
|
||||
let raw_params = U32Triplet::from((1, 2, 3));
|
||||
assert_eq!(raw_params.0, 1);
|
||||
assert_eq!(raw_params.1, 2);
|
||||
assert_eq!(raw_params.2, 3);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 12);
|
||||
assert_eq!(
|
||||
raw_params.to_be_bytes(),
|
||||
[0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3]
|
||||
);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u32_triplet = ParamsRaw::from(raw_params);
|
||||
test_writing(&u32_triplet, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i8_triplet() {
|
||||
fn basic_signed_test_triplet() {
|
||||
let i8_triplet = I8Triplet(-3, -16, -126);
|
||||
assert_eq!(i8_triplet.0, -3);
|
||||
assert_eq!(i8_triplet.1, -16);
|
||||
@ -773,10 +659,6 @@ mod tests {
|
||||
assert_eq!(i8_conv_back, -16);
|
||||
i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap());
|
||||
assert_eq!(i8_conv_back, -126);
|
||||
test_writing_fails(&i8_triplet);
|
||||
test_cloning_works(&i8_triplet);
|
||||
let i8_praw = ParamsRaw::from(i8_triplet);
|
||||
test_writing(&i8_praw, &i8_triplet);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -799,358 +681,4 @@ mod tests {
|
||||
panic!("Params type is not a vector")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_params_written_len_raw() {
|
||||
let param_raw = ParamsRaw::from((500_u32, 1000_u32));
|
||||
assert_eq!(param_raw.written_len(), 8);
|
||||
let mut buf: [u8; 8] = [0; 8];
|
||||
param_raw
|
||||
.write_to_be_bytes(&mut buf)
|
||||
.expect("writing to buffer failed");
|
||||
assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
|
||||
assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heapless_param_writable_trait_raw() {
|
||||
let param_heapless = ParamsHeapless::Raw(ParamsRaw::from((500_u32, 1000_u32)));
|
||||
assert_eq!(param_heapless.written_len(), 8);
|
||||
let mut buf: [u8; 8] = [0; 8];
|
||||
let size = param_heapless
|
||||
.write_to_be_bytes(&mut buf)
|
||||
.expect("writing failed");
|
||||
assert_eq!(size, 8);
|
||||
assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
|
||||
assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heapless_param_writable_trait_ecss_enum() {
|
||||
let param_heapless = ParamsHeapless::EcssEnum(ParamsEcssEnum::U16(5.into()));
|
||||
assert_eq!(param_heapless.written_len(), 2);
|
||||
let mut buf: [u8; 2] = [0; 2];
|
||||
let size = param_heapless
|
||||
.write_to_be_bytes(&mut buf)
|
||||
.expect("writing failed");
|
||||
assert_eq!(size, 2);
|
||||
assert_eq!(u16::from_be_bytes(buf[0..2].try_into().unwrap()), 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u32_single() {
|
||||
let raw_params = U32::from(20);
|
||||
assert_eq!(raw_params.0, 20);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
|
||||
assert_eq!(raw_params.to_be_bytes(), [0, 0, 0, 20]);
|
||||
let other = U32::from(20);
|
||||
assert_eq!(raw_params, other);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u32_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u32_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i8_single() {
|
||||
let neg_number: i8 = -5_i8;
|
||||
let raw_params = I8::from(neg_number);
|
||||
assert_eq!(raw_params.0, neg_number);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 1);
|
||||
assert_eq!(raw_params.to_be_bytes(), neg_number.to_be_bytes());
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u8_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u8_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_single() {
|
||||
let raw_params = U8::from(20);
|
||||
assert_eq!(raw_params.0, 20);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 1);
|
||||
assert_eq!(raw_params.to_be_bytes(), [20]);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u32_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u32_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u16_single() {
|
||||
let raw_params = U16::from(0x123);
|
||||
assert_eq!(raw_params.0, 0x123);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 2);
|
||||
assert_eq!(raw_params.to_be_bytes(), [0x01, 0x23]);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u16_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u16_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u16_triplet() {
|
||||
let raw_params = U16Triplet::from((1, 2, 3));
|
||||
assert_eq!(raw_params.0, 1);
|
||||
assert_eq!(raw_params.1, 2);
|
||||
assert_eq!(raw_params.2, 3);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 6);
|
||||
assert_eq!(raw_params.to_be_bytes(), [0, 1, 0, 2, 0, 3]);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u16_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u16_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_triplet() {
|
||||
let raw_params = U8Triplet::from((1, 2, 3));
|
||||
assert_eq!(raw_params.0, 1);
|
||||
assert_eq!(raw_params.1, 2);
|
||||
assert_eq!(raw_params.2, 3);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 3);
|
||||
assert_eq!(raw_params.to_be_bytes(), [1, 2, 3]);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let u8_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&u8_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i16_single() {
|
||||
let value = -300_i16;
|
||||
let raw_params = I16::from(value);
|
||||
assert_eq!(raw_params.0, value);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 2);
|
||||
assert_eq!(raw_params.to_be_bytes(), value.to_be_bytes());
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i16_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i16_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i16_pair() {
|
||||
let raw_params = I16Pair::from((-300, -400));
|
||||
assert_eq!(raw_params.0, -300);
|
||||
assert_eq!(raw_params.1, -400);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i16_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i16_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i16_triplet() {
|
||||
let raw_params = I16Triplet::from((-300, -400, -350));
|
||||
assert_eq!(raw_params.0, -300);
|
||||
assert_eq!(raw_params.1, -400);
|
||||
assert_eq!(raw_params.2, -350);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 6);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i16_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i16_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i32_single() {
|
||||
let raw_params = I32::from(-80000);
|
||||
assert_eq!(raw_params.0, -80000);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i32_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i32_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i32_pair() {
|
||||
let raw_params = I32Pair::from((-80000, -200));
|
||||
assert_eq!(raw_params.0, -80000);
|
||||
assert_eq!(raw_params.1, -200);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 8);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i32_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i32_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i32_triplet() {
|
||||
let raw_params = I32Triplet::from((-80000, -5, -200));
|
||||
assert_eq!(raw_params.0, -80000);
|
||||
assert_eq!(raw_params.1, -5);
|
||||
assert_eq!(raw_params.2, -200);
|
||||
assert_eq!(WritableToBeBytes::written_len(&raw_params), 12);
|
||||
test_writing_fails(&raw_params);
|
||||
test_cloning_works(&raw_params);
|
||||
let i32_praw = ParamsRaw::from(raw_params);
|
||||
test_writing(&i32_praw, &raw_params);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_f32_single() {
|
||||
let param = F32::from(0.1);
|
||||
assert_eq!(param.0, 0.1);
|
||||
assert_eq!(WritableToBeBytes::written_len(¶m), 4);
|
||||
let f32_pair_raw = param.to_be_bytes();
|
||||
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
|
||||
assert_eq!(f32_0, 0.1);
|
||||
test_writing_fails(¶m);
|
||||
test_cloning_works(¶m);
|
||||
let praw = ParamsRaw::from(param);
|
||||
test_writing(&praw, ¶m);
|
||||
let p_try_from = F32::try_from(param.to_be_bytes().as_ref()).expect("try_from failed");
|
||||
assert_eq!(p_try_from, param);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_f32_pair() {
|
||||
let param = F32Pair::from((0.1, 0.2));
|
||||
assert_eq!(param.0, 0.1);
|
||||
assert_eq!(param.1, 0.2);
|
||||
assert_eq!(WritableToBeBytes::written_len(¶m), 8);
|
||||
let f32_pair_raw = param.to_be_bytes();
|
||||
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
|
||||
assert_eq!(f32_0, 0.1);
|
||||
let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap());
|
||||
assert_eq!(f32_1, 0.2);
|
||||
let other_pair = F32Pair::from((0.1, 0.2));
|
||||
assert_eq!(param, other_pair);
|
||||
test_writing_fails(¶m);
|
||||
test_cloning_works(¶m);
|
||||
let praw = ParamsRaw::from(param);
|
||||
test_writing(&praw, ¶m);
|
||||
let p_try_from = F32Pair::try_from(param.to_be_bytes().as_ref()).expect("try_from failed");
|
||||
assert_eq!(p_try_from, param);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_f32_triplet() {
|
||||
let f32 = F32Triplet::from((0.1, -0.1, -5.2));
|
||||
assert_eq!(f32.0, 0.1);
|
||||
assert_eq!(f32.1, -0.1);
|
||||
assert_eq!(f32.2, -5.2);
|
||||
assert_eq!(WritableToBeBytes::written_len(&f32), 12);
|
||||
let f32_pair_raw = f32.to_be_bytes();
|
||||
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
|
||||
assert_eq!(f32_0, 0.1);
|
||||
let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap());
|
||||
assert_eq!(f32_1, -0.1);
|
||||
let f32_2 = f32::from_be_bytes(f32_pair_raw[8..12].try_into().unwrap());
|
||||
assert_eq!(f32_2, -5.2);
|
||||
test_writing_fails(&f32);
|
||||
test_cloning_works(&f32);
|
||||
let f32_praw = ParamsRaw::from(f32);
|
||||
test_writing(&f32_praw, &f32);
|
||||
let f32_try_from =
|
||||
F32Triplet::try_from(f32.to_be_bytes().as_ref()).expect("try_from failed");
|
||||
assert_eq!(f32_try_from, f32);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u64_single() {
|
||||
let u64 = U64::from(0x1010101010);
|
||||
assert_eq!(u64.0, 0x1010101010);
|
||||
assert_eq!(WritableToBeBytes::written_len(&u64), 8);
|
||||
test_writing_fails(&u64);
|
||||
test_cloning_works(&u64);
|
||||
let praw = ParamsRaw::from(u64);
|
||||
test_writing(&praw, &u64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_i64_single() {
|
||||
let i64 = I64::from(-0xfffffffff);
|
||||
assert_eq!(i64.0, -0xfffffffff);
|
||||
assert_eq!(WritableToBeBytes::written_len(&i64), 8);
|
||||
test_writing_fails(&i64);
|
||||
test_cloning_works(&i64);
|
||||
let praw = ParamsRaw::from(i64);
|
||||
test_writing(&praw, &i64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_f64_single() {
|
||||
let value = 823_823_812_832.232_3;
|
||||
let f64 = F64::from(value);
|
||||
assert_eq!(f64.0, value);
|
||||
assert_eq!(WritableToBeBytes::written_len(&f64), 8);
|
||||
test_writing_fails(&f64);
|
||||
test_cloning_works(&f64);
|
||||
let praw = ParamsRaw::from(f64);
|
||||
test_writing(&praw, &f64);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_f64_triplet() {
|
||||
let f64_triplet = F64Triplet::from((0.1, 0.2, 0.3));
|
||||
assert_eq!(f64_triplet.0, 0.1);
|
||||
assert_eq!(f64_triplet.1, 0.2);
|
||||
assert_eq!(f64_triplet.2, 0.3);
|
||||
assert_eq!(WritableToBeBytes::written_len(&f64_triplet), 24);
|
||||
let f64_triplet_raw = f64_triplet.to_be_bytes();
|
||||
let f64_0 = f64::from_be_bytes(f64_triplet_raw[0..8].try_into().unwrap());
|
||||
assert_eq!(f64_0, 0.1);
|
||||
let f64_1 = f64::from_be_bytes(f64_triplet_raw[8..16].try_into().unwrap());
|
||||
assert_eq!(f64_1, 0.2);
|
||||
let f64_2 = f64::from_be_bytes(f64_triplet_raw[16..24].try_into().unwrap());
|
||||
assert_eq!(f64_2, 0.3);
|
||||
test_writing_fails(&f64_triplet);
|
||||
test_cloning_works(&f64_triplet);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u8_ecss_enum() {
|
||||
let value = 200;
|
||||
let u8p = EcssEnumU8::new(value);
|
||||
test_cloning_works(&u8p);
|
||||
let praw = ParamsEcssEnum::from(u8p);
|
||||
assert_eq!(praw.written_len(), 1);
|
||||
let mut buf = [0; 1];
|
||||
praw.write_to_be_bytes(&mut buf)
|
||||
.expect("writing to buffer failed");
|
||||
assert_eq!(buf[0], value);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u16_ecss_enum() {
|
||||
let value = 60000;
|
||||
let u16p = EcssEnumU16::new(value);
|
||||
test_cloning_works(&u16p);
|
||||
let praw = ParamsEcssEnum::from(u16p);
|
||||
assert_eq!(praw.written_len(), 2);
|
||||
let mut buf = [0; 2];
|
||||
praw.write_to_be_bytes(&mut buf)
|
||||
.expect("writing to buffer failed");
|
||||
assert_eq!(u16::from_be_bytes(buf), value);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u32_ecss_enum() {
|
||||
let value = 70000;
|
||||
let u32p = EcssEnumU32::new(value);
|
||||
test_cloning_works(&u32p);
|
||||
let praw = ParamsEcssEnum::from(u32p);
|
||||
assert_eq!(praw.written_len(), 4);
|
||||
let mut buf = [0; 4];
|
||||
praw.write_to_be_bytes(&mut buf)
|
||||
.expect("writing to buffer failed");
|
||||
assert_eq!(u32::from_be_bytes(buf), value);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_u64_ecss_enum() {
|
||||
let value = 0xffffffffff;
|
||||
let u64p = EcssEnumU64::new(value);
|
||||
test_cloning_works(&u64p);
|
||||
let praw = ParamsEcssEnum::from(u64p);
|
||||
assert_eq!(praw.written_len(), 8);
|
||||
let mut buf = [0; 8];
|
||||
praw.write_to_be_bytes(&mut buf)
|
||||
.expect("writing to buffer failed");
|
||||
assert_eq!(u64::from_be_bytes(buf), value);
|
||||
}
|
||||
}
|
||||
|
@ -72,6 +72,7 @@
|
||||
//! }
|
||||
//! ```
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub use alloc_mod::*;
|
||||
use core::fmt::{Display, Formatter};
|
||||
use delegate::delegate;
|
||||
@ -82,7 +83,7 @@ use spacepackets::ByteConversionError;
|
||||
use std::error::Error;
|
||||
|
||||
type NumBlocks = u16;
|
||||
pub type PoolAddr = u64;
|
||||
pub type StoreAddr = u64;
|
||||
|
||||
/// Simple address type used for transactions with the local pool.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
@ -100,14 +101,14 @@ impl StaticPoolAddr {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<StaticPoolAddr> for PoolAddr {
|
||||
impl From<StaticPoolAddr> for StoreAddr {
|
||||
fn from(value: StaticPoolAddr) -> Self {
|
||||
((value.pool_idx as u64) << 16) | value.packet_idx as u64
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PoolAddr> for StaticPoolAddr {
|
||||
fn from(value: PoolAddr) -> Self {
|
||||
impl From<StoreAddr> for StaticPoolAddr {
|
||||
fn from(value: StoreAddr) -> Self {
|
||||
Self {
|
||||
pool_idx: ((value >> 16) & 0xff) as u16,
|
||||
packet_idx: (value & 0xff) as u16,
|
||||
@ -150,59 +151,59 @@ impl Error for StoreIdError {}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum PoolError {
|
||||
pub enum StoreError {
|
||||
/// Requested data block is too large
|
||||
DataTooLarge(usize),
|
||||
/// The store is full. Contains the index of the full subpool
|
||||
StoreFull(u16),
|
||||
/// Store ID is invalid. This also includes partial errors where only the subpool is invalid
|
||||
InvalidStoreId(StoreIdError, Option<PoolAddr>),
|
||||
InvalidStoreId(StoreIdError, Option<StoreAddr>),
|
||||
/// Valid subpool and packet index, but no data is stored at the given address
|
||||
DataDoesNotExist(PoolAddr),
|
||||
DataDoesNotExist(StoreAddr),
|
||||
ByteConversionError(spacepackets::ByteConversionError),
|
||||
LockError,
|
||||
/// Internal or configuration errors
|
||||
InternalError(u32),
|
||||
}
|
||||
|
||||
impl Display for PoolError {
|
||||
impl Display for StoreError {
|
||||
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
|
||||
match self {
|
||||
PoolError::DataTooLarge(size) => {
|
||||
StoreError::DataTooLarge(size) => {
|
||||
write!(f, "data to store with size {size} is too large")
|
||||
}
|
||||
PoolError::StoreFull(u16) => {
|
||||
StoreError::StoreFull(u16) => {
|
||||
write!(f, "store is too full. index for full subpool: {u16}")
|
||||
}
|
||||
PoolError::InvalidStoreId(id_e, addr) => {
|
||||
StoreError::InvalidStoreId(id_e, addr) => {
|
||||
write!(f, "invalid store ID: {id_e}, address: {addr:?}")
|
||||
}
|
||||
PoolError::DataDoesNotExist(addr) => {
|
||||
StoreError::DataDoesNotExist(addr) => {
|
||||
write!(f, "no data exists at address {addr:?}")
|
||||
}
|
||||
PoolError::InternalError(e) => {
|
||||
StoreError::InternalError(e) => {
|
||||
write!(f, "internal error: {e}")
|
||||
}
|
||||
PoolError::ByteConversionError(e) => {
|
||||
StoreError::ByteConversionError(e) => {
|
||||
write!(f, "store error: {e}")
|
||||
}
|
||||
PoolError::LockError => {
|
||||
StoreError::LockError => {
|
||||
write!(f, "lock error")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ByteConversionError> for PoolError {
|
||||
impl From<ByteConversionError> for StoreError {
|
||||
fn from(value: ByteConversionError) -> Self {
|
||||
Self::ByteConversionError(value)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl Error for PoolError {
|
||||
impl Error for StoreError {
|
||||
fn source(&self) -> Option<&(dyn Error + 'static)> {
|
||||
if let PoolError::InvalidStoreId(e, _) = self {
|
||||
if let StoreError::InvalidStoreId(e, _) = self {
|
||||
return Some(e);
|
||||
}
|
||||
None
|
||||
@ -217,41 +218,44 @@ impl Error for PoolError {
/// pool structure being wrapped inside a lock.
pub trait PoolProvider {
    /// Add new data to the pool. The provider should attempt to reserve a memory block with the
    /// appropriate size and then copy the given data to the block. Yields a [PoolAddr] which can
    /// appropriate size and then copy the given data to the block. Yields a [StoreAddr] which can
    /// be used to access the data stored in the pool
    fn add(&mut self, data: &[u8]) -> Result<PoolAddr, PoolError>;
    fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError>;

    /// The provider should attempt to reserve a free memory block with the appropriate size first.
    /// It then executes a user-provided closure and passes a mutable reference to that memory
    /// block to the closure. This allows the user to write data to the memory block.
    /// The function should yield a [PoolAddr] which can be used to access the data stored in the
    /// The function should yield a [StoreAddr] which can be used to access the data stored in the
    /// pool.
    fn free_element<W: FnMut(&mut [u8])>(
        &mut self,
        len: usize,
        writer: W,
    ) -> Result<PoolAddr, PoolError>;
    ) -> Result<StoreAddr, StoreError>;

    /// Modify data added previously using a given [PoolAddr]. The provider should use the store
    /// Modify data added previously using a given [StoreAddr]. The provider should use the store
    /// address to determine if a memory block exists for that address. If it does, it should
    /// call the user-provided closure and pass a mutable reference to the memory block
    /// to the closure. This allows the user to modify the memory block.
    fn modify<U: FnMut(&mut [u8])>(&mut self, addr: &PoolAddr, updater: U)
        -> Result<(), PoolError>;
    fn modify<U: FnMut(&mut [u8])>(
        &mut self,
        addr: &StoreAddr,
        updater: U,
    ) -> Result<(), StoreError>;

    /// The provider should copy the data from the memory block to the user-provided buffer if
    /// it exists.
    fn read(&self, addr: &PoolAddr, buf: &mut [u8]) -> Result<usize, PoolError>;
    fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError>;

    /// Delete data inside the pool given a [PoolAddr].
    fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError>;
    fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError>;
    /// Delete data inside the pool given a [StoreAddr].
    fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError>;
    fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError>;

    /// Retrieve the length of the data at the given store address.
    fn len_of_data(&self, addr: &PoolAddr) -> Result<usize, PoolError>;
    fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError>;

    #[cfg(feature = "alloc")]
    fn read_as_vec(&self, addr: &PoolAddr) -> Result<alloc::vec::Vec<u8>, PoolError> {
    fn read_as_vec(&self, addr: &StoreAddr) -> Result<alloc::vec::Vec<u8>, StoreError> {
        let mut vec = alloc::vec![0; self.len_of_data(addr)?];
        self.read(addr, &mut vec)?;
        Ok(vec)
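To make the trait contract above concrete: a minimal usage sketch written against the StoreAddr/StoreError names on the incoming side of this diff, using only the PoolProvider methods shown in the hunk. The import path and the surrounding function are illustrative assumptions, not part of the change.

use satrs::pool::{PoolProvider, StoreAddr, StoreError};

// Store a packet, read it back and delete it again, using only PoolProvider methods.
fn store_read_delete(pool: &mut impl PoolProvider, packet: &[u8]) -> Result<(), StoreError> {
    // Adding data reserves a fitting memory block and yields a store address.
    let addr: StoreAddr = pool.add(packet)?;
    assert!(pool.has_element_at(&addr)?);

    // Reading copies the stored bytes into a user-provided buffer.
    let mut buf = vec![0; pool.len_of_data(&addr)?];
    pool.read(&addr, &mut buf)?;
    assert_eq!(buf, packet);

    // Deletion consumes the store address and frees the block again.
    pool.delete(addr)
}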
@ -268,7 +272,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
    /// This can prevent memory leaks. Users can read the data and release the guard
    /// if the data in the store is valid for further processing. If the data is faulty, no
    /// manual deletion is necessary when returning from a processing function prematurely.
    fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self>;
    fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self>;

    /// This function behaves like [PoolProvider::modify], but consumes the provided
    /// address and returns a RAII conformant guard object.
@ -278,20 +282,20 @@ pub trait PoolProviderWithGuards: PoolProvider {
    /// This can prevent memory leaks. Users can read (and modify) the data and release the guard
    /// if the data in the store is valid for further processing. If the data is faulty, no
    /// manual deletion is necessary when returning from a processing function prematurely.
    fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self>;
    fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self>;
}

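A brief sketch of the guard pattern described above, assuming a pool that implements PoolProviderWithGuards and the alloc feature for read_as_vec; the stored data is deleted when the guard is dropped unless release() is called, so early returns need no manual cleanup.

use satrs::pool::{PoolProviderWithGuards, StoreAddr, StoreError};

fn process_packet(
    pool: &mut impl PoolProviderWithGuards,
    addr: StoreAddr,
) -> Result<(), StoreError> {
    // The guard takes ownership of the address; the stored data is deleted on drop.
    let read_guard = pool.read_with_guard(addr);
    let packet = read_guard.read_as_vec()?;
    if packet.is_empty() {
        // Early return: no manual deletion necessary, the guard cleans up.
        return Ok(());
    }
    // ... process the packet ...
    Ok(())
}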
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
|
||||
pool: &'a mut MemProvider,
|
||||
pub addr: PoolAddr,
|
||||
pub addr: StoreAddr,
|
||||
no_deletion: bool,
|
||||
deletion_failed_error: Option<PoolError>,
|
||||
deletion_failed_error: Option<StoreError>,
|
||||
}
|
||||
|
||||
/// This helper object can be used to safely access pool data without worrying about memory
|
||||
/// leaks.
|
||||
impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: PoolAddr) -> Self {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
|
||||
Self {
|
||||
pool,
|
||||
addr,
|
||||
@ -300,12 +304,12 @@ impl<'a, MemProvider: PoolProvider> PoolGuard<'a, MemProvider> {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, PoolError> {
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError> {
|
||||
self.pool.read(&self.addr, buf)
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, PoolError> {
|
||||
pub fn read_as_vec(&self) -> Result<alloc::vec::Vec<u8>, StoreError> {
|
||||
self.pool.read_as_vec(&self.addr)
|
||||
}
|
||||
|
||||
@ -331,19 +335,19 @@ pub struct PoolRwGuard<'a, MemProvider: PoolProvider + ?Sized> {
|
||||
}
|
||||
|
||||
impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: PoolAddr) -> Self {
|
||||
pub fn new(pool: &'a mut MemProvider, addr: StoreAddr) -> Self {
|
||||
Self {
|
||||
guard: PoolGuard::new(pool, addr),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), PoolError> {
|
||||
pub fn update<U: FnMut(&mut [u8])>(&mut self, updater: &mut U) -> Result<(), StoreError> {
|
||||
self.guard.pool.modify(&self.guard.addr, updater)
|
||||
}
|
||||
|
||||
delegate!(
|
||||
to self.guard {
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, PoolError>;
|
||||
pub fn read(&self, buf: &mut [u8]) -> Result<usize, StoreError>;
|
||||
/// Releasing the pool guard will disable the automatic deletion of the data when the guard
|
||||
/// is dropped.
|
||||
pub fn release(&mut self);
|
||||
@ -354,7 +358,7 @@ impl<'a, MemProvider: PoolProvider> PoolRwGuard<'a, MemProvider> {
|
||||
#[cfg(feature = "alloc")]
|
||||
mod alloc_mod {
|
||||
use super::{PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticPoolAddr};
|
||||
use crate::pool::{NumBlocks, PoolAddr, PoolError, StoreIdError};
|
||||
use crate::pool::{NumBlocks, StoreAddr, StoreError, StoreIdError};
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use spacepackets::ByteConversionError;
|
||||
@ -419,7 +423,7 @@ mod alloc_mod {
    /// fitting subpool is full. This might be added in the future.
    ///
    /// Transactions with the [pool][StaticMemoryPool] are done using a generic
    /// [address][PoolAddr] type. Adding any data to the pool will yield a store address.
    /// [address][StoreAddr] type. Adding any data to the pool will yield a store address.
    /// Modification and read operations are done using a reference to a store address. Deletion
    /// will consume the store address.
    pub struct StaticMemoryPool {
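The subpool-based sizing described in the doc comment above can be illustrated with a small configuration sketch; the constructor arguments shown here are assumptions for illustration only and are not taken from this diff.

use satrs::pool::{PoolProvider, StaticMemoryPool, StaticPoolConfig};

fn pool_setup_sketch() {
    // Hypothetical layout: 8 blocks of 32 B, 4 blocks of 64 B, 2 blocks of 128 B.
    let pool_cfg = StaticPoolConfig::new(vec![(8, 32), (4, 64), (2, 128)]);
    let mut pool = StaticMemoryPool::new(pool_cfg);

    // A 40 byte packet does not fit the 32 B subpool, so it is placed in a 64 B block.
    let addr = pool.add(&[0; 40]).expect("adding data failed");
    assert_eq!(pool.len_of_data(&addr).expect("length query failed"), 40);
    pool.delete(addr).expect("deletion failed");
}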
@ -449,41 +453,41 @@ mod alloc_mod {
|
||||
local_pool
|
||||
}
|
||||
|
||||
fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, PoolError> {
|
||||
fn addr_check(&self, addr: &StaticPoolAddr) -> Result<usize, StoreError> {
|
||||
self.validate_addr(addr)?;
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
let size_list = self.sizes_lists.get(pool_idx).unwrap();
|
||||
let curr_size = size_list[addr.packet_idx as usize];
|
||||
if curr_size == STORE_FREE {
|
||||
return Err(PoolError::DataDoesNotExist(PoolAddr::from(*addr)));
|
||||
return Err(StoreError::DataDoesNotExist(StoreAddr::from(*addr)));
|
||||
}
|
||||
Ok(curr_size)
|
||||
}
|
||||
|
||||
fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), PoolError> {
|
||||
fn validate_addr(&self, addr: &StaticPoolAddr) -> Result<(), StoreError> {
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
if pool_idx >= self.pool_cfg.cfg.len() {
|
||||
return Err(PoolError::InvalidStoreId(
|
||||
return Err(StoreError::InvalidStoreId(
|
||||
StoreIdError::InvalidSubpool(addr.pool_idx),
|
||||
Some(PoolAddr::from(*addr)),
|
||||
Some(StoreAddr::from(*addr)),
|
||||
));
|
||||
}
|
||||
if addr.packet_idx >= self.pool_cfg.cfg[addr.pool_idx as usize].0 {
|
||||
return Err(PoolError::InvalidStoreId(
|
||||
return Err(StoreError::InvalidStoreId(
|
||||
StoreIdError::InvalidPacketIdx(addr.packet_idx),
|
||||
Some(PoolAddr::from(*addr)),
|
||||
Some(StoreAddr::from(*addr)),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, PoolError> {
|
||||
fn reserve(&mut self, data_len: usize) -> Result<StaticPoolAddr, StoreError> {
|
||||
let mut subpool_idx = self.find_subpool(data_len, 0)?;
|
||||
|
||||
if self.pool_cfg.spill_to_higher_subpools {
|
||||
while let Err(PoolError::StoreFull(_)) = self.find_empty(subpool_idx) {
|
||||
while let Err(StoreError::StoreFull(_)) = self.find_empty(subpool_idx) {
|
||||
if (subpool_idx + 1) as usize == self.sizes_lists.len() {
|
||||
return Err(PoolError::StoreFull(subpool_idx));
|
||||
return Err(StoreError::StoreFull(subpool_idx));
|
||||
}
|
||||
subpool_idx += 1;
|
||||
}
|
||||
@ -497,7 +501,7 @@ mod alloc_mod {
|
||||
})
|
||||
}
|
||||
|
||||
fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, PoolError> {
|
||||
fn find_subpool(&self, req_size: usize, start_at_subpool: u16) -> Result<u16, StoreError> {
|
||||
for (i, &(_, elem_size)) in self.pool_cfg.cfg.iter().enumerate() {
|
||||
if i < start_at_subpool as usize {
|
||||
continue;
|
||||
@ -506,21 +510,21 @@ mod alloc_mod {
|
||||
return Ok(i as u16);
|
||||
}
|
||||
}
|
||||
Err(PoolError::DataTooLarge(req_size))
|
||||
Err(StoreError::DataTooLarge(req_size))
|
||||
}
|
||||
|
||||
fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), PoolError> {
|
||||
let packet_pos = self.raw_pos(addr).ok_or(PoolError::InternalError(0))?;
|
||||
fn write(&mut self, addr: &StaticPoolAddr, data: &[u8]) -> Result<(), StoreError> {
|
||||
let packet_pos = self.raw_pos(addr).ok_or(StoreError::InternalError(0))?;
|
||||
let subpool = self
|
||||
.pool
|
||||
.get_mut(addr.pool_idx as usize)
|
||||
.ok_or(PoolError::InternalError(1))?;
|
||||
.ok_or(StoreError::InternalError(1))?;
|
||||
let pool_slice = &mut subpool[packet_pos..packet_pos + data.len()];
|
||||
pool_slice.copy_from_slice(data);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), PoolError> {
|
||||
fn find_empty(&mut self, subpool: u16) -> Result<(u16, &mut usize), StoreError> {
|
||||
if let Some(size_list) = self.sizes_lists.get_mut(subpool as usize) {
|
||||
for (i, elem_size) in size_list.iter_mut().enumerate() {
|
||||
if *elem_size == STORE_FREE {
|
||||
@ -528,12 +532,12 @@ mod alloc_mod {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
return Err(PoolError::InvalidStoreId(
|
||||
return Err(StoreError::InvalidStoreId(
|
||||
StoreIdError::InvalidSubpool(subpool),
|
||||
None,
|
||||
));
|
||||
}
|
||||
Err(PoolError::StoreFull(subpool))
|
||||
Err(StoreError::StoreFull(subpool))
|
||||
}
|
||||
|
||||
fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
|
||||
@ -543,10 +547,10 @@ mod alloc_mod {
|
||||
}
|
||||
|
||||
impl PoolProvider for StaticMemoryPool {
|
||||
fn add(&mut self, data: &[u8]) -> Result<PoolAddr, PoolError> {
|
||||
fn add(&mut self, data: &[u8]) -> Result<StoreAddr, StoreError> {
|
||||
let data_len = data.len();
|
||||
if data_len > POOL_MAX_SIZE {
|
||||
return Err(PoolError::DataTooLarge(data_len));
|
||||
return Err(StoreError::DataTooLarge(data_len));
|
||||
}
|
||||
let addr = self.reserve(data_len)?;
|
||||
self.write(&addr, data)?;
|
||||
@ -557,9 +561,9 @@ mod alloc_mod {
|
||||
&mut self,
|
||||
len: usize,
|
||||
mut writer: W,
|
||||
) -> Result<PoolAddr, PoolError> {
|
||||
) -> Result<StoreAddr, StoreError> {
|
||||
if len > POOL_MAX_SIZE {
|
||||
return Err(PoolError::DataTooLarge(len));
|
||||
return Err(StoreError::DataTooLarge(len));
|
||||
}
|
||||
let addr = self.reserve(len)?;
|
||||
let raw_pos = self.raw_pos(&addr).unwrap();
|
||||
@ -571,9 +575,9 @@ mod alloc_mod {
|
||||
|
||||
fn modify<U: FnMut(&mut [u8])>(
|
||||
&mut self,
|
||||
addr: &PoolAddr,
|
||||
addr: &StoreAddr,
|
||||
mut updater: U,
|
||||
) -> Result<(), PoolError> {
|
||||
) -> Result<(), StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
let curr_size = self.addr_check(&addr)?;
|
||||
let raw_pos = self.raw_pos(&addr).unwrap();
|
||||
@ -583,7 +587,7 @@ mod alloc_mod {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn read(&self, addr: &PoolAddr, buf: &mut [u8]) -> Result<usize, PoolError> {
|
||||
fn read(&self, addr: &StoreAddr, buf: &mut [u8]) -> Result<usize, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
let curr_size = self.addr_check(&addr)?;
|
||||
if buf.len() < curr_size {
|
||||
@ -601,7 +605,7 @@ mod alloc_mod {
|
||||
Ok(curr_size)
|
||||
}
|
||||
|
||||
fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError> {
|
||||
fn delete(&mut self, addr: StoreAddr) -> Result<(), StoreError> {
|
||||
let addr = StaticPoolAddr::from(addr);
|
||||
self.addr_check(&addr)?;
|
||||
let block_size = self.pool_cfg.cfg.get(addr.pool_idx as usize).unwrap().1;
|
||||
@ -614,7 +618,7 @@ mod alloc_mod {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError> {
|
||||
fn has_element_at(&self, addr: &StoreAddr) -> Result<bool, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
self.validate_addr(&addr)?;
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
@ -626,7 +630,7 @@ mod alloc_mod {
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
fn len_of_data(&self, addr: &PoolAddr) -> Result<usize, PoolError> {
|
||||
fn len_of_data(&self, addr: &StoreAddr) -> Result<usize, StoreError> {
|
||||
let addr = StaticPoolAddr::from(*addr);
|
||||
self.validate_addr(&addr)?;
|
||||
let pool_idx = addr.pool_idx as usize;
|
||||
@ -640,11 +644,11 @@ mod alloc_mod {
|
||||
}
|
||||
|
||||
impl PoolProviderWithGuards for StaticMemoryPool {
|
||||
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
|
||||
fn modify_with_guard(&mut self, addr: StoreAddr) -> PoolRwGuard<Self> {
|
||||
PoolRwGuard::new(self, addr)
|
||||
}
|
||||
|
||||
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
|
||||
fn read_with_guard(&mut self, addr: StoreAddr) -> PoolGuard<Self> {
|
||||
PoolGuard::new(self, addr)
|
||||
}
|
||||
}
|
||||
@ -653,8 +657,8 @@ mod alloc_mod {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::pool::{
|
||||
PoolError, PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
|
||||
StaticPoolAddr, StaticPoolConfig, StoreIdError, POOL_MAX_SIZE,
|
||||
PoolGuard, PoolProvider, PoolProviderWithGuards, PoolRwGuard, StaticMemoryPool,
|
||||
StaticPoolAddr, StaticPoolConfig, StoreError, StoreIdError, POOL_MAX_SIZE,
|
||||
};
|
||||
use std::vec;
|
||||
|
||||
@ -778,7 +782,7 @@ mod tests {
|
||||
let res = local_pool.free_element(8, |_| {});
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert_eq!(err, PoolError::StoreFull(1));
|
||||
assert_eq!(err, StoreError::StoreFull(1));
|
||||
|
||||
// Verify that the two deletions are successful
|
||||
assert!(local_pool.delete(addr0).is_ok());
|
||||
@ -800,7 +804,7 @@ mod tests {
|
||||
assert!(res.is_err());
|
||||
assert!(matches!(
|
||||
res.unwrap_err(),
|
||||
PoolError::DataDoesNotExist { .. }
|
||||
StoreError::DataDoesNotExist { .. }
|
||||
));
|
||||
}
|
||||
|
||||
@ -813,8 +817,8 @@ mod tests {
|
||||
let res = local_pool.add(&test_buf);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(err, PoolError::StoreFull { .. }));
|
||||
if let PoolError::StoreFull(subpool) = err {
|
||||
assert!(matches!(err, StoreError::StoreFull { .. }));
|
||||
if let StoreError::StoreFull(subpool) = err {
|
||||
assert_eq!(subpool, 2);
|
||||
}
|
||||
}
|
||||
@ -832,7 +836,7 @@ mod tests {
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
err,
|
||||
PoolError::InvalidStoreId(StoreIdError::InvalidSubpool(3), Some(_))
|
||||
StoreError::InvalidStoreId(StoreIdError::InvalidSubpool(3), Some(_))
|
||||
));
|
||||
}
|
||||
|
||||
@ -849,7 +853,7 @@ mod tests {
|
||||
let err = res.unwrap_err();
|
||||
assert!(matches!(
|
||||
err,
|
||||
PoolError::InvalidStoreId(StoreIdError::InvalidPacketIdx(1), Some(_))
|
||||
StoreError::InvalidStoreId(StoreIdError::InvalidPacketIdx(1), Some(_))
|
||||
));
|
||||
}
|
||||
|
||||
@ -860,7 +864,7 @@ mod tests {
|
||||
let res = local_pool.add(&data_too_large);
|
||||
assert!(res.is_err());
|
||||
let err = res.unwrap_err();
|
||||
assert_eq!(err, PoolError::DataTooLarge(20));
|
||||
assert_eq!(err, StoreError::DataTooLarge(20));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -868,7 +872,10 @@ mod tests {
|
||||
let mut local_pool = basic_small_pool();
|
||||
let res = local_pool.free_element(POOL_MAX_SIZE + 1, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(res.unwrap_err(), PoolError::DataTooLarge(POOL_MAX_SIZE + 1));
|
||||
assert_eq!(
|
||||
res.unwrap_err(),
|
||||
StoreError::DataTooLarge(POOL_MAX_SIZE + 1)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -877,7 +884,7 @@ mod tests {
|
||||
// Try to request a slot which is too large
|
||||
let res = local_pool.free_element(20, |_| {});
|
||||
assert!(res.is_err());
|
||||
assert_eq!(res.unwrap_err(), PoolError::DataTooLarge(20));
|
||||
assert_eq!(res.unwrap_err(), StoreError::DataTooLarge(20));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -997,7 +1004,7 @@ mod tests {
|
||||
let should_fail = local_pool.free_element(8, |_| {});
|
||||
assert!(should_fail.is_err());
|
||||
if let Err(err) = should_fail {
|
||||
assert_eq!(err, PoolError::StoreFull(1));
|
||||
assert_eq!(err, StoreError::StoreFull(1));
|
||||
} else {
|
||||
panic!("unexpected store address");
|
||||
}
|
||||
@ -1028,7 +1035,7 @@ mod tests {
|
||||
let should_fail = local_pool.free_element(8, |_| {});
|
||||
assert!(should_fail.is_err());
|
||||
if let Err(err) = should_fail {
|
||||
assert_eq!(err, PoolError::StoreFull(2));
|
||||
assert_eq!(err, StoreError::StoreFull(2));
|
||||
} else {
|
||||
panic!("unexpected store address");
|
||||
}
|
||||
|
@ -1,197 +1,397 @@
|
||||
use crate::{
|
||||
action::{ActionId, ActionRequest},
|
||||
params::Params,
|
||||
request::{GenericMessage, MessageMetadata, RequestId},
|
||||
};
|
||||
use crate::{action::ActionRequest, TargetId};
|
||||
|
||||
use satrs_shared::res_code::ResultU16;
|
||||
use super::verification::{TcStateAccepted, VerificationToken};
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub use std_mod::*;
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[allow(unused_imports)]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct ActionRequestWithId {
|
||||
pub request_id: RequestId,
|
||||
pub request: ActionRequest,
|
||||
}
|
||||
|
||||
/// A reply to an action request, but tailored to the PUS standard verification process.
|
||||
#[non_exhaustive]
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
pub enum ActionReplyVariant {
|
||||
Completed,
|
||||
StepSuccess {
|
||||
step: u16,
|
||||
},
|
||||
CompletionFailed {
|
||||
error_code: ResultU16,
|
||||
params: Option<Params>,
|
||||
},
|
||||
StepFailed {
|
||||
error_code: ResultU16,
|
||||
step: u16,
|
||||
params: Option<Params>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Clone)]
|
||||
pub struct ActionReplyPus {
|
||||
pub action_id: ActionId,
|
||||
pub variant: ActionReplyVariant,
|
||||
}
|
||||
|
||||
impl ActionReplyPus {
|
||||
pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self {
|
||||
Self { action_id, variant }
|
||||
}
|
||||
}
|
||||
|
||||
pub type GenericActionReplyPus = GenericMessage<ActionReplyPus>;
|
||||
|
||||
impl GenericActionReplyPus {
|
||||
pub fn new_action_reply(
|
||||
replier_info: MessageMetadata,
|
||||
action_id: ActionId,
|
||||
reply: ActionReplyVariant,
|
||||
) -> Self {
|
||||
Self::new(replier_info, ActionReplyPus::new(action_id, reply))
|
||||
}
|
||||
/// This trait is an abstraction for the routing of PUS service 8 action requests to a dedicated
|
||||
/// recipient using the generic [TargetId].
|
||||
pub trait PusActionRequestRouter {
|
||||
type Error;
|
||||
fn route(
|
||||
&self,
|
||||
target_id: TargetId,
|
||||
hk_request: ActionRequest,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error>;
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub mod alloc_mod {
|
||||
use crate::{
|
||||
action::ActionRequest,
|
||||
queue::GenericTargetedMessagingError,
|
||||
request::{
|
||||
GenericMessage, MessageReceiver, MessageSender, MessageSenderAndReceiver, RequestId,
|
||||
},
|
||||
ComponentId,
|
||||
};
|
||||
use spacepackets::ecss::tc::PusTcReader;
|
||||
|
||||
use super::ActionReplyPus;
|
||||
use crate::pus::verification::VerificationReportingProvider;
|
||||
|
||||
/// Helper type definition for an action handler which can handle action requests.
|
||||
pub type ActionRequestHandlerInterface<S, R> =
|
||||
MessageSenderAndReceiver<ActionReplyPus, ActionRequest, S, R>;
|
||||
use super::*;
|
||||
|
||||
impl<S: MessageSender<ActionReplyPus>, R: MessageReceiver<ActionRequest>>
|
||||
ActionRequestHandlerInterface<S, R>
|
||||
{
|
||||
pub fn try_recv_action_request(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ActionRequest>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
|
||||
pub fn send_action_reply(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
reply: ActionReplyPus,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(request_id, target_id, reply)
|
||||
}
|
||||
}
|
||||
|
||||
/// Helper type definition for an action requestor object which can send action requests and
/// receive action replies.
|
||||
pub type ActionRequestorInterface<S, R> =
|
||||
MessageSenderAndReceiver<ActionRequest, ActionReplyPus, S, R>;
|
||||
|
||||
impl<S: MessageSender<ActionRequest>, R: MessageReceiver<ActionReplyPus>>
|
||||
ActionRequestorInterface<S, R>
|
||||
{
|
||||
pub fn try_recv_action_reply(
|
||||
&self,
|
||||
) -> Result<Option<GenericMessage<ActionReplyPus>>, GenericTargetedMessagingError> {
|
||||
self.try_recv_message()
|
||||
}
|
||||
|
||||
pub fn send_action_request(
|
||||
&self,
|
||||
request_id: RequestId,
|
||||
target_id: ComponentId,
|
||||
request: ActionRequest,
|
||||
) -> Result<(), GenericTargetedMessagingError> {
|
||||
self.send_message(request_id, target_id, request)
|
||||
}
|
||||
/// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into
|
||||
/// an [ActionRequest].
|
||||
///
|
||||
/// Having a dedicated trait for this allows maximum flexiblity and tailoring of the standard.
|
||||
/// The only requirement is that a valid [TargetId] and an [ActionRequest] are returned by the
|
||||
/// core conversion function.
|
||||
///
|
||||
/// The user should take care of performing the error handling as well. Some of the following
|
||||
/// aspects might be relevant:
|
||||
///
|
||||
/// - Checking the validity of the APID, service ID, subservice ID.
|
||||
/// - Checking the validity of the user data.
|
||||
///
|
||||
/// A [VerificationReportingProvider] instance is passed to the user to also allow handling
|
||||
/// of the verification process as part of the PUS standard requirements.
|
||||
pub trait PusActionToRequestConverter {
|
||||
type Error;
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
time_stamp: &[u8],
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) -> Result<(TargetId, ActionRequest), Self::Error>;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub mod std_mod {
|
||||
use std::sync::mpsc;
|
||||
|
||||
use crate::{
|
||||
pus::{
|
||||
verification::{self, TcStateToken},
|
||||
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap,
|
||||
},
|
||||
ComponentId,
|
||||
use crate::pus::{
|
||||
get_current_cds_short_timestamp, verification::VerificationReportingProvider,
|
||||
EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError,
|
||||
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ActivePusActionRequestStd {
|
||||
pub action_id: ActionId,
|
||||
common: ActivePusRequestStd,
|
||||
/// This is a high-level handler for the PUS service 8 action service.
///
/// It performs the following handling steps:
///
/// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter]
///    allows configuring the used telecommand memory backend.
/// 2. Convert the TC to a targeted action request using the provided
///    [PusActionToRequestConverter]. The generic error type is constrained to the
///    [PusPacketHandlingError] for the concrete implementation which offers a packet handler.
/// 3. Route the action request using the provided [PusActionRequestRouter].
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler].
pub struct PusService8ActionHandler<
    TcReceiver: EcssTcReceiverCore,
    TmSender: EcssTmSenderCore,
    TcInMemConverter: EcssTcInMemConverter,
    VerificationReporter: VerificationReportingProvider,
    RequestConverter: PusActionToRequestConverter,
    RequestRouter: PusActionRequestRouter<Error = RoutingError>,
    RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
    RoutingError = GenericRoutingError,
> {
    service_helper:
        PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
    pub request_converter: RequestConverter,
    pub request_router: RequestRouter,
    pub routing_error_handler: RoutingErrorHandler,
}

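A rough usage sketch for the handler described above, modeled on the test harness at the end of this file; the service helper, converter, router and error handler bindings are assumed to have been constructed during task setup.

// `service_helper`, `converter`, `router` and `error_handler` are assumed to exist and to
// match the generic parameters of the handler above.
let mut action_handler =
    PusService8ActionHandler::new(service_helper, converter, router, error_handler);

// Poll the handler periodically; each call processes at most one telecommand.
loop {
    match action_handler.handle_one_tc() {
        Ok(PusPacketHandlerResult::Empty) => break,
        Ok(_) => continue,
        Err(e) => {
            // Routing errors were already reported through the error handler at this point.
            eprintln!("PUS 8 action service error: {e:?}");
            break;
        }
    }
}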
impl ActiveRequestProvider for ActivePusActionRequestStd {
|
||||
delegate::delegate! {
|
||||
to self.common {
|
||||
fn target_id(&self) -> ComponentId;
|
||||
fn token(&self) -> verification::TcStateToken;
|
||||
fn set_token(&mut self, token: verification::TcStateToken);
|
||||
fn has_timed_out(&self) -> bool;
|
||||
fn timeout(&self) -> core::time::Duration;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ActivePusActionRequestStd {
|
||||
pub fn new_from_common_req(action_id: ActionId, common: ActivePusRequestStd) -> Self {
|
||||
Self { action_id, common }
|
||||
}
|
||||
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
RequestConverter: PusActionToRequestConverter<Error = PusPacketHandlingError>,
|
||||
RequestRouter: PusActionRequestRouter<Error = RoutingError>,
|
||||
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
|
||||
RoutingError: Clone,
|
||||
>
|
||||
PusService8ActionHandler<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
RequestConverter,
|
||||
RequestRouter,
|
||||
RoutingErrorHandler,
|
||||
RoutingError,
|
||||
>
|
||||
where
|
||||
PusPacketHandlingError: From<RoutingError>,
|
||||
{
|
||||
pub fn new(
|
||||
action_id: ActionId,
|
||||
target_id: ComponentId,
|
||||
token: TcStateToken,
|
||||
timeout: core::time::Duration,
|
||||
service_helper: PusServiceHelper<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
>,
|
||||
request_converter: RequestConverter,
|
||||
request_router: RequestRouter,
|
||||
routing_error_handler: RoutingErrorHandler,
|
||||
) -> Self {
|
||||
Self {
|
||||
action_id,
|
||||
common: ActivePusRequestStd::new(target_id, token, timeout),
|
||||
service_helper,
|
||||
request_converter,
|
||||
request_router,
|
||||
routing_error_handler,
|
||||
}
|
||||
}
|
||||
|
||||
/// Core function to poll the next TC packet and try to handle it.
|
||||
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let mut partial_error = None;
|
||||
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
|
||||
let (target_id, action_request) = self.request_converter.convert(
|
||||
ecss_tc_and_token.token,
|
||||
&tc,
|
||||
&time_stamp,
|
||||
&self.service_helper.common.verification_handler,
|
||||
)?;
|
||||
if let Err(e) =
|
||||
self.request_router
|
||||
.route(target_id, action_request, ecss_tc_and_token.token)
|
||||
{
|
||||
self.routing_error_handler.handle_error(
|
||||
target_id,
|
||||
ecss_tc_and_token.token,
|
||||
&tc,
|
||||
e.clone(),
|
||||
&time_stamp,
|
||||
&self.service_helper.common.verification_handler,
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
|
||||
pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap<ActivePusActionRequestStd>;
|
||||
|
||||
pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface<
|
||||
mpsc::Sender<GenericMessage<ActionReplyPus>>,
|
||||
mpsc::Receiver<GenericMessage<ActionRequest>>,
|
||||
>;
|
||||
pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface<
|
||||
mpsc::SyncSender<GenericMessage<ActionReplyPus>>,
|
||||
mpsc::Receiver<GenericMessage<ActionRequest>>,
|
||||
>;
|
||||
|
||||
pub type ActionRequestorMpsc = ActionRequestorInterface<
|
||||
mpsc::Sender<GenericMessage<ActionRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ActionReplyPus>>,
|
||||
>;
|
||||
pub type ActionRequestorBoundedMpsc = ActionRequestorInterface<
|
||||
mpsc::SyncSender<GenericMessage<ActionRequest>>,
|
||||
mpsc::Receiver<GenericMessage<ActionReplyPus>>,
|
||||
>;
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
||||
mod tests {
|
||||
use delegate::delegate;
|
||||
|
||||
use spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader},
|
||||
tm::PusTmReader,
|
||||
PusPacket,
|
||||
},
|
||||
CcsdsPacket, SequenceFlags, SpHeader,
|
||||
};
|
||||
|
||||
use crate::pus::{
|
||||
tests::{
|
||||
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, TestConverter,
|
||||
TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID,
|
||||
},
|
||||
verification::{
|
||||
tests::TestVerificationReporter, FailParams, RequestId, VerificationReportingProvider,
|
||||
},
|
||||
EcssTcInVecConverter, GenericRoutingError, MpscTcReceiver, PusPacketHandlerResult,
|
||||
PusPacketHandlingError, TmAsVecSenderWithMpsc,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
impl PusActionRequestRouter for TestRouter<ActionRequest> {
|
||||
type Error = GenericRoutingError;
|
||||
|
||||
fn route(
|
||||
&self,
|
||||
target_id: TargetId,
|
||||
hk_request: ActionRequest,
|
||||
_token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.routing_requests
|
||||
.borrow_mut()
|
||||
.push_back((target_id, hk_request));
|
||||
self.check_for_injected_error()
|
||||
}
|
||||
}
|
||||
|
||||
impl PusActionToRequestConverter for TestConverter<8> {
|
||||
type Error = PusPacketHandlingError;
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
time_stamp: &[u8],
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) -> Result<(TargetId, ActionRequest), Self::Error> {
|
||||
self.conversion_request.push_back(tc.raw_data().to_vec());
|
||||
self.check_service(tc)?;
|
||||
let target_id = tc.apid();
|
||||
if tc.user_data().len() < 4 {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&APP_DATA_TOO_SHORT,
|
||||
(tc.user_data().len() as u32).to_be_bytes().as_ref(),
|
||||
),
|
||||
)
|
||||
.expect("start success failure");
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 4,
|
||||
found: tc.user_data().len(),
|
||||
});
|
||||
}
|
||||
if tc.subservice() == 1 {
|
||||
verif_reporter
|
||||
.start_success(token, time_stamp)
|
||||
.expect("start success failure");
|
||||
return Ok((
|
||||
target_id.into(),
|
||||
ActionRequest::UnsignedIdAndVecData {
|
||||
action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()),
|
||||
data: tc.user_data()[4..].to_vec(),
|
||||
},
|
||||
));
|
||||
}
|
||||
Err(PusPacketHandlingError::InvalidAppData(
|
||||
"unexpected app data".into(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct Pus8HandlerWithVecTester {
|
||||
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
|
||||
handler: PusService8ActionHandler<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
TestVerificationReporter,
|
||||
TestConverter<8>,
|
||||
TestRouter<ActionRequest>,
|
||||
TestRoutingErrorHandler,
|
||||
>,
|
||||
}
|
||||
|
||||
impl Pus8HandlerWithVecTester {
|
||||
pub fn new() -> Self {
|
||||
let (common, srv_handler) =
|
||||
PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
|
||||
Self {
|
||||
common,
|
||||
handler: PusService8ActionHandler::new(
|
||||
srv_handler,
|
||||
TestConverter::default(),
|
||||
TestRouter::default(),
|
||||
TestRoutingErrorHandler::default(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.handler.request_converter {
|
||||
pub fn check_next_conversion(&mut self, tc: &PusTcCreator);
|
||||
}
|
||||
}
|
||||
delegate! {
|
||||
to self.handler.request_router {
|
||||
pub fn retrieve_next_request(&mut self) -> (TargetId, ActionRequest);
|
||||
}
|
||||
}
|
||||
delegate! {
|
||||
to self.handler.routing_error_handler {
|
||||
pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus8HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(
|
||||
&self,
|
||||
subservice: u8,
|
||||
expected_request_id: RequestId,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SimplePusPacketHandler for Pus8HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_test() {
|
||||
let mut action_handler = Pus8HandlerWithVecTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 1);
|
||||
let action_id: u32 = 1;
|
||||
let action_id_raw = action_id.to_be_bytes();
|
||||
let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true);
|
||||
action_handler.send_tc(&tc);
|
||||
let result = action_handler.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
action_handler.check_next_conversion(&tc);
|
||||
let (target_id, action_req) = action_handler.retrieve_next_request();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req {
|
||||
assert_eq!(action_id, 1);
|
||||
assert_eq!(data, &[]);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_routing_error() {
|
||||
let mut action_handler = Pus8HandlerWithVecTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(8, 1);
|
||||
let action_id: u32 = 1;
|
||||
let action_id_raw = action_id.to_be_bytes();
|
||||
let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true);
|
||||
let error = GenericRoutingError::UnknownTargetId(25);
|
||||
action_handler
|
||||
.handler
|
||||
.request_router
|
||||
.inject_routing_error(error);
|
||||
action_handler.send_tc(&tc);
|
||||
let result = action_handler.handle_one_tc();
|
||||
assert!(result.is_err());
|
||||
let check_error = |routing_error: GenericRoutingError| {
|
||||
if let GenericRoutingError::UnknownTargetId(id) = routing_error {
|
||||
assert_eq!(id, 25);
|
||||
} else {
|
||||
panic!("unexpected error type");
|
||||
}
|
||||
};
|
||||
if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() {
|
||||
check_error(routing_error);
|
||||
} else {
|
||||
panic!("unexpected error type");
|
||||
}
|
||||
|
||||
action_handler.check_next_conversion(&tc);
|
||||
let (target_id, action_req) = action_handler.retrieve_next_request();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req {
|
||||
assert_eq!(action_id, 1);
|
||||
assert_eq!(data, &[]);
|
||||
}
|
||||
|
||||
let (target_id, found_error) = action_handler.retrieve_next_error();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
check_error(found_error);
|
||||
}
|
||||
}
|
||||
|
@ -1,13 +1,13 @@
|
||||
use crate::pus::source_buffer_large_enough;
|
||||
use crate::pus::{source_buffer_large_enough, EcssTmtcError};
|
||||
use spacepackets::ecss::tm::PusTmCreator;
|
||||
use spacepackets::ecss::tm::PusTmSecondaryHeader;
|
||||
use spacepackets::ecss::EcssEnumeration;
|
||||
use spacepackets::ecss::{EcssEnumeration, PusError};
|
||||
use spacepackets::ByteConversionError;
|
||||
use spacepackets::{SpHeader, MAX_APID};
|
||||
|
||||
use crate::pus::EcssTmSenderCore;
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
pub use alloc_mod::EventReporter;
|
||||
pub use spacepackets::ecss::event::*;
|
||||
|
||||
pub struct EventReportCreator {
|
||||
@ -16,112 +16,117 @@ pub struct EventReportCreator {
|
||||
}
|
||||
|
||||
impl EventReportCreator {
|
||||
pub fn new(apid: u16, dest_id: u16) -> Option<Self> {
|
||||
pub fn new(apid: u16) -> Option<Self> {
|
||||
if apid > MAX_APID {
|
||||
return None;
|
||||
}
|
||||
Some(Self { dest_id, apid })
|
||||
Some(Self {
|
||||
// msg_count: 0,
|
||||
dest_id: 0,
|
||||
apid,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn event_info<'time, 'src_data>(
|
||||
&self,
|
||||
&mut self,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
src_data_buf,
|
||||
Subservice::TmInfoReport,
|
||||
time_stamp,
|
||||
event_id,
|
||||
params,
|
||||
src_data_buf,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_low_severity<'time, 'src_data>(
|
||||
&self,
|
||||
&mut self,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
src_data_buf,
|
||||
Subservice::TmLowSeverityReport,
|
||||
time_stamp,
|
||||
event_id,
|
||||
params,
|
||||
src_data_buf,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_medium_severity<'time, 'src_data>(
|
||||
&self,
|
||||
&mut self,
|
||||
buf: &'src_data mut [u8],
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
buf,
|
||||
Subservice::TmMediumSeverityReport,
|
||||
time_stamp,
|
||||
event_id,
|
||||
params,
|
||||
buf,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn event_high_severity<'time, 'src_data>(
|
||||
&self,
|
||||
&mut self,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
self.generate_and_send_generic_tm(
|
||||
src_data_buf,
|
||||
Subservice::TmHighSeverityReport,
|
||||
time_stamp,
|
||||
event_id,
|
||||
params,
|
||||
src_data_buf,
|
||||
aux_data,
|
||||
)
|
||||
}
|
||||
|
||||
fn generate_and_send_generic_tm<'time, 'src_data>(
|
||||
&self,
|
||||
&mut self,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
subservice: Subservice,
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
self.generate_generic_event_tm(subservice, time_stamp, event_id, params, src_data_buf)
|
||||
self.generate_generic_event_tm(src_data_buf, subservice, time_stamp, event_id, aux_data)
|
||||
}
|
||||
|
||||
fn generate_generic_event_tm<'time, 'src_data>(
|
||||
&self,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
subservice: Subservice,
|
||||
time_stamp: &'time [u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&'src_data [u8]>,
|
||||
src_data_buf: &'src_data mut [u8],
|
||||
aux_data: Option<&'src_data [u8]>,
|
||||
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
|
||||
let mut src_data_len = event_id.size();
|
||||
if let Some(aux_data) = params {
|
||||
if let Some(aux_data) = aux_data {
|
||||
src_data_len += aux_data.len();
|
||||
}
|
||||
source_buffer_large_enough(src_data_buf.len(), src_data_len)?;
|
||||
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
|
||||
let sec_header =
|
||||
PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, time_stamp);
|
||||
PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, Some(time_stamp));
|
||||
let mut current_idx = 0;
|
||||
event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?;
|
||||
current_idx += event_id.size();
|
||||
if let Some(aux_data) = params {
|
||||
if let Some(aux_data) = aux_data {
|
||||
src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
|
||||
current_idx += aux_data.len();
|
||||
}
|
||||
Ok(PusTmCreator::new(
|
||||
SpHeader::new_from_apid(self.apid),
|
||||
&mut sp_header,
|
||||
sec_header,
|
||||
&src_data_buf[0..current_idx],
|
||||
true,
|
||||
@ -132,131 +137,99 @@ impl EventReportCreator {
|
||||
#[cfg(feature = "alloc")]
|
||||
mod alloc_mod {
|
||||
use super::*;
|
||||
use crate::pus::{EcssTmSender, EcssTmtcError};
|
||||
use crate::ComponentId;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use core::cell::RefCell;
|
||||
use spacepackets::ecss::PusError;
|
||||
|
||||
pub trait EventTmHookProvider {
|
||||
fn modify_tm(&self, tm: &mut PusTmCreator);
|
||||
pub struct EventReporter {
|
||||
source_data_buf: Vec<u8>,
|
||||
pub reporter: EventReportCreator,
|
||||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct DummyEventHook {}
|
||||
|
||||
impl EventTmHookProvider for DummyEventHook {
|
||||
fn modify_tm(&self, _tm: &mut PusTmCreator) {}
|
||||
}
|
||||
|
||||
pub struct EventReporter<EventTmHook: EventTmHookProvider = DummyEventHook> {
id: ComponentId,
// Use interior mutability pattern here. This is just an intermediate buffer for the PUS event
// packet generation.
source_data_buf: RefCell<Vec<u8>>,
pub report_creator: EventReportCreator,
pub tm_hook: EventTmHook,
}
||||
|
||||
impl EventReporter<DummyEventHook> {
|
||||
pub fn new(
|
||||
id: ComponentId,
|
||||
default_apid: u16,
|
||||
default_dest_id: u16,
|
||||
max_event_id_and_aux_data_size: usize,
|
||||
) -> Option<Self> {
|
||||
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
|
||||
impl EventReporter {
|
||||
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> {
|
||||
let reporter = EventReportCreator::new(apid)?;
|
||||
Some(Self {
|
||||
id,
|
||||
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
|
||||
report_creator: reporter,
|
||||
tm_hook: DummyEventHook::default(),
|
||||
source_data_buf: vec![0; max_event_id_and_aux_data_size],
|
||||
reporter,
|
||||
})
|
||||
}
|
||||
}
|
||||
impl<EventTmHook: EventTmHookProvider> EventReporter<EventTmHook> {
|
||||
pub fn new_with_hook(
|
||||
id: ComponentId,
|
||||
default_apid: u16,
|
||||
default_dest_id: u16,
|
||||
max_event_id_and_aux_data_size: usize,
|
||||
tm_hook: EventTmHook,
|
||||
) -> Option<Self> {
|
||||
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
|
||||
Some(Self {
|
||||
id,
|
||||
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
|
||||
report_creator: reporter,
|
||||
tm_hook,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn event_info(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&[u8]>,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let mut mut_buf = self.source_data_buf.borrow_mut();
|
||||
let mut tm_creator = self
|
||||
.report_creator
|
||||
.event_info(time_stamp, event_id, params, mut_buf.as_mut_slice())
|
||||
let tm_creator = self
|
||||
.reporter
|
||||
.event_info(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
.map_err(PusError::ByteConversion)?;
|
||||
self.tm_hook.modify_tm(&mut tm_creator);
|
||||
sender.send_tm(self.id, tm_creator.into())?;
|
||||
sender.send_tm(tm_creator.into())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn event_low_severity(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&[u8]>,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let mut mut_buf = self.source_data_buf.borrow_mut();
|
||||
let mut tm_creator = self
|
||||
.report_creator
|
||||
.event_low_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
|
||||
let tm_creator = self
|
||||
.reporter
|
||||
.event_low_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
.map_err(PusError::ByteConversion)?;
|
||||
self.tm_hook.modify_tm(&mut tm_creator);
|
||||
sender.send_tm(self.id, tm_creator.into())?;
|
||||
sender.send_tm(tm_creator.into())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn event_medium_severity(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&[u8]>,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let mut mut_buf = self.source_data_buf.borrow_mut();
|
||||
let mut tm_creator = self
|
||||
.report_creator
|
||||
.event_medium_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
|
||||
let tm_creator = self
|
||||
.reporter
|
||||
.event_medium_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
.map_err(PusError::ByteConversion)?;
|
||||
self.tm_hook.modify_tm(&mut tm_creator);
|
||||
sender.send_tm(self.id, tm_creator.into())?;
|
||||
sender.send_tm(tm_creator.into())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn event_high_severity(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event_id: impl EcssEnumeration,
|
||||
params: Option<&[u8]>,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<(), EcssTmtcError> {
|
||||
let mut mut_buf = self.source_data_buf.borrow_mut();
|
||||
let mut tm_creator = self
|
||||
.report_creator
|
||||
.event_high_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
|
||||
let tm_creator = self
|
||||
.reporter
|
||||
.event_high_severity(
|
||||
self.source_data_buf.as_mut_slice(),
|
||||
time_stamp,
|
||||
event_id,
|
||||
aux_data,
|
||||
)
|
||||
.map_err(PusError::ByteConversion)?;
|
||||
self.tm_hook.modify_tm(&mut tm_creator);
|
||||
sender.send_tm(self.id, tm_creator.into())?;
|
||||
sender.send_tm(tm_creator.into())?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@ -266,11 +239,9 @@ mod alloc_mod {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::events::{EventU32, Severity};
|
||||
use crate::pus::test_util::TEST_COMPONENT_ID_0;
|
||||
use crate::pus::tests::CommonTmInfo;
|
||||
use crate::pus::{ChannelWithId, EcssTmSender, EcssTmtcError, PusTmVariant};
|
||||
use crate::ComponentId;
|
||||
use spacepackets::ecss::PusError;
|
||||
use crate::pus::{EcssChannel, PusTmWrapper};
|
||||
use crate::ChannelId;
|
||||
use spacepackets::ByteConversionError;
|
||||
use std::cell::RefCell;
|
||||
use std::collections::VecDeque;
|
||||
@ -284,7 +255,6 @@ mod tests {
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Clone)]
|
||||
struct TmInfo {
|
||||
pub sender_id: ComponentId,
|
||||
pub common: CommonTmInfo,
|
||||
pub event: EventU32,
|
||||
pub aux_data: Vec<u8>,
|
||||
@ -295,19 +265,19 @@ mod tests {
|
||||
pub service_queue: RefCell<VecDeque<TmInfo>>,
|
||||
}
|
||||
|
||||
impl ChannelWithId for TestSender {
|
||||
fn id(&self) -> ComponentId {
|
||||
impl EcssChannel for TestSender {
|
||||
fn channel_id(&self) -> ChannelId {
|
||||
0
|
||||
}
|
||||
}
|
||||
|
||||
impl EcssTmSender for TestSender {
|
||||
fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
|
||||
impl EcssTmSenderCore for TestSender {
|
||||
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
|
||||
match tm {
|
||||
PusTmVariant::InStore(_) => {
|
||||
PusTmWrapper::InStore(_) => {
|
||||
panic!("TestSender: unexpected call with address");
|
||||
}
|
||||
PusTmVariant::Direct(tm) => {
|
||||
PusTmWrapper::Direct(tm) => {
|
||||
assert!(!tm.source_data().is_empty());
|
||||
let src_data = tm.source_data();
|
||||
assert!(src_data.len() >= 4);
|
||||
@ -318,7 +288,6 @@ mod tests {
|
||||
aux_data.extend_from_slice(&src_data[4..]);
|
||||
}
|
||||
self.service_queue.borrow_mut().push_back(TmInfo {
|
||||
sender_id,
|
||||
common: CommonTmInfo::new_from_tm(&tm),
|
||||
event,
|
||||
aux_data,
|
||||
@ -331,10 +300,10 @@ mod tests {
|
||||
|
||||
fn severity_to_subservice(severity: Severity) -> Subservice {
|
||||
match severity {
|
||||
Severity::Info => Subservice::TmInfoReport,
|
||||
Severity::Low => Subservice::TmLowSeverityReport,
|
||||
Severity::Medium => Subservice::TmMediumSeverityReport,
|
||||
Severity::High => Subservice::TmHighSeverityReport,
|
||||
Severity::INFO => Subservice::TmInfoReport,
|
||||
Severity::LOW => Subservice::TmLowSeverityReport,
|
||||
Severity::MEDIUM => Subservice::TmMediumSeverityReport,
|
||||
Severity::HIGH => Subservice::TmHighSeverityReport,
|
||||
}
|
||||
}
|
||||
|
||||
@ -347,22 +316,22 @@ mod tests {
|
||||
aux_data: Option<&[u8]>,
|
||||
) {
|
||||
match severity {
|
||||
Severity::Info => {
|
||||
Severity::INFO => {
|
||||
reporter
|
||||
.event_info(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting info event");
|
||||
}
|
||||
Severity::Low => {
|
||||
Severity::LOW => {
|
||||
reporter
|
||||
.event_low_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting low event");
|
||||
}
|
||||
Severity::Medium => {
|
||||
Severity::MEDIUM => {
|
||||
reporter
|
||||
.event_medium_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting medium event");
|
||||
}
|
||||
Severity::High => {
|
||||
Severity::HIGH => {
|
||||
reporter
|
||||
.event_high_severity(sender, time_stamp, event, aux_data)
|
||||
.expect("Error reporting high event");
|
||||
@ -376,12 +345,7 @@ mod tests {
|
||||
error_data: Option<&[u8]>,
|
||||
) {
|
||||
let mut sender = TestSender::default();
|
||||
let reporter = EventReporter::new(
|
||||
TEST_COMPONENT_ID_0.id(),
|
||||
EXAMPLE_APID,
|
||||
0,
|
||||
max_event_aux_data_buf,
|
||||
);
|
||||
let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf);
|
||||
assert!(reporter.is_some());
|
||||
let mut reporter = reporter.unwrap();
|
||||
let time_stamp_empty: [u8; 7] = [0; 7];
|
||||
@ -389,7 +353,7 @@ mod tests {
|
||||
if let Some(err_data) = error_data {
|
||||
error_copy.extend_from_slice(err_data);
|
||||
}
|
||||
let event = EventU32::new_checked(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
let event = EventU32::new(severity, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
.expect("Error creating example event");
|
||||
report_basic_event(
|
||||
&mut reporter,
|
||||
@ -407,45 +371,44 @@ mod tests {
|
||||
severity_to_subservice(severity) as u8
|
||||
);
|
||||
assert_eq!(tm_info.common.dest_id, 0);
|
||||
assert_eq!(tm_info.common.timestamp, time_stamp_empty);
|
||||
assert_eq!(tm_info.common.time_stamp, time_stamp_empty);
|
||||
assert_eq!(tm_info.common.msg_counter, 0);
|
||||
assert_eq!(tm_info.common.apid, EXAMPLE_APID);
|
||||
assert_eq!(tm_info.event, event);
|
||||
assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID_0.id());
|
||||
assert_eq!(tm_info.aux_data, error_copy);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_info_event_generation() {
|
||||
basic_event_test(4, Severity::Info, None);
|
||||
basic_event_test(4, Severity::INFO, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_low_severity_event() {
|
||||
basic_event_test(4, Severity::Low, None);
|
||||
basic_event_test(4, Severity::LOW, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_medium_severity_event() {
|
||||
basic_event_test(4, Severity::Medium, None);
|
||||
basic_event_test(4, Severity::MEDIUM, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_high_severity_event() {
|
||||
basic_event_test(4, Severity::High, None);
|
||||
basic_event_test(4, Severity::HIGH, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn event_with_info_string() {
|
||||
let info_string = "Test Information";
|
||||
basic_event_test(32, Severity::Info, Some(info_string.as_bytes()));
|
||||
basic_event_test(32, Severity::INFO, Some(info_string.as_bytes()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn low_severity_with_raw_err_data() {
|
||||
let raw_err_param: i32 = -1;
|
||||
let raw_err = raw_err_param.to_be_bytes();
|
||||
basic_event_test(8, Severity::Low, Some(&raw_err))
|
||||
basic_event_test(8, Severity::LOW, Some(&raw_err))
|
||||
}
|
||||
|
||||
fn check_buf_too_small(
|
||||
@ -454,7 +417,7 @@ mod tests {
|
||||
expected_found_len: usize,
|
||||
) {
|
||||
let time_stamp_empty: [u8; 7] = [0; 7];
|
||||
let event = EventU32::new_checked(Severity::Info, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
let event = EventU32::new(Severity::INFO, EXAMPLE_GROUP_ID, EXAMPLE_EVENT_ID_0)
|
||||
.expect("Error creating example event");
|
||||
let err = reporter.event_info(sender, &time_stamp_empty, event, None);
|
||||
assert!(err.is_err());
|
||||
@ -474,7 +437,7 @@ mod tests {
|
||||
fn insufficient_buffer() {
|
||||
let mut sender = TestSender::default();
|
||||
for i in 0..3 {
|
||||
let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
|
||||
let reporter = EventReporter::new(EXAMPLE_APID, i);
|
||||
assert!(reporter.is_some());
|
||||
let mut reporter = reporter.unwrap();
|
||||
check_buf_too_small(&mut reporter, &mut sender, i);
|
||||
|
@ -10,11 +10,13 @@ use hashbrown::HashSet;
|
||||
pub use crate::pus::event::EventReporter;
|
||||
use crate::pus::verification::TcStateToken;
|
||||
#[cfg(feature = "alloc")]
|
||||
use crate::pus::EcssTmSender;
|
||||
use crate::pus::EcssTmSenderCore;
|
||||
use crate::pus::EcssTmtcError;
|
||||
#[cfg(feature = "alloc")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
|
||||
pub use alloc_mod::*;
|
||||
#[cfg(feature = "heapless")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
|
||||
pub use heapless_mod::*;
|
||||
|
||||
/// This trait allows the PUS event manager implementation to stay generic over various types
@ -28,7 +30,7 @@ pub use heapless_mod::*;
/// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
pub trait PusEventReportingMapProvider<Event: GenericEvent> {
pub trait PusEventMgmtBackendProvider<Event: GenericEvent> {
type Error;

fn event_enabled(&self, event: &Event) -> bool;
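The doc comment above hints at a static or pre-allocated list as the embedded friendly alternative. The following sketch is an editorial illustration of that idea (not part of this diff), written against the `PusEventMgmtBackendProvider` signatures shown in this changeset; the struct name and the extra `PartialEq + Copy` bounds are assumptions.

    // Fixed-capacity backend: disabled events live in a pre-allocated array of slots.
    pub struct StaticListBackend<const N: usize, Event: GenericEvent + PartialEq + Copy> {
        disabled: [Option<Event>; N],
    }

    impl<const N: usize, Event: GenericEvent + PartialEq + Copy> PusEventMgmtBackendProvider<Event>
        for StaticListBackend<N, Event>
    {
        // The only failure mode is running out of pre-allocated slots.
        type Error = ();

        fn event_enabled(&self, event: &Event) -> bool {
            !self.disabled.iter().flatten().any(|e| e == event)
        }

        fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
            for slot in self.disabled.iter_mut() {
                if slot.as_ref() == Some(event) {
                    *slot = None;
                    return Ok(true);
                }
            }
            Ok(false)
        }

        fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
            if !self.event_enabled(event) {
                return Ok(false);
            }
            // Claim the first free slot; fail if the list is exhausted.
            let free_slot = self.disabled.iter_mut().find(|slot| slot.is_none()).ok_or(())?;
            *free_slot = Some(*event);
            Ok(true)
        }
    }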
@ -42,6 +44,7 @@ pub mod heapless_mod {
|
||||
use crate::events::LargestEventRaw;
|
||||
use core::marker::PhantomData;
|
||||
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
|
||||
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
|
||||
// regular Event type again.
|
||||
#[derive(Default)]
|
||||
@ -50,7 +53,13 @@ pub mod heapless_mod {
|
||||
phantom: PhantomData<Provider>,
|
||||
}
|
||||
|
||||
impl<const N: usize, Provider: GenericEvent> PusEventReportingMapProvider<Provider>
|
||||
/// Safety: All contained field are [Send] as well
|
||||
unsafe impl<const N: usize, Event: GenericEvent + Send> Send
|
||||
for HeaplessPusMgmtBackendProvider<N, Event>
|
||||
{
|
||||
}
|
||||
|
||||
impl<const N: usize, Provider: GenericEvent> PusEventMgmtBackendProvider<Provider>
|
||||
for HeaplessPusMgmtBackendProvider<N, Provider>
|
||||
{
|
||||
type Error = ();
|
||||
@ -99,24 +108,20 @@ impl From<EcssTmtcError> for EventManError {
|
||||
pub mod alloc_mod {
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use crate::{
|
||||
events::EventU16,
|
||||
params::{Params, WritableToBeBytes},
|
||||
pus::event::{DummyEventHook, EventTmHookProvider},
|
||||
};
|
||||
use crate::events::EventU16;
|
||||
|
||||
use super::*;
|
||||
|
||||
/// Default backend provider which uses a hash set as the event reporting status container
/// like mentioned in the example of the [PusEventReportingMapProvider] documentation.
/// like mentioned in the example of the [PusEventMgmtBackendProvider] documentation.
///
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusEventReportingMap<Event: GenericEvent = EventU32> {
pub struct DefaultPusEventMgmtBackend<Event: GenericEvent = EventU32> {
disabled: HashSet<Event>,
}
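A short editorial usage sketch for the hash-set backed default map (not part of this diff); the event constant mirrors the constants used in the test code further down.

    let mut reporting_map = DefaultPusEventMgmtBackend::<EventU32>::default();
    let example_event = EventU32::const_new(Severity::LOW, 1, 5);
    // Events are enabled by default; disabling stores them in the hash set.
    assert!(reporting_map.event_enabled(&example_event));
    reporting_map.disable_event_reporting(&example_event).unwrap();
    assert!(!reporting_map.event_enabled(&example_event));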
||||
|
||||
impl<Event: GenericEvent> Default for DefaultPusEventReportingMap<Event> {
|
||||
impl<Event: GenericEvent> Default for DefaultPusEventMgmtBackend<Event> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
disabled: HashSet::default(),
|
||||
@ -124,176 +129,118 @@ pub mod alloc_mod {
|
||||
}
|
||||
}
|
||||
|
||||
impl<Event: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
|
||||
PusEventReportingMapProvider<Event> for DefaultPusEventReportingMap<Event>
|
||||
impl<EV: GenericEvent + PartialEq + Eq + Hash + Copy + Clone> PusEventMgmtBackendProvider<EV>
|
||||
for DefaultPusEventMgmtBackend<EV>
|
||||
{
|
||||
type Error = ();
|
||||
|
||||
fn event_enabled(&self, event: &Event) -> bool {
|
||||
fn event_enabled(&self, event: &EV) -> bool {
|
||||
!self.disabled.contains(event)
|
||||
}
|
||||
|
||||
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
|
||||
fn enable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
|
||||
Ok(self.disabled.remove(event))
|
||||
}
|
||||
|
||||
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
|
||||
fn disable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
|
||||
Ok(self.disabled.insert(*event))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
pub struct EventGenerationResult {
|
||||
pub event_was_enabled: bool,
|
||||
pub params_were_propagated: bool,
|
||||
}
|
||||
|
||||
pub struct PusEventTmCreatorWithMap<
|
||||
ReportingMap: PusEventReportingMapProvider<Event>,
|
||||
Event: GenericEvent,
|
||||
EventTmHook: EventTmHookProvider = DummyEventHook,
|
||||
pub struct PusEventDispatcher<
|
||||
B: PusEventMgmtBackendProvider<EV, Error = E>,
|
||||
EV: GenericEvent,
|
||||
E,
|
||||
> {
|
||||
pub reporter: EventReporter<EventTmHook>,
|
||||
reporting_map: ReportingMap,
|
||||
phantom: PhantomData<Event>,
|
||||
reporter: EventReporter,
|
||||
backend: B,
|
||||
phantom: PhantomData<(E, EV)>,
|
||||
}
|
||||
|
||||
impl<
|
||||
ReportingMap: PusEventReportingMapProvider<Event>,
|
||||
Event: GenericEvent,
|
||||
EventTmHook: EventTmHookProvider,
|
||||
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHook>
|
||||
impl<B: PusEventMgmtBackendProvider<EV, Error = E>, EV: GenericEvent, E>
|
||||
PusEventDispatcher<B, EV, E>
|
||||
{
|
||||
pub fn new(reporter: EventReporter<EventTmHook>, backend: ReportingMap) -> Self {
|
||||
pub fn new(reporter: EventReporter, backend: B) -> Self {
|
||||
Self {
|
||||
reporter,
|
||||
reporting_map: backend,
|
||||
backend,
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
|
||||
self.reporting_map.enable_event_reporting(event)
|
||||
pub fn enable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
|
||||
self.backend.enable_event_reporting(event)
|
||||
}
|
||||
|
||||
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, ReportingMap::Error> {
|
||||
self.reporting_map.disable_event_reporting(event)
|
||||
pub fn disable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
|
||||
self.backend.disable_event_reporting(event)
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm_generic(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event: Event,
|
||||
params: Option<&[u8]>,
|
||||
event: EV,
|
||||
aux_data: Option<&[u8]>,
|
||||
) -> Result<bool, EventManError> {
|
||||
if !self.reporting_map.event_enabled(&event) {
|
||||
if !self.backend.event_enabled(&event) {
|
||||
return Ok(false);
|
||||
}
|
||||
match event.severity() {
|
||||
Severity::Info => self
|
||||
Severity::INFO => self
|
||||
.reporter
|
||||
.event_info(sender, time_stamp, event, params)
|
||||
.event_info(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::Low => self
|
||||
Severity::LOW => self
|
||||
.reporter
|
||||
.event_low_severity(sender, time_stamp, event, params)
|
||||
.event_low_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::Medium => self
|
||||
Severity::MEDIUM => self
|
||||
.reporter
|
||||
.event_medium_severity(sender, time_stamp, event, params)
|
||||
.event_medium_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
Severity::High => self
|
||||
Severity::HIGH => self
|
||||
.reporter
|
||||
.event_high_severity(sender, time_stamp, event, params)
|
||||
.event_high_severity(sender, time_stamp, event, aux_data)
|
||||
.map(|_| true)
|
||||
.map_err(|e| e.into()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm_generic_with_generic_params(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event: Event,
|
||||
small_data_buf: &mut [u8],
|
||||
params: Option<&Params>,
|
||||
) -> Result<EventGenerationResult, EventManError> {
|
||||
let mut result = EventGenerationResult {
|
||||
event_was_enabled: false,
|
||||
params_were_propagated: true,
|
||||
};
|
||||
if params.is_none() {
|
||||
result.event_was_enabled =
|
||||
self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?;
|
||||
return Ok(result);
|
||||
}
|
||||
let params = params.unwrap();
|
||||
result.event_was_enabled = match params {
|
||||
Params::Heapless(heapless_param) => {
|
||||
heapless_param
|
||||
.write_to_be_bytes(&mut small_data_buf[..heapless_param.written_len()])
|
||||
.map_err(EcssTmtcError::ByteConversion)?;
|
||||
self.generate_pus_event_tm_generic(
|
||||
sender,
|
||||
time_stamp,
|
||||
event,
|
||||
Some(small_data_buf),
|
||||
)?
|
||||
}
|
||||
Params::Vec(vec) => {
|
||||
self.generate_pus_event_tm_generic(sender, time_stamp, event, Some(vec))?
|
||||
}
|
||||
Params::String(string) => self.generate_pus_event_tm_generic(
|
||||
sender,
|
||||
time_stamp,
|
||||
event,
|
||||
Some(string.as_bytes()),
|
||||
)?,
|
||||
_ => {
|
||||
result.params_were_propagated = false;
|
||||
self.generate_pus_event_tm_generic(sender, time_stamp, event, None)?
|
||||
}
|
||||
};
|
||||
Ok(result)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHook: EventTmHookProvider>
|
||||
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHook>
|
||||
impl<EV: GenericEvent + Copy + PartialEq + Eq + Hash>
|
||||
PusEventDispatcher<DefaultPusEventMgmtBackend<EV>, EV, ()>
|
||||
{
|
||||
pub fn new_with_default_backend(reporter: EventReporter<EventTmHook>) -> Self {
|
||||
pub fn new_with_default_backend(reporter: EventReporter) -> Self {
|
||||
Self {
|
||||
reporter,
|
||||
reporting_map: DefaultPusEventReportingMap::default(),
|
||||
backend: DefaultPusEventMgmtBackend::default(),
|
||||
phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<ReportingMap: PusEventReportingMapProvider<EventU32>>
|
||||
PusEventTmCreatorWithMap<ReportingMap, EventU32>
|
||||
{
|
||||
impl<B: PusEventMgmtBackendProvider<EventU32, Error = E>, E> PusEventDispatcher<B, EventU32, E> {
|
||||
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
|
||||
&mut self,
|
||||
event: &EventU32TypedSev<Severity>,
|
||||
) -> Result<bool, ReportingMap::Error> {
|
||||
self.reporting_map.enable_event_reporting(event.as_ref())
|
||||
) -> Result<bool, E> {
|
||||
self.backend.enable_event_reporting(event.as_ref())
|
||||
}
|
||||
|
||||
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
|
||||
&mut self,
|
||||
event: &EventU32TypedSev<Severity>,
|
||||
) -> Result<bool, ReportingMap::Error> {
|
||||
self.reporting_map.disable_event_reporting(event.as_ref())
|
||||
) -> Result<bool, E> {
|
||||
self.backend.disable_event_reporting(event.as_ref())
|
||||
}
|
||||
|
||||
pub fn generate_pus_event_tm<Severity: HasSeverity>(
|
||||
&self,
|
||||
sender: &(impl EcssTmSender + ?Sized),
|
||||
&mut self,
|
||||
sender: &mut (impl EcssTmSenderCore + ?Sized),
|
||||
time_stamp: &[u8],
|
||||
event: EventU32TypedSev<Severity>,
|
||||
aux_data: Option<&[u8]>,
|
||||
@ -302,48 +249,39 @@ pub mod alloc_mod {
|
||||
}
|
||||
}
|
||||
|
||||
pub type DefaultPusEventU16TmCreator<EventTmHook = DummyEventHook> =
|
||||
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU16>, EventU16, EventTmHook>;
|
||||
pub type DefaultPusEventU32TmCreator<EventTmHook = DummyEventHook> =
|
||||
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<EventU32>, EventU32, EventTmHook>;
|
||||
pub type DefaultPusEventU16Dispatcher<E> =
|
||||
PusEventDispatcher<DefaultPusEventMgmtBackend<EventU16>, EventU16, E>;
|
||||
pub type DefaultPusEventU32Dispatcher<E> =
|
||||
PusEventDispatcher<DefaultPusEventMgmtBackend<EventU32>, EventU32, E>;
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use alloc::string::{String, ToString};
|
||||
use alloc::vec;
|
||||
use spacepackets::ecss::event::Subservice;
|
||||
use spacepackets::ecss::tm::PusTmReader;
|
||||
use spacepackets::ecss::PusPacket;
|
||||
|
||||
use super::*;
|
||||
use crate::request::UniqueApidTargetId;
|
||||
use crate::{events::SeverityInfo, tmtc::PacketAsVec};
|
||||
use crate::{events::SeverityInfo, pus::TmAsVecSenderWithMpsc};
|
||||
use std::sync::mpsc::{self, TryRecvError};
|
||||
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
|
||||
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
|
||||
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
|
||||
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
|
||||
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
|
||||
const EMPTY_STAMP: [u8; 7] = [0; 7];
|
||||
const TEST_APID: u16 = 0x02;
|
||||
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);
|
||||
|
||||
fn create_basic_man_1() -> DefaultPusEventU32TmCreator {
|
||||
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
|
||||
.expect("Creating event repoter failed");
|
||||
PusEventTmCreatorWithMap::new_with_default_backend(reporter)
|
||||
fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> {
|
||||
let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed");
|
||||
PusEventDispatcher::new_with_default_backend(reporter)
|
||||
}
|
||||
fn create_basic_man_2() -> DefaultPusEventU32TmCreator {
|
||||
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
|
||||
.expect("Creating event repoter failed");
|
||||
let backend = DefaultPusEventReportingMap::default();
|
||||
PusEventTmCreatorWithMap::new(reporter, backend)
|
||||
fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> {
|
||||
let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed");
|
||||
let backend = DefaultPusEventMgmtBackend::default();
|
||||
PusEventDispatcher::new(reporter, backend)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let event_man = create_basic_man_1();
|
||||
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
|
||||
let mut event_man = create_basic_man_1();
|
||||
let (event_tx, event_rx) = mpsc::channel();
|
||||
let mut sender = TmAsVecSenderWithMpsc::new(0, "test_sender", event_tx);
|
||||
let event_sent = event_man
|
||||
.generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
|
||||
assert!(event_sent);
|
||||
@ -354,13 +292,13 @@ mod tests {
|
||||
#[test]
|
||||
fn test_disable_event() {
|
||||
let mut event_man = create_basic_man_2();
|
||||
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
|
||||
// let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
|
||||
let (event_tx, event_rx) = mpsc::channel();
|
||||
let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
|
||||
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
let mut event_sent = event_man
|
||||
.generate_pus_event_tm_generic(&event_tx, &EMPTY_STAMP, LOW_SEV_EVENT, None)
|
||||
.generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None)
|
||||
.expect("Sending low severity event failed");
|
||||
assert!(!event_sent);
|
||||
let res = event_rx.try_recv();
|
||||
@ -368,7 +306,7 @@ mod tests {
|
||||
assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
|
||||
// Check that only the low severity event was disabled
|
||||
event_sent = event_man
|
||||
.generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
assert!(event_sent);
|
||||
event_rx.try_recv().expect("No info event received");
|
||||
@ -377,7 +315,8 @@ mod tests {
|
||||
#[test]
|
||||
fn test_reenable_event() {
|
||||
let mut event_man = create_basic_man_1();
|
||||
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
|
||||
let (event_tx, event_rx) = mpsc::channel();
|
||||
let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
|
||||
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
@ -385,75 +324,9 @@ mod tests {
|
||||
assert!(res.is_ok());
|
||||
assert!(res.unwrap());
|
||||
let event_sent = event_man
|
||||
.generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
|
||||
.expect("Sending info event failed");
|
||||
assert!(event_sent);
|
||||
event_rx.try_recv().expect("No info event received");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_event_with_generic_string_param() {
|
||||
let event_man = create_basic_man_1();
|
||||
let mut small_data_buf = [0; 128];
|
||||
let param_data = "hello world";
|
||||
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
|
||||
let res = event_man.generate_pus_event_tm_generic_with_generic_params(
|
||||
&event_tx,
|
||||
&EMPTY_STAMP,
|
||||
INFO_EVENT.into(),
|
||||
&mut small_data_buf,
|
||||
Some(¶m_data.to_string().into()),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let res = res.unwrap();
|
||||
assert!(res.event_was_enabled);
|
||||
assert!(res.params_were_propagated);
|
||||
let event_tm = event_rx.try_recv().expect("no event received");
|
||||
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
|
||||
assert_eq!(tm.service(), 5);
|
||||
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
|
||||
assert_eq!(tm.user_data().len(), 4 + param_data.len());
|
||||
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
|
||||
assert_eq!(u32_event, INFO_EVENT.raw());
|
||||
let string_data = String::from_utf8_lossy(&tm.user_data()[4..]);
|
||||
assert_eq!(string_data, param_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_event_with_generic_vec_param() {
|
||||
let event_man = create_basic_man_1();
|
||||
let mut small_data_buf = [0; 128];
|
||||
let param_data = vec![1, 2, 3, 4];
|
||||
let (event_tx, event_rx) = mpsc::channel::<PacketAsVec>();
|
||||
let res = event_man.generate_pus_event_tm_generic_with_generic_params(
|
||||
&event_tx,
|
||||
&EMPTY_STAMP,
|
||||
INFO_EVENT.into(),
|
||||
&mut small_data_buf,
|
||||
Some(¶m_data.clone().into()),
|
||||
);
|
||||
assert!(res.is_ok());
|
||||
let res = res.unwrap();
|
||||
assert!(res.event_was_enabled);
|
||||
assert!(res.params_were_propagated);
|
||||
let event_tm = event_rx.try_recv().expect("no event received");
|
||||
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
|
||||
assert_eq!(tm.service(), 5);
|
||||
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
|
||||
assert_eq!(tm.user_data().len(), 4 + param_data.len());
|
||||
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
|
||||
assert_eq!(u32_event, INFO_EVENT.raw());
|
||||
let vec_data = tm.user_data()[4..].to_vec();
|
||||
assert_eq!(vec_data, param_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_event_with_generic_store_param_not_propagated() {
|
||||
// TODO: Test this.
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_event_with_generic_heapless_param() {
|
||||
// TODO: Test this.
|
||||
}
|
||||
}
|
||||
|
@ -1,21 +1,20 @@
|
||||
use crate::events::EventU32;
|
||||
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
|
||||
use crate::pus::verification::TcStateToken;
|
||||
use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
|
||||
use crate::queue::GenericSendError;
|
||||
use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
|
||||
use spacepackets::ecss::event::Subservice;
|
||||
use spacepackets::ecss::PusPacket;
|
||||
use std::sync::mpsc::Sender;
|
||||
|
||||
use super::verification::VerificationReportingProvider;
|
||||
use super::{
|
||||
EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, GenericConversionError,
|
||||
GenericRoutingError, HandlingStatus, PusServiceHelper,
|
||||
get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore,
|
||||
PusServiceHelper,
|
||||
};
|
||||
|
||||
pub struct PusEventServiceHandler<
|
||||
TcReceiver: EcssTcReceiver,
|
||||
TmSender: EcssTmSender,
|
||||
pub struct PusService5EventHandler<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> {
|
||||
@ -25,11 +24,11 @@ pub struct PusEventServiceHandler<
|
||||
}
|
||||
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiver,
|
||||
TmSender: EcssTmSender,
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
> PusService5EventHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
|
||||
{
|
||||
pub fn new(
|
||||
service_helper: PusServiceHelper<
|
||||
@ -46,99 +45,91 @@ impl<
|
||||
}
|
||||
}
|
||||
|
||||
pub fn poll_and_handle_next_tc<ErrorCb: FnMut(&PartialPusHandlingError)>(
|
||||
&mut self,
|
||||
mut error_callback: ErrorCb,
|
||||
time_stamp: &[u8],
|
||||
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
|
||||
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(HandlingStatus::Empty.into());
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
self.service_helper
|
||||
.tc_in_mem_converter_mut()
|
||||
.cache(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let tc = self.service_helper.tc_in_mem_converter().convert()?;
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let subservice = tc.subservice();
|
||||
let srv = Subservice::try_from(subservice);
|
||||
if srv.is_err() {
|
||||
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
|
||||
return Ok(PusPacketHandlerResult::CustomSubservice(
|
||||
tc.subservice(),
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
let mut handle_enable_disable_request =
|
||||
|enable: bool| -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
|
||||
if tc.user_data().len() < 4 {
|
||||
return Err(GenericConversionError::NotEnoughAppData {
|
||||
expected: 4,
|
||||
found: tc.user_data().len(),
|
||||
}
|
||||
.into());
|
||||
let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
|
||||
if tc.user_data().len() < 4 {
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 4,
|
||||
found: tc.user_data().len(),
|
||||
});
|
||||
}
|
||||
let user_data = tc.user_data();
|
||||
let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
|
||||
let start_token = self
|
||||
.service_helper
|
||||
.common
|
||||
.verification_handler
|
||||
.start_success(ecss_tc_and_token.token, &stamp)
|
||||
.map_err(|_| PartialPusHandlingError::Verification);
|
||||
let partial_error = start_token.clone().err();
|
||||
let mut token: TcStateToken = ecss_tc_and_token.token.into();
|
||||
if let Ok(start_token) = start_token {
|
||||
token = start_token.into();
|
||||
}
|
||||
let event_req_with_token = if enable {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Enable(event_u32),
|
||||
token,
|
||||
}
|
||||
let user_data = tc.user_data();
|
||||
let event_u32 =
|
||||
EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
|
||||
let mut token: TcStateToken = ecss_tc_and_token.token.into();
|
||||
match self.service_helper.common.verif_reporter.start_success(
|
||||
&self.service_helper.common.tm_sender,
|
||||
ecss_tc_and_token.token,
|
||||
time_stamp,
|
||||
) {
|
||||
Ok(start_token) => {
|
||||
token = start_token.into();
|
||||
}
|
||||
Err(e) => {
|
||||
error_callback(&PartialPusHandlingError::Verification(e));
|
||||
}
|
||||
} else {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Disable(event_u32),
|
||||
token,
|
||||
}
|
||||
|
||||
let event_req_with_token = if enable {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Enable(event_u32),
|
||||
token,
|
||||
}
|
||||
} else {
|
||||
EventRequestWithToken {
|
||||
request: EventRequest::Disable(event_u32),
|
||||
token,
|
||||
}
|
||||
};
|
||||
self.event_request_tx
|
||||
.send(event_req_with_token)
|
||||
.map_err(|_| {
|
||||
PusPacketHandlingError::RequestRouting(GenericRoutingError::Send(
|
||||
GenericSendError::RxDisconnected,
|
||||
))
|
||||
})?;
|
||||
Ok(HandlingStatus::HandledOne.into())
|
||||
};
|
||||
|
||||
self.event_request_tx
|
||||
.send(event_req_with_token)
|
||||
.map_err(|_| {
|
||||
PusPacketHandlingError::Other("Forwarding event request failed".into())
|
||||
})?;
|
||||
if let Some(partial_error) = partial_error {
|
||||
return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
|
||||
partial_error,
|
||||
));
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
};
|
||||
let mut partial_error = None;
|
||||
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
|
||||
match srv.unwrap() {
|
||||
Subservice::TmInfoReport
|
||||
| Subservice::TmLowSeverityReport
|
||||
| Subservice::TmMediumSeverityReport
|
||||
| Subservice::TmHighSeverityReport => {
|
||||
return Err(PusPacketHandlingError::RequestConversion(
|
||||
GenericConversionError::WrongService(tc.subservice()),
|
||||
))
|
||||
return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice()))
|
||||
}
|
||||
Subservice::TcEnableEventGeneration => {
|
||||
handle_enable_disable_request(true)?;
|
||||
handle_enable_disable_request(true, time_stamp)?;
|
||||
}
|
||||
Subservice::TcDisableEventGeneration => {
|
||||
handle_enable_disable_request(false)?;
|
||||
handle_enable_disable_request(false, time_stamp)?;
|
||||
}
|
||||
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
|
||||
return Ok(DirectPusPacketHandlerResult::SubserviceNotImplemented(
|
||||
return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
|
||||
subservice,
|
||||
ecss_tc_and_token.token,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(HandlingStatus::HandledOne.into())
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
|
||||
|
||||
@ -146,75 +137,60 @@ impl<
|
||||
mod tests {
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::event::Subservice;
|
||||
use spacepackets::time::{cds, TimeWriter};
|
||||
use spacepackets::util::UnsignedEnum;
|
||||
use spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcSecondaryHeader},
|
||||
tm::PusTmReader,
|
||||
},
|
||||
SpHeader,
|
||||
SequenceFlags, SpHeader,
|
||||
};
|
||||
use std::sync::mpsc::{self, Sender};
|
||||
|
||||
use crate::pus::event_man::EventRequest;
|
||||
use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID};
|
||||
use crate::pus::tests::SimplePusPacketHandler;
|
||||
use crate::pus::verification::{
|
||||
RequestId, VerificationReporter, VerificationReportingProvider,
|
||||
RequestId, VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
};
|
||||
use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
|
||||
use crate::tmtc::PacketSenderWithSharedPool;
|
||||
use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc};
|
||||
use crate::{
|
||||
events::EventU32,
|
||||
pus::{
|
||||
event_man::EventRequestWithToken,
|
||||
tests::PusServiceHandlerWithSharedStoreCommon,
|
||||
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID},
|
||||
verification::{TcStateAccepted, VerificationToken},
|
||||
DirectPusPacketHandlerResult, EcssTcInSharedStoreConverter, PusPacketHandlingError,
|
||||
EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
|
||||
},
|
||||
};
|
||||
|
||||
use super::PusEventServiceHandler;
|
||||
use super::PusService5EventHandler;
|
||||
|
||||
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events::Severity::Info, 5, 25);
|
||||
const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);
|
||||
|
||||
struct Pus5HandlerWithStoreTester {
|
||||
common: PusServiceHandlerWithSharedStoreCommon,
|
||||
handler: PusEventServiceHandler<
|
||||
handler: PusService5EventHandler<
|
||||
MpscTcReceiver,
|
||||
PacketSenderWithSharedPool,
|
||||
TmInSharedPoolSenderWithBoundedMpsc,
|
||||
EcssTcInSharedStoreConverter,
|
||||
VerificationReporter,
|
||||
VerificationReporterWithSharedPoolMpscBoundedSender,
|
||||
>,
|
||||
}
|
||||
|
||||
impl Pus5HandlerWithStoreTester {
|
||||
pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
|
||||
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0);
|
||||
let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
|
||||
Self {
|
||||
common,
|
||||
handler: PusEventServiceHandler::new(srv_handler, event_request_tx),
|
||||
handler: PusService5EventHandler::new(srv_handler, event_request_tx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus5HandlerWithStoreTester {
|
||||
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
|
||||
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
|
||||
self.handler
|
||||
.service_helper
|
||||
.verif_reporter()
|
||||
.acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
|
||||
.expect("acceptance success failure")
|
||||
}
|
||||
|
||||
fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator) {
|
||||
self.common
|
||||
.send_tc(self.handler.service_helper.id(), token, tc);
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
|
||||
@ -224,11 +200,10 @@ mod tests {
|
||||
}
|
||||
|
||||
impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
|
||||
fn handle_one_tc(
|
||||
&mut self,
|
||||
) -> Result<DirectPusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
|
||||
self.handler.poll_and_handle_next_tc(|_| {}, &time_stamp)
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -238,16 +213,15 @@ mod tests {
|
||||
expected_event_req: EventRequest,
|
||||
event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
|
||||
) {
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
|
||||
let mut app_data = [0; 4];
|
||||
TEST_EVENT_0
|
||||
.write_to_be_bytes(&mut app_data)
|
||||
.expect("writing test event failed");
|
||||
let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
|
||||
let token = test_harness.init_verification(&ping_tc);
|
||||
test_harness.send_tc(&token, &ping_tc);
|
||||
let request_id = token.request_id();
|
||||
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
|
||||
let token = test_harness.send_tc(&ping_tc);
|
||||
let request_id = token.req_id();
|
||||
test_harness.handle_one_tc().unwrap();
|
||||
test_harness.check_next_verification_tm(1, request_id);
|
||||
test_harness.check_next_verification_tm(3, request_id);
|
||||
@ -290,28 +264,24 @@ mod tests {
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
assert!(
|
||||
matches!(
|
||||
result,
|
||||
DirectPusPacketHandlerResult::Handled(HandlingStatus::Empty)
|
||||
),
|
||||
"unexpected result type {result:?}"
|
||||
)
|
||||
if let PusPacketHandlerResult::Empty = result {
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_sending_custom_subservice() {
|
||||
let (event_request_tx, _) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
|
||||
let token = test_harness.init_verification(&ping_tc);
|
||||
test_harness.send_tc(&token, &ping_tc);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
let result = result.unwrap();
|
||||
if let DirectPusPacketHandlerResult::CustomSubservice(subservice, _) = result {
|
||||
if let PusPacketHandlerResult::CustomSubservice(subservice, _) = result {
|
||||
assert_eq!(subservice, 200);
|
||||
} else {
|
||||
panic!("unexpected result type {result:?}")
|
||||
@ -322,19 +292,15 @@ mod tests {
|
||||
fn test_sending_invalid_app_data() {
|
||||
let (event_request_tx, _) = mpsc::channel();
|
||||
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
|
||||
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header =
|
||||
PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
|
||||
let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true);
|
||||
let token = test_harness.init_verification(&ping_tc);
|
||||
test_harness.send_tc(&token, &ping_tc);
|
||||
let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
|
||||
test_harness.send_tc(&ping_tc);
|
||||
let result = test_harness.handle_one_tc();
|
||||
assert!(result.is_err());
|
||||
let result = result.unwrap_err();
|
||||
if let PusPacketHandlingError::RequestConversion(
|
||||
GenericConversionError::NotEnoughAppData { expected, found },
|
||||
) = result
|
||||
{
|
||||
if let PusPacketHandlingError::NotEnoughAppData { expected, found } = result {
|
||||
assert_eq!(expected, 4);
|
||||
assert_eq!(found, 3);
|
||||
} else {
|
||||
|
406
satrs/src/pus/hk.rs
Normal file
@ -0,0 +1,406 @@
pub use spacepackets::ecss::hk::*;

#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub use std_mod::*;

#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;

use crate::{hk::HkRequest, TargetId};

use super::verification::{TcStateAccepted, VerificationToken};

/// This trait is an abstraction for the routing of PUS service 3 housekeeping requests to a
/// dedicated recipient using the generic [TargetId].
pub trait PusHkRequestRouter {
type Error;
fn route(
&self,
target_id: TargetId,
hk_request: HkRequest,
token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error>;
}
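As an editorial illustration (not part of this diff), a router implementing the trait above could simply forward the request over a channel; the message tuple and the field name below are assumptions.

    use std::sync::mpsc;

    pub struct MpscHkRequestRouter {
        pub request_tx: mpsc::Sender<(TargetId, HkRequest, VerificationToken<TcStateAccepted>)>,
    }

    impl PusHkRequestRouter for MpscHkRequestRouter {
        type Error = mpsc::SendError<(TargetId, HkRequest, VerificationToken<TcStateAccepted>)>;

        fn route(
            &self,
            target_id: TargetId,
            hk_request: HkRequest,
            token: VerificationToken<TcStateAccepted>,
        ) -> Result<(), Self::Error> {
            // Hand the request over to the component owning the target ID.
            self.request_tx.send((target_id, hk_request, token))
        }
    }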
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod alloc_mod {
use spacepackets::ecss::tc::PusTcReader;

use crate::pus::verification::VerificationReportingProvider;

use super::*;

/// This trait is an abstraction for the conversion of a PUS service 3 housekeeping telecommand
/// into an [HkRequest].
///
/// Having a dedicated trait for this allows maximum flexibility and tailoring of the standard.
/// The only requirement is that a valid [TargetId] and an [HkRequest] are returned by the
/// core conversion function.
///
/// The user should take care of performing the error handling as well. Some of the following
/// aspects might be relevant:
///
/// - Checking the validity of the APID, service ID, subservice ID.
/// - Checking the validity of the user data.
///
/// A [VerificationReportingProvider] is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements.
pub trait PusHkToRequestConverter {
type Error;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, HkRequest), Self::Error>;
}
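A bare-bones converter sketch (editorial, not part of this diff). The user-data layout, the conversion of the raw ID into a [TargetId], and the helper constructing the [HkRequest] are placeholders; only the length check mirrors error handling used elsewhere in this changeset.

    pub struct ExampleHkRequestConverter;

    impl PusHkToRequestConverter for ExampleHkRequestConverter {
        type Error = PusPacketHandlingError;

        fn convert(
            &mut self,
            _token: VerificationToken<TcStateAccepted>,
            tc: &PusTcReader,
            _time_stamp: &[u8],
            _verif_reporter: &impl VerificationReportingProvider,
        ) -> Result<(TargetId, HkRequest), Self::Error> {
            let user_data = tc.user_data();
            if user_data.len() < 4 {
                return Err(PusPacketHandlingError::NotEnoughAppData {
                    expected: 4,
                    found: user_data.len(),
                });
            }
            // Assumed layout: 4 byte target ID, followed by service specific fields.
            // Assumes TargetId converts from u32.
            let target_id: TargetId = u32::from_be_bytes(user_data[0..4].try_into().unwrap()).into();
            Ok((target_id, build_hk_request(&user_data[4..])))
        }
    }

    // Placeholder only: constructing the concrete HkRequest variant is out of scope here.
    fn build_hk_request(_remaining_user_data: &[u8]) -> HkRequest {
        todo!("derive the HkRequest variant from the subservice and user data")
    }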
}

#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pus::{
get_current_cds_short_timestamp, verification::VerificationReportingProvider,
EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper,
};

use super::*;

/// This is a generic high-level handler for the PUS service 3 housekeeping service.
///
/// It performs the following handling steps:
///
/// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter]
/// allows configuring the used telecommand memory backend.
/// 2. Convert the TC to a targeted housekeeping request using the provided
/// [PusHkToRequestConverter]. The generic error type is constrained to the
/// [PusPacketHandlerResult] for the concrete implementation which offers a packet handler.
/// 3. Route the housekeeping request using the provided [PusHkRequestRouter]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
pub struct PusService3HkHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusHkToRequestConverter,
RequestRouter: PusHkRequestRouter<Error = RoutingError>,
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError = GenericRoutingError,
> {
service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub request_converter: RequestConverter,
pub request_router: RequestRouter,
pub routing_error_handler: RoutingErrorHandler,
}
||||
|
||||
impl<
|
||||
TcReceiver: EcssTcReceiverCore,
|
||||
TmSender: EcssTmSenderCore,
|
||||
TcInMemConverter: EcssTcInMemConverter,
|
||||
VerificationReporter: VerificationReportingProvider,
|
||||
RequestConverter: PusHkToRequestConverter<Error = PusPacketHandlingError>,
|
||||
RequestRouter: PusHkRequestRouter<Error = RoutingError>,
|
||||
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
|
||||
RoutingError: Clone,
|
||||
>
|
||||
PusService3HkHandler<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
RequestConverter,
|
||||
RequestRouter,
|
||||
RoutingErrorHandler,
|
||||
RoutingError,
|
||||
>
|
||||
where
|
||||
PusPacketHandlingError: From<RoutingError>,
|
||||
{
|
||||
pub fn new(
|
||||
service_helper: PusServiceHelper<
|
||||
TcReceiver,
|
||||
TmSender,
|
||||
TcInMemConverter,
|
||||
VerificationReporter,
|
||||
>,
|
||||
request_converter: RequestConverter,
|
||||
request_router: RequestRouter,
|
||||
routing_error_handler: RoutingErrorHandler,
|
||||
) -> Self {
|
||||
Self {
|
||||
service_helper,
|
||||
request_converter,
|
||||
request_router,
|
||||
routing_error_handler,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
|
||||
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
|
||||
if possible_packet.is_none() {
|
||||
return Ok(PusPacketHandlerResult::Empty);
|
||||
}
|
||||
let ecss_tc_and_token = possible_packet.unwrap();
|
||||
let tc = self
|
||||
.service_helper
|
||||
.tc_in_mem_converter
|
||||
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
|
||||
let mut partial_error = None;
|
||||
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
|
||||
let (target_id, hk_request) = self.request_converter.convert(
|
||||
ecss_tc_and_token.token,
|
||||
&tc,
|
||||
&time_stamp,
|
||||
&self.service_helper.common.verification_handler,
|
||||
)?;
|
||||
if let Err(e) =
|
||||
self.request_router
|
||||
.route(target_id, hk_request, ecss_tc_and_token.token)
|
||||
{
|
||||
self.routing_error_handler.handle_error(
|
||||
target_id,
|
||||
ecss_tc_and_token.token,
|
||||
&tc,
|
||||
e.clone(),
|
||||
&time_stamp,
|
||||
&self.service_helper.common.verification_handler,
|
||||
);
|
||||
return Err(e.into());
|
||||
}
|
||||
Ok(PusPacketHandlerResult::RequestHandled)
|
||||
}
|
||||
}
|
||||
}
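// --- Editor-added illustrative sketch, not part of the diff ---
// A typical polling task drains all pending service 3 telecommands once per scheduling slot
// by calling `handle_one_tc` until the handler reports an empty queue. The generic bounds
// simply repeat the ones of the handler implementation above; the function name and the
// early-exit policy on errors are hypothetical.
#[cfg(feature = "std")]
pub mod polling_sketch {
    use crate::pus::{
        verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore,
        EcssTmSenderCore, GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError,
        PusRoutingErrorHandler,
    };

    use super::*;

    pub fn poll_hk_service<
        TcReceiver: EcssTcReceiverCore,
        TmSender: EcssTmSenderCore,
        TcInMemConverter: EcssTcInMemConverter,
        VerificationReporter: VerificationReportingProvider,
        RequestConverter: PusHkToRequestConverter<Error = PusPacketHandlingError>,
        RequestRouter: PusHkRequestRouter<Error = GenericRoutingError>,
        RoutingErrorHandler: PusRoutingErrorHandler<Error = GenericRoutingError>,
    >(
        handler: &mut PusService3HkHandler<
            TcReceiver,
            TmSender,
            TcInMemConverter,
            VerificationReporter,
            RequestConverter,
            RequestRouter,
            RoutingErrorHandler,
        >,
    ) -> u32 {
        let mut handled_tcs = 0;
        loop {
            match handler.handle_one_tc() {
                // All queued telecommands were consumed for this slot.
                Ok(PusPacketHandlerResult::Empty) => break,
                // A request was converted and routed, check for more packets.
                Ok(_) => handled_tcs += 1,
                // Conversion or routing failed; a real task would also log or report this.
                Err(_e) => break,
            }
        }
        handled_tcs
    }
}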
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use delegate::delegate;
|
||||
use spacepackets::ecss::hk::Subservice;
|
||||
|
||||
use spacepackets::{
|
||||
ecss::{
|
||||
tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader},
|
||||
tm::PusTmReader,
|
||||
PusPacket,
|
||||
},
|
||||
CcsdsPacket, SequenceFlags, SpHeader,
|
||||
};
|
||||
|
||||
use crate::pus::{MpscTcReceiver, TmAsVecSenderWithMpsc};
|
||||
use crate::{
|
||||
hk::HkRequest,
|
||||
pus::{
|
||||
tests::{
|
||||
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler,
|
||||
TestConverter, TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID,
|
||||
},
|
||||
verification::{
|
||||
tests::TestVerificationReporter, FailParams, RequestId, TcStateAccepted,
|
||||
VerificationReportingProvider, VerificationToken,
|
||||
},
|
||||
EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult,
|
||||
PusPacketHandlingError,
|
||||
},
|
||||
TargetId,
|
||||
};
|
||||
|
||||
use super::{PusHkRequestRouter, PusHkToRequestConverter, PusService3HkHandler};
|
||||
|
||||
impl PusHkRequestRouter for TestRouter<HkRequest> {
|
||||
type Error = GenericRoutingError;
|
||||
|
||||
fn route(
|
||||
&self,
|
||||
target_id: TargetId,
|
||||
hk_request: HkRequest,
|
||||
_token: VerificationToken<TcStateAccepted>,
|
||||
) -> Result<(), Self::Error> {
|
||||
self.routing_requests
|
||||
.borrow_mut()
|
||||
.push_back((target_id, hk_request));
|
||||
self.check_for_injected_error()
|
||||
}
|
||||
}
|
||||
|
||||
impl PusHkToRequestConverter for TestConverter<3> {
|
||||
type Error = PusPacketHandlingError;
|
||||
fn convert(
|
||||
&mut self,
|
||||
token: VerificationToken<TcStateAccepted>,
|
||||
tc: &PusTcReader,
|
||||
time_stamp: &[u8],
|
||||
verif_reporter: &impl VerificationReportingProvider,
|
||||
) -> Result<(TargetId, HkRequest), Self::Error> {
|
||||
self.conversion_request.push_back(tc.raw_data().to_vec());
|
||||
self.check_service(tc)?;
|
||||
let target_id = tc.apid();
|
||||
if tc.user_data().len() < 4 {
|
||||
verif_reporter
|
||||
.start_failure(
|
||||
token,
|
||||
FailParams::new(
|
||||
time_stamp,
|
||||
&APP_DATA_TOO_SHORT,
|
||||
(tc.user_data().len() as u32).to_be_bytes().as_ref(),
|
||||
),
|
||||
)
|
||||
.expect("start success failure");
|
||||
return Err(PusPacketHandlingError::NotEnoughAppData {
|
||||
expected: 4,
|
||||
found: tc.user_data().len(),
|
||||
});
|
||||
}
|
||||
if tc.subservice() == Subservice::TcGenerateOneShotHk as u8 {
|
||||
verif_reporter
|
||||
.start_success(token, time_stamp)
|
||||
.expect("start success failure");
|
||||
return Ok((
|
||||
target_id.into(),
|
||||
HkRequest::OneShot(u32::from_be_bytes(
|
||||
tc.user_data()[0..4].try_into().unwrap(),
|
||||
)),
|
||||
));
|
||||
}
|
||||
Err(PusPacketHandlingError::InvalidAppData(
|
||||
"unexpected app data".into(),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
struct Pus3HandlerWithVecTester {
|
||||
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
|
||||
handler: PusService3HkHandler<
|
||||
MpscTcReceiver,
|
||||
TmAsVecSenderWithMpsc,
|
||||
EcssTcInVecConverter,
|
||||
TestVerificationReporter,
|
||||
TestConverter<3>,
|
||||
TestRouter<HkRequest>,
|
||||
TestRoutingErrorHandler,
|
||||
>,
|
||||
}
|
||||
|
||||
impl Pus3HandlerWithVecTester {
|
||||
pub fn new() -> Self {
|
||||
let (common, srv_handler) =
|
||||
PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
|
||||
Self {
|
||||
common,
|
||||
handler: PusService3HkHandler::new(
|
||||
srv_handler,
|
||||
TestConverter::default(),
|
||||
TestRouter::default(),
|
||||
TestRoutingErrorHandler::default(),
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.handler.request_converter {
|
||||
pub fn check_next_conversion(&mut self, tc: &PusTcCreator);
|
||||
}
|
||||
}
|
||||
delegate! {
|
||||
to self.handler.request_router {
|
||||
pub fn retrieve_next_request(&mut self) -> (TargetId, HkRequest);
|
||||
}
|
||||
}
|
||||
delegate! {
|
||||
to self.handler.routing_error_handler {
|
||||
pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PusTestHarness for Pus3HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.common {
|
||||
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
|
||||
fn read_next_tm(&mut self) -> PusTmReader<'_>;
|
||||
fn check_no_tm_available(&self) -> bool;
|
||||
fn check_next_verification_tm(
|
||||
&self,
|
||||
subservice: u8,
|
||||
expected_request_id: RequestId,
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
impl SimplePusPacketHandler for Pus3HandlerWithVecTester {
|
||||
delegate! {
|
||||
to self.handler {
|
||||
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn basic_test() {
|
||||
let mut hk_handler = Pus3HandlerWithVecTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8);
|
||||
let unique_id: u32 = 1;
|
||||
let unique_id_raw = unique_id.to_be_bytes();
|
||||
let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true);
|
||||
hk_handler.send_tc(&tc);
|
||||
let result = hk_handler.handle_one_tc();
|
||||
assert!(result.is_ok());
|
||||
hk_handler.check_next_conversion(&tc);
|
||||
let (target_id, hk_request) = hk_handler.retrieve_next_request();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
if let HkRequest::OneShot(id) = hk_request {
|
||||
assert_eq!(id, unique_id);
|
||||
} else {
|
||||
panic!("unexpected request");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_routing_error() {
|
||||
let mut hk_handler = Pus3HandlerWithVecTester::new();
|
||||
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8);
|
||||
let unique_id: u32 = 1;
|
||||
let unique_id_raw = unique_id.to_be_bytes();
|
||||
let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true);
|
||||
let error = GenericRoutingError::UnknownTargetId(25);
|
||||
hk_handler
|
||||
.handler
|
||||
.request_router
|
||||
.inject_routing_error(error);
|
||||
hk_handler.send_tc(&tc);
|
||||
let result = hk_handler.handle_one_tc();
|
||||
assert!(result.is_err());
|
||||
let check_error = |routing_error: GenericRoutingError| {
|
||||
if let GenericRoutingError::UnknownTargetId(id) = routing_error {
|
||||
assert_eq!(id, 25);
|
||||
} else {
|
||||
panic!("unexpected error type");
|
||||
}
|
||||
};
|
||||
if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() {
|
||||
check_error(routing_error);
|
||||
} else {
|
||||
panic!("unexpected error type");
|
||||
}
|
||||
|
||||
hk_handler.check_next_conversion(&tc);
|
||||
let (target_id, hk_req) = hk_handler.retrieve_next_request();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
if let HkRequest::OneShot(unique_id) = hk_req {
|
||||
assert_eq!(unique_id, 1);
|
||||
}
|
||||
|
||||
let (target_id, found_error) = hk_handler.retrieve_next_error();
|
||||
assert_eq!(target_id, TEST_APID.into());
|
||||
check_error(found_error);
|
||||
}
|
||||
}
|
1356
satrs/src/pus/mod.rs
File diff suppressed because it is too large
@ -2,16 +2,6 @@ use num_enum::{IntoPrimitive, TryFromPrimitive};
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
#[allow(unused_imports)]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[allow(unused_imports)]
|
||||
pub use std_mod::*;
|
||||
|
||||
pub const MODE_SERVICE_ID: u8 = 200;
|
||||
|
||||
#[derive(Debug, Eq, PartialEq, Copy, Clone, IntoPrimitive, TryFromPrimitive)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
#[repr(u8)]
|
||||
@ -24,132 +14,3 @@ pub enum Subservice {
|
||||
TmCantReachMode = 7,
|
||||
TmWrongModeReply = 8,
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod alloc_mod {}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
pub mod std_mod {}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
|
||||
use std::sync::mpsc;
|
||||
|
||||
use crate::{
|
||||
mode::{
|
||||
ModeAndSubmode, ModeReply, ModeReplySender, ModeRequest, ModeRequestSender,
|
||||
ModeRequestorAndHandlerMpsc, ModeRequestorMpsc,
|
||||
},
|
||||
request::{GenericMessage, MessageMetadata},
|
||||
};
|
||||
|
||||
const TEST_COMPONENT_ID_0: u64 = 5;
|
||||
const TEST_COMPONENT_ID_1: u64 = 6;
|
||||
const TEST_COMPONENT_ID_2: u64 = 7;
|
||||
|
||||
#[test]
|
||||
fn test_simple_mode_requestor() {
|
||||
let (reply_sender, reply_receiver) = mpsc::channel();
|
||||
let (request_sender, request_receiver) = mpsc::channel();
|
||||
let mut mode_requestor = ModeRequestorMpsc::new(TEST_COMPONENT_ID_0, reply_receiver);
|
||||
mode_requestor.add_message_target(TEST_COMPONENT_ID_1, request_sender);
|
||||
|
||||
// Send a request and verify it arrives at the receiver.
|
||||
let request_id = 2;
|
||||
let sent_request = ModeRequest::ReadMode;
|
||||
mode_requestor
|
||||
.send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request)
|
||||
.expect("send failed");
|
||||
let request = request_receiver.recv().expect("recv failed");
|
||||
assert_eq!(request.request_id(), 2);
|
||||
assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0);
|
||||
assert_eq!(request.message, sent_request);
|
||||
|
||||
// Send a reply and verify it arrives at the requestor.
|
||||
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(1, 5));
|
||||
reply_sender
|
||||
.send(GenericMessage::new(
|
||||
MessageMetadata::new(request_id, TEST_COMPONENT_ID_1),
|
||||
mode_reply,
|
||||
))
|
||||
.expect("send failed");
|
||||
let reply = mode_requestor.try_recv_mode_reply().expect("recv failed");
|
||||
assert!(reply.is_some());
|
||||
let reply = reply.unwrap();
|
||||
assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_1);
|
||||
assert_eq!(reply.request_id(), 2);
|
||||
assert_eq!(reply.message, mode_reply);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mode_requestor_and_request_handler_request_sending() {
|
||||
let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel();
|
||||
let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel();
|
||||
|
||||
let (request_sender_to_channel_1, request_receiver_channel_1) = mpsc::channel();
|
||||
//let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel();
|
||||
let mut mode_connector = ModeRequestorAndHandlerMpsc::new(
|
||||
TEST_COMPONENT_ID_0,
|
||||
request_receiver_of_connector,
|
||||
reply_receiver_of_connector,
|
||||
);
|
||||
assert_eq!(
|
||||
ModeRequestSender::local_channel_id(&mode_connector),
|
||||
TEST_COMPONENT_ID_0
|
||||
);
|
||||
assert_eq!(
|
||||
ModeReplySender::local_channel_id(&mode_connector),
|
||||
TEST_COMPONENT_ID_0
|
||||
);
|
||||
assert_eq!(
|
||||
mode_connector.local_channel_id_generic(),
|
||||
TEST_COMPONENT_ID_0
|
||||
);
|
||||
|
||||
mode_connector.add_request_target(TEST_COMPONENT_ID_1, request_sender_to_channel_1);
|
||||
|
||||
// Send a request and verify it arrives at the receiver.
|
||||
let request_id = 2;
|
||||
let sent_request = ModeRequest::ReadMode;
|
||||
mode_connector
|
||||
.send_mode_request(request_id, TEST_COMPONENT_ID_1, sent_request)
|
||||
.expect("send failed");
|
||||
|
||||
let request = request_receiver_channel_1.recv().expect("recv failed");
|
||||
assert_eq!(request.request_id(), 2);
|
||||
assert_eq!(request.sender_id(), TEST_COMPONENT_ID_0);
|
||||
assert_eq!(request.message, ModeRequest::ReadMode);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mode_requestor_and_request_handler_reply_sending() {
|
||||
let (_reply_sender_to_connector, reply_receiver_of_connector) = mpsc::channel();
|
||||
let (_request_sender_to_connector, request_receiver_of_connector) = mpsc::channel();
|
||||
|
||||
let (reply_sender_to_channel_2, reply_receiver_channel_2) = mpsc::channel();
|
||||
let mut mode_connector = ModeRequestorAndHandlerMpsc::new(
|
||||
TEST_COMPONENT_ID_0,
|
||||
request_receiver_of_connector,
|
||||
reply_receiver_of_connector,
|
||||
);
|
||||
mode_connector.add_reply_target(TEST_COMPONENT_ID_2, reply_sender_to_channel_2);
|
||||
|
||||
// Send a reply and verify it arrives at the receiver.
|
||||
let request_id = 2;
|
||||
let sent_reply = ModeReply::ModeReply(ModeAndSubmode::new(3, 5));
|
||||
mode_connector
|
||||
.send_mode_reply(
|
||||
MessageMetadata::new(request_id, TEST_COMPONENT_ID_2),
|
||||
sent_reply,
|
||||
)
|
||||
.expect("send failed");
|
||||
let reply = reply_receiver_channel_2.recv().expect("recv failed");
|
||||
assert_eq!(reply.request_id(), 2);
|
||||
assert_eq!(reply.sender_id(), TEST_COMPONENT_ID_0);
|
||||
assert_eq!(reply.message, sent_reply);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_mode_reply_handler() {}
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ use spacepackets::{ByteConversionError, CcsdsPacket};
|
||||
#[cfg(feature = "std")]
|
||||
use std::error::Error;
|
||||
|
||||
use crate::pool::{PoolError, PoolProvider};
|
||||
use crate::pool::{PoolProvider, StoreError};
|
||||
#[cfg(feature = "alloc")]
|
||||
pub use alloc_mod::*;
|
||||
|
||||
@ -151,7 +151,7 @@ pub enum ScheduleError {
|
||||
},
|
||||
/// Nested time-tagged commands are not allowed.
|
||||
NestedScheduledTc,
|
||||
StoreError(PoolError),
|
||||
StoreError(StoreError),
|
||||
TcDataEmpty,
|
||||
TimestampError(TimestampError),
|
||||
WrongSubservice(u8),
|
||||
@ -206,8 +206,8 @@ impl From<PusError> for ScheduleError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PoolError> for ScheduleError {
|
||||
fn from(e: PoolError) -> Self {
|
||||
impl From<StoreError> for ScheduleError {
|
||||
fn from(e: StoreError) -> Self {
|
||||
Self::StoreError(e)
|
||||
}
|
||||
}
|
||||
@ -240,7 +240,7 @@ impl Error for ScheduleError {
|
||||
pub trait PusSchedulerProvider {
|
||||
type TimeProvider: CcsdsTimeProvider + TimeReader;
|
||||
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError>;
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError>;
|
||||
|
||||
fn is_enabled(&self) -> bool;
|
||||
|
||||
@ -345,9 +345,12 @@ pub mod alloc_mod {
|
||||
},
|
||||
vec::Vec,
|
||||
};
|
||||
use spacepackets::time::cds::{self, DaysLen24Bits};
|
||||
use spacepackets::time::{
|
||||
cds::{self, DaysLen24Bits},
|
||||
UnixTime,
|
||||
};
|
||||
|
||||
use crate::pool::PoolAddr;
|
||||
use crate::pool::StoreAddr;
|
||||
|
||||
use super::*;
|
||||
|
||||
@ -368,8 +371,8 @@ pub mod alloc_mod {
|
||||
}
|
||||
|
||||
enum DeletionResult {
|
||||
WithoutStoreDeletion(Option<PoolAddr>),
|
||||
WithStoreDeletion(Result<bool, PoolError>),
|
||||
WithoutStoreDeletion(Option<StoreAddr>),
|
||||
WithStoreDeletion(Result<bool, StoreError>),
|
||||
}
|
||||
|
||||
/// This is the core data structure for scheduling PUS telecommands with [alloc] support.
|
||||
@ -378,7 +381,7 @@ pub mod alloc_mod {
|
||||
/// a [crate::pool::PoolProvider] API. This data structure just tracks the store
|
||||
/// addresses and their release times and offers a convenient API to insert and release
|
||||
/// telecommands and perform other functionality specified by the ECSS standard in section 6.11.
|
||||
/// The time is tracked as a [spacepackets::time::UnixTime] but the only requirement to
|
||||
/// The time is tracked as a [spacepackets::time::UnixTimestamp] but the only requirement to
|
||||
/// the timekeeping of the user is that it is convertible to that timestamp.
|
||||
///
|
||||
/// The standard also specifies that the PUS scheduler can be enabled and disabled.
|
||||
@ -423,6 +426,7 @@ pub mod alloc_mod {
|
||||
|
||||
/// Like [Self::new], but sets the `init_current_time` parameter to the current system time.
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub fn new_with_current_init_time(time_margin: Duration) -> Result<Self, SystemTimeError> {
|
||||
Ok(Self::new(UnixTime::now()?, time_margin))
|
||||
}
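// Editor-added illustrative sketch, not part of the diff: creating a scheduler anchored at
// the current OS time and inspecting its state. All names used below appear in the
// surrounding code and tests; the release-time handling comment is a summary, not a
// definitive statement of the time-margin semantics.
//
// let mut scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
//     .expect("creating scheduler from system time failed");
// assert!(scheduler.is_enabled());
// assert_eq!(scheduler.num_scheduled_telecommands(), 0);
// // Telecommands can now be inserted, e.g. with `insert_unwrapped_and_stored_tc`, and are
// // released once `update_time_from_now` has advanced the scheduler time past their
// // release time.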
|
||||
@ -524,7 +528,7 @@ pub mod alloc_mod {
|
||||
&mut self,
|
||||
time_window: TimeWindow<TimeProvider>,
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, PoolError)> {
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
let range = self.retrieve_by_time_filter(time_window);
|
||||
let mut del_packets = 0;
|
||||
let mut res_if_fails = None;
|
||||
@ -554,7 +558,7 @@ pub mod alloc_mod {
|
||||
pub fn delete_all(
|
||||
&mut self,
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, PoolError)> {
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.delete_by_time_filter(TimeWindow::<cds::CdsTime>::new_select_all(), pool)
|
||||
}
|
||||
|
||||
@ -600,7 +604,7 @@ pub mod alloc_mod {
|
||||
/// Please note that this function will stop on the first telecommand with a request ID match.
|
||||
/// In case of duplicate IDs (which should generally not happen), this function needs to be
|
||||
/// called repeatedly.
|
||||
pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option<PoolAddr> {
|
||||
pub fn delete_by_request_id(&mut self, req_id: &RequestId) -> Option<StoreAddr> {
|
||||
if let DeletionResult::WithoutStoreDeletion(v) =
|
||||
self.delete_by_request_id_internal_without_store_deletion(req_id)
|
||||
{
|
||||
@ -614,7 +618,7 @@ pub mod alloc_mod {
|
||||
&mut self,
|
||||
req_id: &RequestId,
|
||||
pool: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<bool, PoolError> {
|
||||
) -> Result<bool, StoreError> {
|
||||
if let DeletionResult::WithStoreDeletion(v) =
|
||||
self.delete_by_request_id_internal_with_store_deletion(req_id, pool)
|
||||
{
|
||||
@ -666,6 +670,7 @@ pub mod alloc_mod {
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError> {
|
||||
self.current_time = UnixTime::now()?;
|
||||
Ok(())
|
||||
@ -691,7 +696,7 @@ pub mod alloc_mod {
|
||||
releaser: R,
|
||||
tc_store: &mut (impl PoolProvider + ?Sized),
|
||||
tc_buf: &mut [u8],
|
||||
) -> Result<u64, (u64, PoolError)> {
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.release_telecommands_internal(releaser, tc_store, Some(tc_buf))
|
||||
}
|
||||
|
||||
@ -705,7 +710,7 @@ pub mod alloc_mod {
|
||||
&mut self,
|
||||
releaser: R,
|
||||
tc_store: &mut (impl PoolProvider + ?Sized),
|
||||
) -> Result<u64, (u64, PoolError)> {
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
self.release_telecommands_internal(releaser, tc_store, None)
|
||||
}
|
||||
|
||||
@ -714,7 +719,7 @@ pub mod alloc_mod {
|
||||
mut releaser: R,
|
||||
tc_store: &mut (impl PoolProvider + ?Sized),
|
||||
mut tc_buf: Option<&mut [u8]>,
|
||||
) -> Result<u64, (u64, PoolError)> {
|
||||
) -> Result<u64, (u64, StoreError)> {
|
||||
let tcs_to_release = self.telecommands_to_release();
|
||||
let mut released_tcs = 0;
|
||||
let mut store_error = Ok(());
|
||||
@ -760,7 +765,7 @@ pub mod alloc_mod {
|
||||
mut releaser: R,
|
||||
tc_store: &(impl PoolProvider + ?Sized),
|
||||
tc_buf: &mut [u8],
|
||||
) -> Result<alloc::vec::Vec<TcInfo>, (alloc::vec::Vec<TcInfo>, PoolError)> {
|
||||
) -> Result<alloc::vec::Vec<TcInfo>, (alloc::vec::Vec<TcInfo>, StoreError)> {
|
||||
let tcs_to_release = self.telecommands_to_release();
|
||||
let mut released_tcs = alloc::vec::Vec::new();
|
||||
for tc in tcs_to_release {
|
||||
@ -791,7 +796,7 @@ pub mod alloc_mod {
|
||||
/// The holding store for the telecommands needs to be passed so all the stored telecommands
|
||||
/// can be deleted to avoid a memory leak. If at least one deletion operation fails, the error
|
||||
/// will be returned but the method will still try to delete all the commands in the schedule.
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError> {
|
||||
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), StoreError> {
|
||||
self.enabled = false;
|
||||
let mut deletion_ok = Ok(());
|
||||
for tc_lists in &mut self.tc_map {
|
||||
@ -849,7 +854,7 @@ pub mod alloc_mod {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::pool::{
|
||||
PoolAddr, PoolError, PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig,
|
||||
PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig, StoreAddr, StoreError,
|
||||
};
|
||||
use alloc::collections::btree_map::Range;
|
||||
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
|
||||
@ -866,28 +871,28 @@ mod tests {
|
||||
cds::CdsTime::from_unix_time_with_u16_days(×tamp, cds::SubmillisPrecision::Absent)
|
||||
.unwrap();
|
||||
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
|
||||
let len_packet = base_ping_tc_simple_ctor(0, &[])
|
||||
let len_packet = base_ping_tc_simple_ctor(0, None)
|
||||
.write_to_bytes(&mut buf[len_time_stamp..])
|
||||
.unwrap();
|
||||
(
|
||||
SpHeader::new_for_unseg_tc(0x02, 0x34, len_packet as u16),
|
||||
SpHeader::tc_unseg(0x02, 0x34, len_packet as u16).unwrap(),
|
||||
len_packet + len_time_stamp,
|
||||
)
|
||||
}
|
||||
|
||||
fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
|
||||
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], true)
|
||||
let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(&mut sph, 11, 4, Some(&buf[..len_app_data]), true)
|
||||
}
|
||||
|
||||
fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
|
||||
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], true)
|
||||
let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(&mut sph, 12, 4, Some(&buf[..len_app_data]), true)
|
||||
}
|
||||
|
||||
fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
|
||||
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], true)
|
||||
let (mut sph, len_app_data) = pus_tc_base(timestamp, buf);
|
||||
PusTcCreator::new_simple(&mut sph, 11, 5, Some(&buf[..len_app_data]), true)
|
||||
}
|
||||
|
||||
fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
|
||||
@ -895,31 +900,40 @@ mod tests {
|
||||
cds::CdsTime::from_unix_time_with_u16_days(×tamp, cds::SubmillisPrecision::Absent)
|
||||
.unwrap();
|
||||
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
|
||||
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 0);
|
||||
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 0).unwrap();
|
||||
// app data should not matter, double wrapped time-tagged commands should be rejected right
|
||||
// away
|
||||
let inner_time_tagged_tc = PusTcCreator::new_simple(sph, 11, 4, &[], true);
|
||||
let inner_time_tagged_tc = PusTcCreator::new_simple(&mut sph, 11, 4, None, true);
|
||||
let packet_len = inner_time_tagged_tc
|
||||
.write_to_bytes(&mut buf[len_time_stamp..])
|
||||
.expect("writing inner time tagged tc failed");
|
||||
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_time_stamp + packet_len], true)
|
||||
PusTcCreator::new_simple(
|
||||
&mut sph,
|
||||
11,
|
||||
4,
|
||||
Some(&buf[..len_time_stamp + packet_len]),
|
||||
true,
|
||||
)
|
||||
}
|
||||
|
||||
fn invalid_time_tagged_cmd() -> PusTcCreator<'static> {
|
||||
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 1);
|
||||
PusTcCreator::new_simple(sph, 11, 4, &[], true)
|
||||
let mut sph = SpHeader::tc_unseg(0x02, 0x34, 1).unwrap();
|
||||
PusTcCreator::new_simple(&mut sph, 11, 4, None, true)
|
||||
}
|
||||
|
||||
fn base_ping_tc_simple_ctor(seq_count: u16, app_data: &'static [u8]) -> PusTcCreator<'static> {
|
||||
let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0);
|
||||
PusTcCreator::new_simple(sph, 17, 1, app_data, true)
|
||||
fn base_ping_tc_simple_ctor(
|
||||
seq_count: u16,
|
||||
app_data: Option<&'static [u8]>,
|
||||
) -> PusTcCreator<'static> {
|
||||
let mut sph = SpHeader::tc_unseg(0x02, seq_count, 0).unwrap();
|
||||
PusTcCreator::new_simple(&mut sph, 17, 1, app_data, true)
|
||||
}
|
||||
|
||||
fn ping_tc_to_store(
|
||||
pool: &mut StaticMemoryPool,
|
||||
buf: &mut [u8],
|
||||
seq_count: u16,
|
||||
app_data: &'static [u8],
|
||||
app_data: Option<&'static [u8]>,
|
||||
) -> TcInfo {
|
||||
let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data);
|
||||
let ping_size = ping_tc.write_to_bytes(buf).expect("writing ping TC failed");
|
||||
@ -943,7 +957,7 @@ mod tests {
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(
|
||||
@ -953,7 +967,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let app_data = &[0, 1, 2];
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, app_data);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, Some(app_data));
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(
|
||||
UnixTime::new_only_secs(200),
|
||||
@ -962,7 +976,7 @@ mod tests {
|
||||
.unwrap();
|
||||
|
||||
let app_data = &[0, 1, 2];
|
||||
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, app_data);
|
||||
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, Some(app_data));
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(
|
||||
UnixTime::new_only_secs(300),
|
||||
@ -988,7 +1002,7 @@ mod tests {
|
||||
.insert_unwrapped_and_stored_tc(
|
||||
UnixTime::new_only_secs(100),
|
||||
TcInfo::new(
|
||||
PoolAddr::from(StaticPoolAddr {
|
||||
StoreAddr::from(StaticPoolAddr {
|
||||
pool_idx: 0,
|
||||
packet_idx: 1,
|
||||
}),
|
||||
@ -1005,7 +1019,7 @@ mod tests {
|
||||
.insert_unwrapped_and_stored_tc(
|
||||
UnixTime::new_only_secs(100),
|
||||
TcInfo::new(
|
||||
PoolAddr::from(StaticPoolAddr {
|
||||
StoreAddr::from(StaticPoolAddr {
|
||||
pool_idx: 0,
|
||||
packet_idx: 2,
|
||||
}),
|
||||
@ -1049,8 +1063,8 @@ mod tests {
|
||||
|
||||
fn common_check(
|
||||
enabled: bool,
|
||||
store_addr: &PoolAddr,
|
||||
expected_store_addrs: Vec<PoolAddr>,
|
||||
store_addr: &StoreAddr,
|
||||
expected_store_addrs: Vec<StoreAddr>,
|
||||
counter: &mut usize,
|
||||
) {
|
||||
assert!(enabled);
|
||||
@ -1059,8 +1073,8 @@ mod tests {
|
||||
}
|
||||
fn common_check_disabled(
|
||||
enabled: bool,
|
||||
store_addr: &PoolAddr,
|
||||
expected_store_addrs: Vec<PoolAddr>,
|
||||
store_addr: &StoreAddr,
|
||||
expected_store_addrs: Vec<StoreAddr>,
|
||||
counter: &mut usize,
|
||||
) {
|
||||
assert!(!enabled);
|
||||
@ -1073,10 +1087,10 @@ mod tests {
|
||||
let src_id_to_set = 12;
|
||||
let apid_to_set = 0x22;
|
||||
let seq_count = 105;
|
||||
let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, 105, 0);
|
||||
let mut sp_header = SpHeader::tc_unseg(apid_to_set, 105, 0).unwrap();
|
||||
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
sec_header.source_id = src_id_to_set;
|
||||
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
|
||||
let req_id = RequestId::from_tc(&ping_tc);
|
||||
assert_eq!(req_id.source_id(), src_id_to_set);
|
||||
assert_eq!(req_id.apid(), apid_to_set);
|
||||
@ -1092,13 +1106,13 @@ mod tests {
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
|
||||
.expect("insertion failed");
|
||||
@ -1157,13 +1171,13 @@ mod tests {
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
|
||||
.expect("insertion failed");
|
||||
@ -1216,13 +1230,13 @@ mod tests {
|
||||
scheduler.disable();
|
||||
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
|
||||
.expect("insertion failed");
|
||||
@ -1280,7 +1294,7 @@ mod tests {
|
||||
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
let info = scheduler
|
||||
.insert_unwrapped_tc(
|
||||
@ -1295,7 +1309,7 @@ mod tests {
|
||||
let mut read_buf: [u8; 64] = [0; 64];
|
||||
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
|
||||
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
|
||||
|
||||
@ -1318,7 +1332,7 @@ mod tests {
|
||||
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1342,7 +1356,7 @@ mod tests {
|
||||
let read_len = pool.read(&info.addr, &mut buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
|
||||
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
|
||||
|
||||
@ -1367,7 +1381,7 @@ mod tests {
|
||||
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
|
||||
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
|
||||
assert_eq!(read_len, check_tc.1);
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
|
||||
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, None));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -1492,7 +1506,7 @@ mod tests {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
@ -1514,7 +1528,7 @@ mod tests {
|
||||
// TC could not even be read.
|
||||
assert_eq!(err.0, 0);
|
||||
match err.1 {
|
||||
PoolError::DataDoesNotExist(addr) => {
|
||||
StoreError::DataDoesNotExist(addr) => {
|
||||
assert_eq!(tc_info_0.addr(), addr);
|
||||
}
|
||||
_ => panic!("unexpected error {}", err.1),
|
||||
@ -1526,7 +1540,7 @@ mod tests {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
@ -1537,7 +1551,7 @@ mod tests {
|
||||
assert!(reset_res.is_err());
|
||||
let err = reset_res.unwrap_err();
|
||||
match err {
|
||||
PoolError::DataDoesNotExist(addr) => {
|
||||
StoreError::DataDoesNotExist(addr) => {
|
||||
assert_eq!(addr, tc_info_0.addr());
|
||||
}
|
||||
_ => panic!("unexpected error {err}"),
|
||||
@ -1549,7 +1563,7 @@ mod tests {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("inserting tc failed");
|
||||
@ -1567,7 +1581,7 @@ mod tests {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("inserting tc failed");
|
||||
@ -1585,15 +1599,15 @@ mod tests {
|
||||
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new(vec![(10, 32), (5, 64)], false));
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("inserting tc failed");
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
|
||||
.expect("inserting tc failed");
|
||||
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, &[]);
|
||||
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_2)
|
||||
.expect("inserting tc failed");
|
||||
@ -1639,7 +1653,7 @@ mod tests {
|
||||
let err = insert_res.unwrap_err();
|
||||
match err {
|
||||
ScheduleError::StoreError(e) => match e {
|
||||
PoolError::StoreFull(_) => {}
|
||||
StoreError::StoreFull(_) => {}
|
||||
_ => panic!("unexpected store error {e}"),
|
||||
},
|
||||
_ => panic!("unexpected error {err}"),
|
||||
@ -1653,7 +1667,7 @@ mod tests {
|
||||
release_secs: u64,
|
||||
) -> TcInfo {
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, &[]);
|
||||
let tc_info = ping_tc_to_store(pool, &mut buf, seq_count, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(release_secs as i64), tc_info)
|
||||
@ -1901,13 +1915,13 @@ mod tests {
|
||||
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
|
||||
|
||||
let mut buf: [u8; 32] = [0; 32];
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
|
||||
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, None);
|
||||
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
|
||||
.expect("insertion failed");
|
||||
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
|
||||
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, None);
|
||||
scheduler
|
||||
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
|
||||
.expect("insertion failed");
|
||||
@ -1935,13 +1949,13 @@ mod tests {
|
||||
#[test]
|
||||
fn test_generic_insert_app_data_test() {
|
||||
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
|
||||
let sph = SpHeader::new(
|
||||
PacketId::new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
|
||||
let mut sph = SpHeader::new(
|
||||
PacketId::const_new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
|
||||
0,
|
||||
);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
|
||||
let mut buf: [u8; 64] = [0; 64];
|
||||
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
|
||||
assert!(result.is_ok());
|
||||
@ -1957,13 +1971,13 @@ mod tests {
|
||||
#[test]
|
||||
fn test_generic_insert_app_data_test_byte_conv_error() {
|
||||
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
|
||||
let sph = SpHeader::new(
|
||||
PacketId::new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
|
||||
let mut sph = SpHeader::new(
|
||||
PacketId::const_new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
|
||||
0,
|
||||
);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
|
||||
let mut buf: [u8; 16] = [0; 16];
|
||||
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
|
||||
assert!(result.is_err());
|
||||
@ -1986,13 +2000,13 @@ mod tests {
|
||||
#[test]
|
||||
fn test_generic_insert_app_data_test_as_vec() {
|
||||
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
|
||||
let sph = SpHeader::new(
|
||||
PacketId::new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
|
||||
let mut sph = SpHeader::new(
|
||||
PacketId::const_new(PacketType::Tc, true, 0x002),
|
||||
PacketSequenceCtrl::const_new(SequenceFlags::Unsegmented, 5),
|
||||
0,
|
||||
);
|
||||
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
|
||||
let ping_tc = PusTcCreator::new_no_app_data(&mut sph, sec_header, true);
|
||||
let mut buf: [u8; 64] = [0; 64];
|
||||
generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc).unwrap();
|
||||
let vec = generate_insert_telecommand_app_data_as_vec(&time_writer, &ping_tc)
|
||||
|
Some files were not shown because too many files have changed in this diff