Compare commits

satrs-core ... satrs-core

183 Commits (SHA1):

df38cf6491, d89e2ecfce, 5aa4680513, e7e243d0c1, 6a0396a337, 41145f89aa, 7f2257c30a, 9bec69291b, ecf5ebaf2f, 338243b228,
2d62d4a61b, 9f37c84dfc, 35cef32ebf, 0117482da1, 0aa4b51300, f0ccc35e80, a62df6dbf8, 683ae899f5, c96de203b8, 26dd8d9815,
06e3d32217, 81a9584dc4, d85a684333, cd3e213b1e, 01b55a1df1, a0b0716564, 7af3888f90, 67fdfb98e5, 822023fc8a, 86f2c2acec,
97a7c994b8, 0b0d7a44c3, 0d0cc98dfe, 536c5f6949, 175d995a0e, 5cf943f9e4, d5668b2f20, c51f903ef2, 520ee17551, ffcab9592e,
fb86e6dfc0, 4fedcc47c2, 6579c6d864, eb6a94980a, 1cd7baa367, 5ec2881f01, 0e4eebdda5, 216874d329, 0d49dbcc2a, 39621cf855,
c3bce27747, e717999cb0, 3f73b73ded, 1517811d13, 1851b74279, 4017b5afc2, f314e69ed8, afd7999d5c, 567a0a1cf5, 3aba6b4276,
22254e4bbe, 9ccb6bb000, e1998a8bcc, b62d60f579, 35e1f7a983, 4dd85f294c, aa556ad746, d5722b7f39, d0e6ccdaa3, 86ec0f50b8,
e4d8c0c9a7, 2f08365247, 88a5a390d9, 7536e107da, 54bc37b086, d42999d2ad, 3207be7ffe, 047256f2f8, 5aa339286a, b622c3871a,
079da20640, 8582d226ec, 8d8e319aee, de690b3eed, d582ce212e, 706dde51c4, b5813f9c90, 51e31f70f7, e3043ce2d7, 0e6d903942,
eb5c755dd3, 84b5ac5ef2, f9f68ac171, d299b55870, 1af5601d63, 3d6e33bc00, 13cacb0b53, 28801a8952, 6593d289be, 3e9a07b732,
bbd6cec8ac, 603bf61f6c, 1bb4238e9f, e2bbcedf3e, d328a3591c, 83c5784b9d, ead708b1bb, 3ec6590c23, fc464d4078, 6c47efc244,
609b3c11b1, 7aecc94fda, 2a72967c26, b17b53abe3, 1d90c3058b, c39a2d084b, c2ebe1bd55, 3d9d486027, e142215065, 73830afcb7,
778f30ef1b, 3f3a7e8efc, dca7449edd, 40c8c36af3, 2322d3a9b3, 696d9fe48d, 4e186541ec, 44905fb700, df90c22fef, 1a38de760a,
309ceda5a5, 8798a3457e, e9944d52cc, c153276454, 3bffa8ed83, dc6b7f6487, f8a92cef3d, 73a4955fb3, 37261e512c, f271ae5689,
bcf22f42d4, 57c5f72428, f1d468b298, a351cc255f, 2f5522b0d5, 2e512ee895, cbef94edd5, ec9abea11a, 2e3b142519, 6960830fd9,
5009ada21b, e09ffc6dc7, f694a94df2, 725713bc4b, 1b208bc540, afd9395cee, 143b0869a4, a0f2d858ce, c664cdb332, d2cdcf9c79,
eb85741684, a415cd8f6c, c606fe7d0c, 6fdaf02cc7, 1bae0c30bb, c1252f949e, 7469be6b72, 8a73a99f26, f69035a868, 471a955bb1,
beb80b2188, f3d862ac19, c8c18c54df, 05391bbafe, 95a1295718, f5c0b0f6bb, 0ea0f90b25, c0e1cb8bcf, 9bbd2cdad1, 6c87ae0b67,
2213a25508, beebf00565, 0e2a413505
Cargo.toml

@@ -1,5 +1,5 @@
[workspace]

resolver = "2"
members = [
    "satrs-core",
    "satrs-mib",
@@ -9,3 +9,4 @@ members = [
exclude = [
    "satrs-example-stm32f3-disco",
]
2 NOTICE

@@ -1 +1,3 @@
This software contains code developed at the University of Stuttgart's Institute of Space Systems.

The sat-rs logo was designed by Nadine Eunous.
README.md

@@ -1,3 +1,5 @@
<p align="center"> <img src="misc/satrs-logo.png" width="40%"> </p>

sat-rs
=========

@@ -21,7 +23,7 @@ This project currently contains following crates:
  on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad/src/branch/main/satrs-mib):
  Components to build a mission information base from the on-board software directly.
* [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/satrs-example-stm32f3-disco):
* [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco):
  Example of a simple on-board software using sat-rs components on a bare-metal system
  with constrained resources.
automation/Dockerfile

@@ -2,10 +2,10 @@
# docker build -f automation/Dockerfile -t <NAME> .
# docker run -it <NAME>
FROM rust:latest
RUN apt-get update
RUN apt-get --yes upgrade
RUN apt-get update && apt-get --yes upgrade
# tzdata is a dependency, won't install otherwise
ARG DEBIAN_FRONTEND=noninteractive
RUN apt-get --yes install rsync curl

# set CROSS_CONTAINER_IN_CONTAINER to inform `cross` that it is executed from within a container
ENV CROSS_CONTAINER_IN_CONTAINER=true
@@ -13,3 +13,13 @@ ENV CROSS_CONTAINER_IN_CONTAINER=true
RUN rustup install nightly && \
    rustup target add thumbv7em-none-eabihf armv7-unknown-linux-gnueabihf && \
    rustup component add rustfmt clippy

# RUN cargo install mdbook --no-default-features --features search --vers "^0.4" --locked
RUN curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.34/mdbook-v0.4.34-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory /usr/local/bin

# SSH stuff to allow deployment to doc server
RUN adduser --uid 114 jenkins

# Add documentation server to known hosts
RUN echo "|1|/LzCV4BuTmTb2wKnD146l9fTKgQ=|NJJtVjvWbtRt8OYqFgcYRnMQyVw= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
RUN echo "|1|CcBvBc3EG03G+XM5rqRHs6gK/Gg=|oGeJQ+1I8NGI2THIkJsW92DpTzs= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBNL8ssTonYtgiR/6RRlSIK9WU1ywOcJmxFTLcEblAwH7oifZzmYq3XRfwXrgfMpylEfMFYfCU8JRqtmi19xc21A=" >> /etc/ssh/ssh_known_hosts
118 automation/Jenkinsfile vendored

@@ -1,52 +1,76 @@
pipeline {

  agent {
    dockerfile {
      dir 'automation'
      reuseNode true
    }
  agent {
    dockerfile {
      dir 'automation'
      reuseNode true
      args '--network host'
    }
  }

  stages {
    stage('Clippy') {
      steps {
        sh 'cargo clippy'
      }
    }
    stage('Docs') {
      steps {
        sh 'cargo +nightly doc --all-features'
      }
    }
    stage('Rustfmt') {
      steps {
        sh 'cargo fmt --all --check'
      }
    }
    stage('Test') {
      steps {
        sh 'cargo test --all-features'
      }
    }
    stage('Check with all features') {
      steps {
        sh 'cargo check --all-features'
      }
    }
    stage('Check with no features') {
      steps {
        sh 'cargo check --no-default-features'
      }
    }
    stage('Check Cross Embedded Bare Metal') {
      steps {
        sh 'cargo check -p satrs-core --target thumbv7em-none-eabihf --no-default-features'
      }
    }
    stage('Check Cross Embedded Linux') {
      steps {
        sh 'cargo check --target armv7-unknown-linux-gnueabihf'
      }
    }
  stages {
    stage('Rust Toolchain Info') {
      steps {
        sh 'rustc --version'
      }
    }
    stage('Clippy') {
      steps {
        sh 'cargo clippy'
      }
    }
    stage('Docs') {
      steps {
        catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') {
          sh 'cargo +nightly doc --all-features'
        }
      }
    }
    stage('Rustfmt') {
      steps {
        sh 'cargo fmt --all --check'
      }
    }
    stage('Test') {
      steps {
        sh 'cargo test --all-features'
      }
    }
    stage('Check with all features') {
      steps {
        sh 'cargo check --all-features'
      }
    }
    stage('Check with no features') {
      steps {
        sh 'cargo check --no-default-features'
      }
    }
    stage('Check Cross Embedded Bare Metal') {
      steps {
        sh 'cargo check -p satrs-core --target thumbv7em-none-eabihf --no-default-features'
      }
    }
    stage('Check Cross Embedded Linux') {
      steps {
        sh 'cargo check --target armv7-unknown-linux-gnueabihf'
      }
    }
    stage('Deploy satrs-book') {
      when {
        anyOf {
          branch 'main';
          branch pattern: 'mdbook-deployment*'
        }
      }
      steps {
        dir('satrs-book') {
          sh 'mdbook build'
          sshagent(credentials: ['documentation-buildfix']) {
            // Deploy to Apache webserver
            sh 'rsync -r --delete book/ buildfix@documentation.irs.uni-stuttgart.de:/projects/sat-rs'
          }
        }
      }
    }
  }
}
BIN misc/satrs-logo.png Normal file

Binary file not shown. Size: 62 KiB
1 satrs-book/.gitignore vendored Normal file

@@ -0,0 +1 @@
book
6 satrs-book/book.toml Normal file

@@ -0,0 +1,6 @@
[book]
authors = ["Robin Mueller"]
language = "en"
multilingual = false
src = "src"
title = "The sat-rs book"
10 satrs-book/src/SUMMARY.md Normal file

@@ -0,0 +1,10 @@
# Summary

- [Introduction](./introduction.md)
- [Design](./design.md)
- [Communication with Space Systems](./communication.md)
- [Working with Constrained Systems](./constrained-systems.md)
- [Actions](./actions.md)
- [Modes and Health](./modes-and-health.md)
- [Housekeeping Data](./housekeeping.md)
- [Events](./events.md)
9 satrs-book/src/TODO.md Normal file

@@ -0,0 +1,9 @@
- [Power Components](./power.md)
- [Thermal Components](./thermal.md)
- [Persistent TM storage](./persistent-tm-storage.md)
- [FDIR](./fdir.md)
- [Serialization of Data](./serialization.md)
- [Logging](./logging.md)
- [Modelling space systems](./modelling-space-systems.md)
- [Ground Segments](./ground-segments.md)
42 satrs-book/src/actions.md Normal file

@@ -0,0 +1,42 @@
# Working with Actions

Space systems generally need to be commanded regularly. This can include commands periodically
required to ensure a healthy system, or commands to reach the mission goals.

These commands can be modelled using the concept of Actions. The ECSS PUS standard also provides
PUS service 8 for actions, but it specifies only a few concrete subservices and gives little
guidance on what action commanding should look like.

`sat-rs` proposes two recommended ways to perform action commanding:

1. Target ID and Action ID based. The target ID is a 32-bit unsigned ID for an OBSW object entity
   which can also accept Actions. The action ID is a 32-bit unsigned ID for each action that a
   target is able to perform.
2. Target ID and Action String based. The target ID is the same as in the first proposal, but
   the unique action is identified by a string.

The framework provides an `ActionRequest` abstraction to model both of these cases.

## Commanding with ECSS PUS 8

`sat-rs` provides a generic ECSS PUS 8 action command handler. This handler can convert PUS 8
telecommands which use commanding scheme 1 explained above into an `ActionRequest`, which is
then forwarded to the target specified by the Target ID.

There are four requirements for the PUS 8 telecommand (a parsing sketch follows at the end of
this chapter):

1. The subservice 128 must be used.
2. Bytes 0 to 4 of the application data must contain the target ID in `u32` big endian format.
3. Bytes 4 to 8 of the application data must contain the action ID in `u32` big endian format.
4. The rest of the application data is assumed to be command specific additional parameters. It
   will be added to an IPC store and the corresponding store address will be sent as part of the
   `ActionRequest`.

## Sending back telemetry

There are some cases where the regular verification provided by PUS in response to PUS action
commands is not sufficient and some additional telemetry needs to be sent to ground. In that
case, it is recommended to choose a custom subservice for action TM data and then send the
telemetry using the same scheme as shown above, where the first 8 bytes of the application
data are reserved for the target ID and action ID.
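The following self-contained sketch illustrates how the application data layout described in
requirements 2 to 4 could be parsed. It uses only the standard library and is not the actual
`sat-rs` PUS 8 handler; the function name and the returned tuple are made up for illustration.

```rust
/// Hypothetical helper: splits PUS 8 application data into target ID, action ID and the
/// remaining command specific parameters. Not the sat-rs API, only a sketch.
fn parse_action_command(app_data: &[u8]) -> Option<(u32, u32, &[u8])> {
    // Need at least the 4 byte target ID and the 4 byte action ID.
    if app_data.len() < 8 {
        return None;
    }
    // Bytes 0..4: target ID, big endian.
    let target_id = u32::from_be_bytes(app_data[0..4].try_into().unwrap());
    // Bytes 4..8: action ID, big endian.
    let action_id = u32::from_be_bytes(app_data[4..8].try_into().unwrap());
    // Remaining bytes: command specific parameters, e.g. to be put into an IPC store.
    Some((target_id, action_id, &app_data[8..]))
}

fn main() {
    // Example: target ID 0x01020304, action ID 5, two parameter bytes.
    let app_data = [0x01, 0x02, 0x03, 0x04, 0x00, 0x00, 0x00, 0x05, 0xAA, 0xBB];
    let (target_id, action_id, params) = parse_action_command(&app_data).unwrap();
    assert_eq!(target_id, 0x01020304);
    assert_eq!(action_id, 5);
    assert_eq!(params, &[0xAA, 0xBB][..]);
}
```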
46 satrs-book/src/communication.md Normal file

@@ -0,0 +1,46 @@
# Communication with sat-rs based software

Communication is a vital topic for remote systems which are usually not (directly)
connected to the internet and only have 1-2 communication links during nominal operation. However,
most of these systems have internet access during the development cycle. There are various standards
provided by CCSDS and ECSS which can be useful to determine how to communicate with the satellite
and the primary On-Board Software.

# Application layer

Most communication with space systems is packet based. For example, the CCSDS space
packet standard only specifies a 6 byte header with at least 1 byte of payload. The PUS packet
standard is a subset of the space packet standard, which adds some fields and a 16 bit CRC, but
it is still centered around small packets. `sat-rs` provides support for these ECSS and CCSDS
standards and also attempts to fill the gap to the internet protocol by providing the following
components:

1. [UDP TMTC Server](https://docs.rs/satrs-core/0.1.0-alpha.0/satrs_core/hal/host/udp_server/index.html#).
   UDP is already packet based, which makes it an excellent fit for exchanging space packets.
2. TCP TMTC Server. This is a stream based protocol, so the server uses the COBS framing protocol
   to always deliver complete packets.

A minimal sketch of the UDP approach follows at the end of this chapter.

# Working with telemetry and telecommands (TMTC)

The commands sent to a space system are commonly called telecommands (TC) while the data received
from it is called telemetry (TM). Keeping in mind the previous section, the concept of a TC source
and a TM sink can be applied to most satellites. The TM sink is the one entity where all generated
telemetry arrives in real-time. The most important task of the TM sink usually is to send all
arriving telemetry to the ground segment of a satellite mission immediately. Another important
task might be to store all arriving telemetry persistently. This is especially important for
space systems which do not have permanent contact, like low-earth-orbit (LEO) satellites.

The most important task of a TC source is to deliver the telecommands to the correct recipients.
For modern component oriented software using message passing, this usually includes staged
demultiplexing components to determine where a command needs to be sent.

# Low-level protocols and the bridge to the communication subsystem

Many satellite systems use the lower layers of the OSI model in addition to the application
layer covered by the PUS standard or the CCSDS space packet standard. This oftentimes requires
special hardware like dedicated FPGAs to handle forward error correction fast enough. `sat-rs`
might provide components to handle standards like the Unified Space Data Link Protocol (USLP) in
software, but most of the time the handling of communication is performed through custom
software and hardware. Still, connecting this custom software and hardware to `sat-rs` can mostly
be done by using the concept of TC sources and TM sinks mentioned previously.
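As a minimal illustration of why UDP fits packet based TMTC exchange, the following sketch uses
only `std::net::UdpSocket` to send a dummy space packet from a "ground" socket to an "OBSW"
socket. It is not the `satrs-core` UDP TMTC server; the addresses, ports and packet bytes are
made up for this example.

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // "Flight software" socket and "ground" socket on localhost. Port numbers are arbitrary.
    let obsw = UdpSocket::bind("127.0.0.1:7301")?;
    let ground = UdpSocket::bind("127.0.0.1:0")?;

    // A dummy 7 byte CCSDS space packet: 6 byte primary header plus 1 byte of payload.
    let tc: [u8; 7] = [0x18, 0x01, 0xC0, 0x00, 0x00, 0x00, 0xAB];
    ground.send_to(&tc, "127.0.0.1:7301")?;

    // Each datagram carries exactly one packet, so no extra framing layer is needed.
    let mut buf = [0u8; 1024];
    let (len, sender) = obsw.recv_from(&mut buf)?;
    println!("received {} byte TC from {}: {:x?}", len, sender, &buf[..len]);

    // Telemetry goes back the same way.
    obsw.send_to(&buf[..len], sender)?;
    Ok(())
}
```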
57 satrs-book/src/constrained-systems.md Normal file

@@ -0,0 +1,57 @@
# Working with Constrained Systems

Software for space systems oftentimes has different requirements than the software for host
systems or servers. Currently, most space systems are considered embedded systems.

For these systems, the computation power and the available heap are the most constrained
resources. This might make completely heap based memory management schemes, which
are oftentimes used on host and server based systems, unfeasible. Still, completely forbidding
heap allocations might make software development unnecessarily difficult, especially in a
time where the OBSW might be running on Linux based systems with hundreds of MBs of RAM.

A useful pattern commonly used in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust can not protect from) or heap fragmentation.

# Using pre-allocated pool structures

A major candidate for heap allocations is TMTC handling. TCs, TMs and IPC data are all
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering and recommending pre-allocated static
pools.

These pools are split into subpools where each subpool can have a different page size.
For example, a very small TC pool might look like this:

TODO: Add image

A TC entry inside this pool has a store address which can then be sent around without having
to dynamically allocate memory. The same principle can also be applied to TM and IPC data. A
conceptual sketch of such a pool follows below.
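The following sketch shows the basic idea of a pre-allocated pool with subpools of different
block sizes. It is a deliberately simplified, hypothetical implementation for illustration only
and does not reflect the actual `sat-rs` pool API; the `StaticPool` and `StoreAddr` names are
made up.

```rust
/// Simplified pool: all memory is allocated once, at construction time.
struct StaticPool {
    /// One (block_size, backing memory) pair per subpool.
    subpools: Vec<(usize, Vec<u8>)>,
    /// Tracks which blocks are currently in use: used[subpool][block].
    used: Vec<Vec<bool>>,
}

/// A store address: which subpool and which block inside it. This small `Copy` value
/// is what gets sent between components instead of the data itself.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct StoreAddr {
    subpool: usize,
    block: usize,
}

impl StaticPool {
    /// `cfg` is a list of (number of blocks, block size) pairs, e.g. many small blocks
    /// and a few large ones. Everything is allocated here, at initialization time.
    fn new(cfg: &[(usize, usize)]) -> Self {
        Self {
            subpools: cfg.iter().map(|&(n, size)| (size, vec![0u8; n * size])).collect(),
            used: cfg.iter().map(|&(n, _)| vec![false; n]).collect(),
        }
    }

    /// Copies `data` into the first free block that is large enough and returns its address.
    fn add(&mut self, data: &[u8]) -> Option<StoreAddr> {
        for (si, (block_size, mem)) in self.subpools.iter_mut().enumerate() {
            if data.len() > *block_size {
                continue;
            }
            if let Some(bi) = self.used[si].iter().position(|&u| !u) {
                self.used[si][bi] = true;
                let start = bi * *block_size;
                mem[start..start + data.len()].copy_from_slice(data);
                return Some(StoreAddr { subpool: si, block: bi });
            }
        }
        None // Pool exhausted: an explicit error case instead of a hidden heap allocation.
    }
}

fn main() {
    // Three subpools: 8 blocks of 32 bytes, 4 blocks of 256 bytes, 2 blocks of 1024 bytes.
    let mut pool = StaticPool::new(&[(8, 32), (4, 256), (2, 1024)]);
    let addr = pool.add(&[1, 2, 3, 4]).expect("pool is full");
    println!("stored TC at {:?}", addr);
}
```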
# Using special crates to prevent smaller allocations

Another common way to use the heap on host systems is using containers like `String` and `Vec<u8>`
to work with data where the size is not known beforehand. The most common solution for embedded
systems is to determine the maximum expected size and then use a pre-allocated `u8` buffer and a
size variable. Alternatively, you can use the following crates for more convenience or smarter
behaviour; at the very least they reduce heap allocations:

1. [`smallvec`](https://docs.rs/smallvec/latest/smallvec/).
2. [`arrayvec`](https://docs.rs/arrayvec/latest/arrayvec/index.html) which also contains an
   [`ArrayString`](https://docs.rs/arrayvec/latest/arrayvec/struct.ArrayString.html) helper type.
3. [`tinyvec`](https://docs.rs/tinyvec/latest/tinyvec/).

# Using a fixed amount of threads

On host systems, it is a common practice to dynamically spawn new threads to handle workloads.
On space systems this is generally considered an anti-pattern, as it is non-deterministic
and might lead to issues similar to those caused by dynamic heap usage. For example, spawning a new
thread might use up the remaining heap of a system, leading to non-deterministic errors.

The most common way to avoid this is to simply spawn all required threads at program initialization
time. If a thread is done with its task, it can go back to sleeping regularly, only occasionally
checking for new jobs. If a system still needs to handle bursty concurrent loads, another possible
way commonly used for host systems as well would be to use a threadpool, for example by using the
[`threadpool`](https://crates.io/crates/threadpool) crate. A minimal sketch of the fixed-thread
approach follows below.
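The sketch below shows the "spawn everything at initialization" pattern using only the standard
library. The thread name and job type are made up for this example; a real OBSW would typically
spawn several such workers during program initialization.

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

fn main() {
    let (tx, rx) = mpsc::channel::<Vec<u8>>();

    // The worker thread is created once, at program initialization time.
    let tm_handler = thread::Builder::new()
        .name("tm-sink".to_string())
        .spawn(move || loop {
            // Block with a timeout instead of busy-waiting; wake up occasionally
            // even if no job arrived.
            match rx.recv_timeout(Duration::from_millis(400)) {
                Ok(tm) => println!("tm-sink: forwarding {} byte packet", tm.len()),
                Err(mpsc::RecvTimeoutError::Timeout) => continue,
                Err(mpsc::RecvTimeoutError::Disconnected) => break,
            }
        })
        .expect("thread creation failed");

    // Other components hand packets to the fixed worker instead of spawning
    // a new thread per packet.
    tx.send(vec![0x08, 0x01, 0xC0, 0x00, 0x00, 0x01, 0x00]).unwrap();
    drop(tx); // Closing the channel lets the worker terminate cleanly.
    tm_handler.join().unwrap();
}
```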
57 satrs-book/src/design.md Normal file

@@ -0,0 +1,57 @@
# Framework Design

Satellites and space systems in general are complex systems with a wide range of requirements for
both the hardware and the software. Consequently, the general design of the framework is centered
around many light-weight components which try to impose as few restrictions as possible on how to
solve certain problems.

There are still a lot of common patterns and architectures across these systems where guidance
on how to solve a problem and a common structure would be extremely useful to avoid pitfalls
which were already solved and to avoid boilerplate code. This framework tries to provide this
structure and guidance in the following ways:

1. Providing this book, which explains the architecture and design patterns with respect to common
   issues and requirements of space systems.
2. Providing an example application. Space systems still commonly have a large monolithic
   primary On-Board Software, so the choice was made to provide one example software which
   contains the various features provided by sat-rs.
3. Providing a good test suite. This includes both unit tests and integration tests. The integration
   tests can also serve as smaller usage examples than the large `satrs-example` application.

This framework has special support for standards used in the space industry. This especially
includes standards provided by the Consultative Committee for Space Data Systems (CCSDS) and the
European Cooperation for Space Standardization (ECSS). It does not enforce using any of those
standards, but it is always recommended to use some sort of standard for interoperability.

A lot of the modules and design considerations are based on the Flight Software Framework (FSFW).
The FSFW has its own [documentation](https://documentation.irs.uni-stuttgart.de/fsfw/), which
will be referred to when applicable. The FSFW was developed over a period of 10 years for the
Flying Laptop Project by the University of Stuttgart with Airbus Defence and Space GmbH.
It has flight heritage through the two missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
Therefore, a lot of the design concepts were ported more or less unchanged to the `sat-rs`
framework.
FLP is a medium-size small satellite with a higher budget and longer development time than EIVE,
which allowed building a highly reliable system, while EIVE is a smaller 6U+ cubesat which had a
shorter development cycle and was built using cheaper COTS components. This framework also tries
to accumulate the knowledge of developing the OBSW and operating the satellite for both of these
different systems and to provide a solution for a wider range of small satellite systems.

`sat-rs` can be seen as a modern port of the FSFW which uses common principles of software
engineering to provide a reliable and robust basis for space On-Board Software. The choice
of the Rust programming language was made for the following reasons:

1. Rust has safety guarantees which are a perfect fit for space systems, which generally have high
   robustness and reliability requirements.
2. Rust is suitable for embedded systems. It can also be run on smaller embedded systems like the
   STM32, which have also become common in the space sector. All space systems are embedded systems,
   which makes using large languages like Python challenging even for OBCs with more performance.
3. Rust can interface with C APIs through its excellent FFI support. This is especially
   important because many vendor provided libraries are still C based.
4. Modern tooling like a package manager and various development helpers, which can further reduce
   development cycles for space systems. `cargo` provides tools like auto-formatters and linters
   which can immediately ensure a high software quality throughout each development cycle.
5. A large ecosystem with excellent libraries which also leverages the excellent tooling mentioned
   previously. Integrating these libraries is a lot easier compared to languages like C/C++, where
   there is still no standardized way to use packages.
16 satrs-book/src/events.md Normal file

@@ -0,0 +1,16 @@
# Events

Events are an extremely important mechanism for remote systems to monitor both unexpected
and expected anomalies and events occurring on these systems. They are oftentimes tied to
Fault Detection, Isolation and Recovery (FDIR) operations, which need to happen autonomously.

Events can also be used as a convenient Inter-Process Communication (IPC) mechanism, which is
also observable by the ground segment. The PUS Service 5 standardizes what the ground interface
for events might look like, but does not specify how other software components might react
to those events. There is the PUS Service 19, which might be used for that purpose, but the
event components recommended by this framework do not really need this service.

The following image shows how the flow of events could look in a system where components
can generate events, and where other system components might be interested in those events
(a small routing sketch follows below):


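The following sketch illustrates this flow with plain standard library channels. It is a
conceptual model only and does not use the actual `satrs-core` event manager API; the `Event`,
`EventManager` and subscription types are made up, and only group-based and catch-all
subscriptions are shown.

```rust
use std::collections::HashMap;
use std::sync::mpsc;

type GroupId = u16;
type EventId = u32;

#[derive(Clone, Copy, Debug)]
struct Event {
    group: GroupId,
    id: EventId,
}

#[derive(Default)]
struct EventManager {
    all_subscribers: Vec<mpsc::Sender<Event>>,
    group_subscribers: HashMap<GroupId, Vec<mpsc::Sender<Event>>>,
}

impl EventManager {
    /// E.g. the PUS 5 and PUS 19 handlers would subscribe to all events.
    fn subscribe_all(&mut self, tx: mpsc::Sender<Event>) {
        self.all_subscribers.push(tx);
    }
    /// E.g. an event creator interested only in one event group.
    fn subscribe_group(&mut self, group: GroupId, tx: mpsc::Sender<Event>) {
        self.group_subscribers.entry(group).or_default().push(tx);
    }
    /// Called for every event received from the event creators.
    fn route(&self, event: Event) {
        for tx in &self.all_subscribers {
            tx.send(event).ok();
        }
        if let Some(subs) = self.group_subscribers.get(&event.group) {
            for tx in subs {
                tx.send(event).ok();
            }
        }
    }
}

fn main() {
    let mut mgr = EventManager::default();
    let (pus5_tx, pus5_rx) = mpsc::channel();
    let (ctrl_tx, ctrl_rx) = mpsc::channel();
    mgr.subscribe_all(pus5_tx);
    mgr.subscribe_group(2, ctrl_tx);

    mgr.route(Event { group: 2, id: 3 });
    assert!(pus5_rx.try_recv().is_ok()); // The PUS 5 handler sees every event.
    assert!(ctrl_rx.try_recv().is_ok()); // The group subscriber sees events of group 2.
}
```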
1 satrs-book/src/fdir.md Normal file

@@ -0,0 +1 @@
# Fault Detection, Isolation And Recovery (FDIR)
1 satrs-book/src/ground-segments.md Normal file

@@ -0,0 +1 @@
# Ground Segments
24 satrs-book/src/housekeeping.md Normal file

@@ -0,0 +1,24 @@
# Housekeeping Data

Remote systems like satellites and rovers oftentimes generate data autonomously and periodically.
The most common example for this is temperature or attitude data. Data like this is commonly
referred to as housekeeping data, and is usually one of the most important and most resource heavy
data sources received from a satellite. Standards like the PUS Service 3 make recommendations on how
to expose housekeeping data, but the applicability of the interface offered by PUS 3 has proven to be
partially difficult and clunky for modular systems.

First, we are going to list some assumptions and requirements about Housekeeping (HK) data:

1. HK data is generated periodically by various system components throughout the
   system.
2. An autonomous and periodic sampling of that HK data to be stored and sent to ground is generally
   required. A minimum interface consists of requesting a one-shot sample of HK, enabling and
   disabling the periodic autonomous generation of samples, and modifying the collection interval
   of the periodic autonomous generation. A sketch of such an interface follows below.
3. HK data often needs to be shared with other software components. For example, a thermal controller
   wants to read the data samples of all sensor components.

A commonly required way to model HK data in a clean way is also to group related HK data into sets,
which can then be dumped via a similar interface.

TODO: Write down `sat-rs` recommendations on how to expose and work with HK data.
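The minimum interface from point 2 can be captured in a small trait. The following sketch is
hypothetical and not the `sat-rs` API; the `HkProvider` trait, the `HkSetId` type and the dummy
sensor are made up to illustrate the required capabilities.

```rust
use std::time::Duration;

type HkSetId = u32;

#[derive(Debug)]
enum HkError {
    UnknownSet(HkSetId),
}

trait HkProvider {
    /// Request a single, immediate sample of the given HK set.
    fn one_shot_sample(&mut self, set: HkSetId) -> Result<Vec<u8>, HkError>;
    /// Enable or disable the periodic autonomous generation of the given HK set.
    fn set_periodic_generation(&mut self, set: HkSetId, enabled: bool) -> Result<(), HkError>;
    /// Modify the collection interval of the periodic autonomous generation.
    fn set_collection_interval(&mut self, set: HkSetId, interval: Duration) -> Result<(), HkError>;
}

/// Dummy provider with a single HK set (ID 0), only for illustration.
struct DummyTempSensor {
    periodic: bool,
    interval: Duration,
}

impl HkProvider for DummyTempSensor {
    fn one_shot_sample(&mut self, set: HkSetId) -> Result<Vec<u8>, HkError> {
        if set != 0 { return Err(HkError::UnknownSet(set)); }
        Ok(vec![0x00, 0x17]) // e.g. a raw 2 byte temperature reading
    }
    fn set_periodic_generation(&mut self, set: HkSetId, enabled: bool) -> Result<(), HkError> {
        if set != 0 { return Err(HkError::UnknownSet(set)); }
        self.periodic = enabled;
        Ok(())
    }
    fn set_collection_interval(&mut self, set: HkSetId, interval: Duration) -> Result<(), HkError> {
        if set != 0 { return Err(HkError::UnknownSet(set)); }
        self.interval = interval;
        Ok(())
    }
}

fn main() {
    let mut sensor = DummyTempSensor { periodic: false, interval: Duration::from_secs(10) };
    sensor.set_periodic_generation(0, true).unwrap();
    sensor.set_collection_interval(0, Duration::from_secs(1)).unwrap();
    println!("one shot sample: {:?}", sensor.one_shot_sample(0).unwrap());
    println!("periodic: {}, interval: {:?}", sensor.periodic, sensor.interval);
}
```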
259 satrs-book/src/images/event_man_arch.graphml Normal file

@@ -0,0 +1,259 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<graphml xmlns="http://graphml.graphdrawing.org/xmlns" xmlns:java="http://www.yworks.com/xml/yfiles-common/1.0/java" xmlns:sys="http://www.yworks.com/xml/yfiles-common/markup/primitives/2.0" xmlns:x="http://www.yworks.com/xml/yfiles-common/markup/2.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:y="http://www.yworks.com/xml/graphml" xmlns:yed="http://www.yworks.com/xml/yed/3" xsi:schemaLocation="http://graphml.graphdrawing.org/xmlns http://www.yworks.com/xml/schema/graphml/1.1/ygraphml.xsd">
|
||||
<!--Created by yEd 3.22-->
|
||||
<key attr.name="Description" attr.type="string" for="graph" id="d0"/>
|
||||
<key for="port" id="d1" yfiles.type="portgraphics"/>
|
||||
<key for="port" id="d2" yfiles.type="portgeometry"/>
|
||||
<key for="port" id="d3" yfiles.type="portuserdata"/>
|
||||
<key attr.name="url" attr.type="string" for="node" id="d4"/>
|
||||
<key attr.name="description" attr.type="string" for="node" id="d5"/>
|
||||
<key for="node" id="d6" yfiles.type="nodegraphics"/>
|
||||
<key for="graphml" id="d7" yfiles.type="resources"/>
|
||||
<key attr.name="url" attr.type="string" for="edge" id="d8"/>
|
||||
<key attr.name="description" attr.type="string" for="edge" id="d9"/>
|
||||
<key for="edge" id="d10" yfiles.type="edgegraphics"/>
|
||||
<graph edgedefault="directed" id="G">
|
||||
<data key="d0" xml:space="preserve"/>
|
||||
<node id="n0">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="509.9999999999999" width="768.7000000000003" x="579.3105418719211" y="304.7"/>
|
||||
<y:Fill hasColor="false" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="16" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="21.936037063598633" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="150.1282958984375" x="26.197490701913352" xml:space="preserve" y="24.234711021505348">Example Event Flow<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.46591974671274444" nodeRatioY="-0.452480958781362" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n1">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="203.0" x="814.0" y="506.6799999999999"/>
|
||||
<y:Fill color="#FFFF00" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="86.21258544921875" x="58.393707275390625" xml:space="preserve" y="21.27395296096796">Event Manager<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n2">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="82.0" x="617.6" y="413.23"/>
|
||||
<y:Fill color="#FF9900" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="13.4398193359375" xml:space="preserve" y="14.547905921936035">Event
|
||||
Creator 0<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n3">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="76.55999999999995" x="988.5" y="335.62999999999994"/>
|
||||
<y:Fill color="#FF9900" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="10.719819335937473" xml:space="preserve" y="14.547905921936035">Event
|
||||
Creator 2<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n4">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="72.55999999999983" x="860.6610837438426" y="335.62999999999994"/>
|
||||
<y:Fill color="#FF9900" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="8.719819335937359" xml:space="preserve" y="14.547905921936035">Event
|
||||
Creator 1<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n5">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="87.27999999999997" x="1112.52" y="335.62999999999994"/>
|
||||
<y:Fill color="#FF9900" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="55.120361328125" x="16.079819335937373" xml:space="preserve" y="14.547905921936035">Event
|
||||
Creator 3<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n6">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="126.0" x="781.0" y="620.26"/>
|
||||
<y:Fill color="#FFCC00" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="92.78865051269531" x="16.605674743652344" xml:space="preserve" y="14.547905921936035">PUS Service 5
|
||||
Event Reporting
|
||||
<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n7">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="118.63999999999987" x="928.2" y="620.26"/>
|
||||
<y:Fill color="#FFCC00" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="84.08859252929688" x="17.2757037353515" xml:space="preserve" y="14.547905921936035">PUS Service 19
|
||||
Event Action<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n8">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="60.0" width="87.27999999999997" x="792.1260377358491" y="733.8400000000001"/>
|
||||
<y:Fill color="#FFCC99" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="center" autoSizePolicy="content" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="59.932403564453125" x="13.673798217773424" xml:space="preserve" y="14.547905921936035">Telemetry
|
||||
Sink<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="0.0" labelRatioY="0.0" nodeRatioX="0.0" nodeRatioY="0.0" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<node id="n9">
|
||||
<data key="d6">
|
||||
<y:ShapeNode>
|
||||
<y:Geometry height="170.79999999999995" width="210.80000000000018" x="1076.84" y="601.88"/>
|
||||
<y:Fill hasColor="false" transparent="false"/>
|
||||
<y:BorderStyle color="#000000" raised="false" type="line" width="1.0"/>
|
||||
<y:NodeLabel alignment="left" autoSizePolicy="content" fontFamily="Dialog" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="143.6875" horizontalTextPosition="center" iconTextGap="4" modelName="custom" textColor="#000000" verticalTextPosition="bottom" visible="true" width="181.591796875" x="8.373079774614325" xml:space="preserve" y="7.444138124199753">Subscriptions
|
||||
|
||||
1. Event Creator 0 subscribes
|
||||
for event 0
|
||||
2. Event Creator 1 subscribes
|
||||
for event group 2
|
||||
3. PUS Service 5 handler
|
||||
subscribes for all events
|
||||
4. PUS Service 19 handler
|
||||
subscribes for all events<y:LabelModel><y:SmartNodeLabelModel distance="4.0"/></y:LabelModel><y:ModelParameter><y:SmartNodeLabelModelParameter labelRatioX="-0.5" labelRatioY="-0.5" nodeRatioX="-0.4602795077105583" nodeRatioY="-0.45641605313700395" offsetX="0.0" offsetY="0.0" upX="0.0" upY="-1.0"/></y:ModelParameter></y:NodeLabel>
|
||||
<y:Shape type="rectangle"/>
|
||||
</y:ShapeNode>
|
||||
</data>
|
||||
</node>
|
||||
<edge id="e0" source="n4" target="n1">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="8.058916256157545" sy="0.0" tx="-10.5" ty="0.0"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="8.639817810058275" xml:space="preserve" y="29.00100609374465">event 1
|
||||
(group 1)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="35.59999999999969" distanceToCenter="true" position="left" ratio="0.34252387409930674" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e1" source="n2" target="n1">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="0.0" sy="11.93999999999994" tx="-83.5" ty="0.0">
|
||||
<y:Point x="832.0" y="455.16999999999996"/>
|
||||
</y:Path>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="25.334655000000453" xml:space="preserve" y="-40.972107505798476">event 0
|
||||
(group 0)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="25.520000000000095" distanceToCenter="true" position="left" ratio="0.20267159489379444" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e2" source="n3" target="n1">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="-23.719999999999914" sy="5.5" tx="87.56000000000006" ty="0.0"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="53.92036437988281" x="5.6761352539062955" xml:space="preserve" y="27.551854405966765">event 2
|
||||
(group 3)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="5.676132812499983" distanceToCenter="false" position="left" ratio="0.3219761157957032" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e3" source="n5" target="n1">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="-6.275467980295616" sy="0.0" tx="57.5" ty="8.5">
|
||||
<y:Point x="1149.8845320197042" y="545.1799999999998"/>
|
||||
</y:Path>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="97.38468933105469" x="26.667665869801795" xml:space="preserve" y="43.287014528669715">event 3 (group 2)
|
||||
event 4 (group 2)<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="75.3599999999999" distanceToCenter="true" position="left" ratio="0.2967848459873102" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e4" source="n1" target="n6">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="-65.0" sy="0.0" tx="6.5" ty="0.0"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="83.16456604003906" x="-98.78228302001958" xml:space="preserve" y="16.63042580701972"><<all events>><y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="57.20000000000004" distanceToCenter="true" position="right" ratio="0.4441995640590947" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e5" source="n1" target="n7">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="42.660000000000196" sy="0.0" tx="-29.359999999999786" ty="0.0"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="83.16456604003906" x="20.4177438354493" xml:space="preserve" y="17.885881494816203"><<all events>><y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="62.0" distanceToCenter="true" position="left" ratio="0.492249939452652" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e6" source="n1" target="n2">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="0.0" sy="0.0" tx="0.0" ty="0.0">
|
||||
<y:Point x="658.6" y="536.6799999999998"/>
|
||||
</y:Path>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="44.69230651855469" x="-131.99129340961497" xml:space="preserve" y="-45.45208675384538">event 1
|
||||
event 2<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="0.0" distance="30.0" distanceToCenter="true" position="right" ratio="0.6426904695623505" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e7" source="n1" target="n4">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="-35.69940886699487" sy="0.0" tx="-17.140492610837327" ty="1.5"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="17.452094078063965" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="46.14430236816406" x="-54.352158195608126" xml:space="preserve" y="-79.29459128622307">group 2<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="31.279999999999973" distanceToCenter="true" position="left" ratio="0.6800790648728832" segment="-1"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
<edge id="e8" source="n6" target="n8">
|
||||
<data key="d10">
|
||||
<y:PolyLineEdge>
|
||||
<y:Path sx="0.0" sy="0.0" tx="8.233962264150945" ty="-21.42352238805968"/>
|
||||
<y:LineStyle color="#000000" type="line" width="1.0"/>
|
||||
<y:Arrows source="none" target="standard"/>
|
||||
<y:EdgeLabel alignment="center" configuration="AutoFlippingLabel" distance="2.0" fontFamily="Ubuntu" fontSize="12" fontStyle="plain" hasBackgroundColor="false" hasLineColor="false" height="30.90418815612793" horizontalTextPosition="center" iconTextGap="4" modelName="custom" preferredPlacement="anywhere" ratio="0.5" textColor="#000000" verticalTextPosition="bottom" visible="true" width="87.40060424804688" x="-100.50030212402339" xml:space="preserve" y="11.337896156311103">enabled Events
|
||||
as PUS 5 TM<y:LabelModel><y:SmartEdgeLabelModel autoRotationEnabled="false" defaultAngle="0.0" defaultDistance="10.0"/></y:LabelModel><y:ModelParameter><y:SmartEdgeLabelModelParameter angle="6.283185307179586" distance="56.79999999999995" distanceToCenter="true" position="right" ratio="0.5" segment="0"/></y:ModelParameter><y:PreferredPlacementDescriptor angle="0.0" angleOffsetOnRightSide="0" angleReference="absolute" angleRotationOnRightSide="co" distance="-1.0" frozen="true" placement="anywhere" side="anywhere" sideReference="relative_to_edge_flow"/></y:EdgeLabel>
|
||||
<y:BendStyle smoothed="false"/>
|
||||
</y:PolyLineEdge>
|
||||
</data>
|
||||
</edge>
|
||||
</graph>
|
||||
<data key="d7">
|
||||
<y:Resources/>
|
||||
</data>
|
||||
</graphml>
|
BIN satrs-book/src/images/event_man_arch.png Normal file

Binary file not shown. Size: 70 KiB
23 satrs-book/src/introduction.md Normal file

@@ -0,0 +1,23 @@
The sat-rs book
======

This book is the primary information resource for the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
in addition to the regular API documentation. It contains the following resources:

1. Architecture information and considerations which exceed the scope of the regular API documentation.
2. General information on how to build On-Board Software and how `sat-rs` can help to fulfill
   the unique requirements of writing software for remote systems.
3. A Getting-Started workshop where a small On-Board Software is built from scratch using
   sat-rs components.

# Introduction

The primary goal of the sat-rs framework is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements of these systems.

A lot of the architecture and general design considerations are based on the
[FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework, which has flight heritage
through the two missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
1 satrs-book/src/logging.md Normal file

@@ -0,0 +1 @@
# Logging
1 satrs-book/src/modelling-space-systems.md Normal file

@@ -0,0 +1 @@
# Modelling Space Systems
102
satrs-book/src/modes-and-health.md
Normal file
102
satrs-book/src/modes-and-health.md
Normal file
@ -0,0 +1,102 @@
# Modes

Modes are an extremely useful concept for complex systems in general. They also allow simplified
system reasoning for both system operators and OBSW developers. They model the behaviour of a
component and also provide observability of a system. A few examples of how to model
different components of a space system with modes will be given.

## Modelling physical devices with modes

The following simple mode scheme with the three modes

- `OFF`
- `ON`
- `NORMAL`

can be applied to a large number of simpler devices of a remote system, for example sensors.

1. `OFF` means that a device is physically switched off, and the corresponding software component
   does not poll the device regularly.
2. `ON` means that a device is physically switched on, but the device is not polled periodically.
3. `NORMAL` means that a device is powered on and polled periodically.

If a device is `OFF`, the device handler will deny commands which include physical communication
with the connected device. In `NORMAL` mode, it will autonomously perform periodic polling
of the connected physical device in addition to handling remote commands by the operator.
Using these three basic modes, there are two important transitions which need to be taken care of
for the majority of devices (a small sketch of such a transition handler is shown after the list):

1. `OFF` to `ON` or `NORMAL`: The device first needs to be powered on. After that, the
   device's initial startup configuration must be performed.
2. `NORMAL` or `ON` to `OFF`: Any important shutdown configuration or handling must be performed
   before powering off the device.
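To make the transition handling concrete, the following minimal sketch models the three modes and
the two transitions in plain Rust. It is illustrative only and not an existing sat-rs API; the
handler type and its helper methods are assumptions.

```rust
/// Illustrative only: the three basic device modes described above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum DeviceMode {
    Off,
    On,
    Normal,
}

/// Hypothetical device handler used to show where the two transitions are handled.
pub struct DeviceHandler {
    mode: DeviceMode,
}

impl DeviceHandler {
    pub fn switch_mode(&mut self, new_mode: DeviceMode) {
        match (self.mode, new_mode) {
            // OFF to ON or NORMAL: power the device on first, then apply the
            // initial startup configuration.
            (DeviceMode::Off, DeviceMode::On) | (DeviceMode::Off, DeviceMode::Normal) => {
                self.power_on();
                self.apply_startup_config();
            }
            // NORMAL or ON to OFF: perform the shutdown handling before powering off.
            (DeviceMode::Normal, DeviceMode::Off) | (DeviceMode::On, DeviceMode::Off) => {
                self.apply_shutdown_config();
                self.power_off();
            }
            _ => {}
        }
        self.mode = new_mode;
    }

    // Placeholder helpers; the real implementations are device specific.
    fn power_on(&mut self) {}
    fn power_off(&mut self) {}
    fn apply_startup_config(&mut self) {}
    fn apply_shutdown_config(&mut self) {}
}
```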
## Modelling a controller with modes

Controller components do not model physical devices, but a mode scheme is still the best
way to model most of these components.

For example, a hypothetical attitude controller might have the following modes:

- `SAFE`
- `TARGET IDLE`
- `TARGET POINTING GROUND`
- `TARGET POINTING NADIR`

We can also introduce the concept of submodes: The `SAFE` mode can for example have a
`DEFAULT` submode and a `DETUMBLE` submode.
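A mode and its submode can be carried together in a small value type. The following sketch is
illustrative only; the type name and the numeric identifiers are assumptions and not an existing
sat-rs API.

```rust
/// Illustrative only: a mode paired with a submode, as used by the hypothetical
/// attitude controller above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ModeAndSubmode {
    pub mode: u32,
    pub submode: u16,
}

// Made-up numeric identifiers for the SAFE mode and its submodes.
pub const ACS_MODE_SAFE: u32 = 1;
pub const ACS_SUBMODE_DEFAULT: u16 = 0;
pub const ACS_SUBMODE_DETUMBLE: u16 = 1;

// Example: SAFE mode with the DETUMBLE submode.
pub const SAFE_DETUMBLE: ModeAndSubmode = ModeAndSubmode {
    mode: ACS_MODE_SAFE,
    submode: ACS_SUBMODE_DETUMBLE,
};
```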
## Achieving system observability with modes

If a system component has a mode in some shape or form, this mode should be observable. This means
that the operator can retrieve the mode of a particular component. This is especially
important if these components can change their mode autonomously.

If a component is able to change its mode autonomously, this is also relevant
information for the operator or for other software components. This means that a component
should also be able to announce its mode.

This concept becomes especially important when applying the mode concept on the whole
system level. This will be explained in detail in a dedicated chapter, but the basic idea
is to model the whole system as a tree where each node has a mode. A new capability is added now:
A component can announce its mode recursively. This means that the component will announce its
own mode first before announcing the mode of all its children. Using a scheme like this, the mode
of the whole system can be retrieved using only one command. The same concept can also be used
for commanding the whole system, which will be explained in more detail in the dedicated systems
modelling chapter.

In summary, a component which has modes has to expose the following four capabilities
(a minimal trait sketch is shown after the list):

1. Set a mode
2. Read the mode
3. Announce the mode
4. Announce the mode recursively
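The following trait sketch shows how these four capabilities could be expressed in Rust. It is
illustrative only, assumes a simple `u32` mode identifier, and does not correspond to an existing
sat-rs interface.

```rust
/// Illustrative only: the four mode capabilities listed above.
pub trait ModeNode {
    /// 1. Set a mode.
    fn set_mode(&mut self, mode: u32);
    /// 2. Read the mode.
    fn mode(&self) -> u32;
    /// 3. Announce the mode, for example by generating a telemetry packet or an event.
    fn announce_mode(&self);
    /// 4. Announce the mode recursively: announce the own mode first, then request
    ///    the mode announcement of all children in the system tree.
    fn announce_mode_recursively(&self);
}
```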
## Using ECSS PUS to perform mode commanding

# Health

Health is an important concept for systems and components which might fail.
Oftentimes, the health is tied to the mode of a system component in some shape or form, and
determines whether a system component is usable. Health is also an extremely useful concept
to simplify the Fault Detection, Isolation and Recovery (FDIR) concept of a system.

The following health states are based on the ones used inside the FSFW and are enough to model most
use-cases (a small sketch of a matching Rust type follows this section):

- `HEALTHY`
- `FAULTY`
- `NEEDS RECOVERY`
- `EXTERNAL CONTROL`

1. `HEALTHY` means that a component is working nominally and can perform its task without any issues.
2. `FAULTY` means that a component does not work properly. This might also impact other system
   components, so the passivation and isolation of that component is desirable for FDIR purposes.
3. `NEEDS RECOVERY` is used to attempt a recovery of a component. For example, a simple sensor
   could be power-cycled if multiple communication issues occurred recently.
4. `EXTERNAL CONTROL` is used to isolate an individual component from the rest of the system. For
   example, an operator might be interested in testing a component in isolation, where interference
   from the system is not desired. In that case, the `EXTERNAL CONTROL` health state might be used
   to prevent mode commands from the system while allowing external mode commands.
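A minimal sketch of the four health states as a Rust type, illustrative only and not an existing
sat-rs API. The helper method encodes the `EXTERNAL CONTROL` behaviour described above; its exact
policy is an assumption.

```rust
/// Illustrative only: the four health states described above.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Health {
    Healthy,
    Faulty,
    NeedsRecovery,
    ExternalControl,
}

impl Health {
    /// A component under external control should not accept mode commands issued
    /// autonomously by the system, only external (operator) mode commands.
    pub fn accepts_system_mode_commands(&self) -> bool {
        !matches!(*self, Health::ExternalControl)
    }
}
```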
1
satrs-book/src/persistent-tm-storage.md
Normal file
@ -0,0 +1 @@
# Persistent Telemetry (TM) Storage
1
satrs-book/src/power.md
Normal file
@ -0,0 +1 @@
# Power Components
1
satrs-book/src/serialization.md
Normal file
@ -0,0 +1 @@
# Serialization
1
satrs-book/src/thermal.md
Normal file
@ -0,0 +1 @@
# Thermal Components
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "satrs-core"
|
||||
version = "0.1.0-alpha.0"
|
||||
version = "0.1.0-alpha.1"
|
||||
edition = "2021"
|
||||
rust-version = "1.61"
|
||||
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
|
||||
@ -15,12 +15,19 @@ categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-sup
|
||||
[dependencies]
|
||||
delegate = ">0.7, <=0.10"
|
||||
paste = "1"
|
||||
# TODO: Remove this as soon as the image including the description has been moved to the satrs-book.
|
||||
embed-doc-image = "0.1"
|
||||
|
||||
[dependencies.smallvec]
|
||||
version = "1"
|
||||
|
||||
[dependencies.num_enum]
|
||||
version = ">0.5, <=0.7"
|
||||
default-features = false
|
||||
|
||||
[dependencies.crc]
|
||||
version = "3"
|
||||
|
||||
[dependencies.dyn-clone]
|
||||
version = "1"
|
||||
optional = true
|
||||
@ -60,12 +67,22 @@ version = "1"
|
||||
default-features = false
|
||||
optional = true
|
||||
|
||||
[dependencies.socket2]
|
||||
version = "0.5.4"
|
||||
features = ["all"]
|
||||
optional = true
|
||||
|
||||
[dependencies.spacepackets]
|
||||
version = "0.7.0-beta.1"
|
||||
# path = "../../spacepackets"
|
||||
version = "0.7.0-beta.2"
|
||||
default-features = false
|
||||
# git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
|
||||
# rev = ""
|
||||
# rev = "79d26e1a6"
|
||||
# branch = ""
|
||||
|
||||
[dependencies.cobs]
|
||||
git = "https://github.com/robamu/cobs.rs.git"
|
||||
version = "0.2.3"
|
||||
branch = "all_features"
|
||||
default-features = false
|
||||
|
||||
[dev-dependencies]
|
||||
@ -73,6 +90,7 @@ serde = "1"
|
||||
zerocopy = "0.7"
|
||||
once_cell = "1.13"
|
||||
serde_json = "1"
|
||||
rand = "0.8"
|
||||
|
||||
[dev-dependencies.postcard]
|
||||
version = "1"
|
||||
@ -80,22 +98,23 @@ version = "1"
|
||||
[features]
|
||||
default = ["std"]
|
||||
std = [
|
||||
"downcast-rs/std",
|
||||
"alloc",
|
||||
"bus",
|
||||
"postcard/use-std",
|
||||
"crossbeam-channel/std",
|
||||
"serde/std",
|
||||
"spacepackets/std",
|
||||
"num_enum/std",
|
||||
"thiserror"
|
||||
"downcast-rs/std",
|
||||
"alloc",
|
||||
"bus",
|
||||
"postcard/use-std",
|
||||
"crossbeam-channel/std",
|
||||
"serde/std",
|
||||
"spacepackets/std",
|
||||
"num_enum/std",
|
||||
"thiserror",
|
||||
"socket2"
|
||||
]
|
||||
alloc = [
|
||||
"serde/alloc",
|
||||
"spacepackets/alloc",
|
||||
"hashbrown",
|
||||
"dyn-clone",
|
||||
"downcast-rs"
|
||||
"serde/alloc",
|
||||
"spacepackets/alloc",
|
||||
"hashbrown",
|
||||
"dyn-clone",
|
||||
"downcast-rs"
|
||||
]
|
||||
serde = ["dep:serde", "spacepackets/serde"]
|
||||
crossbeam = ["crossbeam-channel"]
|
||||
|
@ -4,7 +4,7 @@ Checklist for new releases
|
||||
# Pre-Release
|
||||
|
||||
1. Make sure any new modules are documented sufficiently and check the docs with
|
||||
`cargo doc --all-features --open`.
|
||||
`cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
|
||||
2. Bump version specifier in `Cargo.toml`.
|
||||
3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
|
||||
`unreleased` section.
|
||||
@ -19,4 +19,7 @@ Checklist for new releases
|
||||
|
||||
# Post-Release
|
||||
|
||||
1. Create a new release on `EGit` based on the release branch.
|
||||
1. Create a new annotated tag and push it with `git tag -a satrs-core-<version>` and
|
||||
`git push -u origin satrs-core-<version>`
|
||||
2. Create a new release on `EGit` based on the tag.
|
||||
|
||||
|
871
satrs-core/src/cfdp/dest.rs
Normal file
@ -0,0 +1,871 @@
|
||||
use core::str::{from_utf8, Utf8Error};
|
||||
use std::{
|
||||
fs::{metadata, File},
|
||||
io::{BufReader, Read, Seek, SeekFrom, Write},
|
||||
path::{Path, PathBuf},
|
||||
};
|
||||
|
||||
use crate::cfdp::user::TransactionFinishedParams;
|
||||
|
||||
use super::{
|
||||
user::{CfdpUser, MetadataReceivedParams},
|
||||
PacketInfo, PacketTarget, State, TransactionId, TransactionStep, CRC_32,
|
||||
};
|
||||
use smallvec::SmallVec;
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{
|
||||
eof::EofPdu,
|
||||
file_data::FileDataPdu,
|
||||
finished::{DeliveryCode, FileStatus, FinishedPdu},
|
||||
metadata::{MetadataGenericParams, MetadataPdu},
|
||||
CommonPduConfig, FileDirectiveType, PduError, PduHeader,
|
||||
},
|
||||
tlv::{msg_to_user::MsgToUserTlv, EntityIdTlv, TlvType},
|
||||
ConditionCode, PduType, TransmissionMode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
use thiserror::Error;
|
||||
|
||||
pub struct DestinationHandler {
|
||||
id: UnsignedByteField,
|
||||
step: TransactionStep,
|
||||
state: State,
|
||||
tparams: TransactionParams,
|
||||
packets_to_send_ctx: PacketsToSendContext,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct PacketsToSendContext {
|
||||
packet_available: bool,
|
||||
directive: Option<FileDirectiveType>,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct FileProperties {
|
||||
src_file_name: [u8; u8::MAX as usize],
|
||||
src_file_name_len: usize,
|
||||
dest_file_name: [u8; u8::MAX as usize],
|
||||
dest_file_name_len: usize,
|
||||
dest_path_buf: PathBuf,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct TransferState {
|
||||
transaction_id: Option<TransactionId>,
|
||||
progress: usize,
|
||||
condition_code: ConditionCode,
|
||||
delivery_code: DeliveryCode,
|
||||
file_status: FileStatus,
|
||||
metadata_params: MetadataGenericParams,
|
||||
}
|
||||
|
||||
impl Default for TransferState {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
transaction_id: None,
|
||||
progress: Default::default(),
|
||||
condition_code: ConditionCode::NoError,
|
||||
delivery_code: DeliveryCode::Incomplete,
|
||||
file_status: FileStatus::Unreported,
|
||||
metadata_params: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
#[derive(Debug)]
|
||||
struct TransactionParams {
|
||||
tstate: TransferState,
|
||||
pdu_conf: CommonPduConfig,
|
||||
file_properties: FileProperties,
|
||||
cksum_buf: [u8; 1024],
|
||||
msgs_to_user_size: usize,
|
||||
msgs_to_user_buf: [u8; 1024],
|
||||
}
|
||||
|
||||
impl Default for FileProperties {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
src_file_name: [0; u8::MAX as usize],
|
||||
src_file_name_len: Default::default(),
|
||||
dest_file_name: [0; u8::MAX as usize],
|
||||
dest_file_name_len: Default::default(),
|
||||
dest_path_buf: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionParams {
|
||||
fn file_size(&self) -> usize {
|
||||
self.tstate.metadata_params.file_size as usize
|
||||
}
|
||||
|
||||
fn metadata_params(&self) -> &MetadataGenericParams {
|
||||
&self.tstate.metadata_params
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TransactionParams {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
pdu_conf: Default::default(),
|
||||
cksum_buf: [0; 1024],
|
||||
msgs_to_user_size: 0,
|
||||
msgs_to_user_buf: [0; 1024],
|
||||
tstate: Default::default(),
|
||||
file_properties: Default::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TransactionParams {
|
||||
fn reset(&mut self) {
|
||||
self.tstate.condition_code = ConditionCode::NoError;
|
||||
self.tstate.delivery_code = DeliveryCode::Incomplete;
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum DestError {
|
||||
/// File directive expected, but none specified
|
||||
#[error("expected file directive")]
|
||||
DirectiveExpected,
|
||||
#[error("can not process packet type {0:?}")]
|
||||
CantProcessPacketType(FileDirectiveType),
|
||||
#[error("can not process file data PDUs in current state")]
|
||||
WrongStateForFileDataAndEof,
|
||||
// Received a new metadata PDU while already being busy with a file transfer.
|
||||
#[error("busy with transfer")]
|
||||
RecvdMetadataButIsBusy,
|
||||
#[error("empty source file field")]
|
||||
EmptySrcFileField,
|
||||
#[error("empty dest file field")]
|
||||
EmptyDestFileField,
|
||||
#[error("pdu error {0}")]
|
||||
Pdu(#[from] PduError),
|
||||
#[error("io error {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
#[error("path conversion error {0}")]
|
||||
PathConversion(#[from] Utf8Error),
|
||||
#[error("error building dest path from source file name and dest folder")]
|
||||
PathConcatError,
|
||||
}
|
||||
|
||||
impl DestinationHandler {
|
||||
pub fn new(id: impl Into<UnsignedByteField>) -> Self {
|
||||
Self {
|
||||
id: id.into(),
|
||||
step: TransactionStep::Idle,
|
||||
state: State::Idle,
|
||||
tparams: Default::default(),
|
||||
packets_to_send_ctx: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn state_machine(&mut self, cfdp_user: &mut impl CfdpUser) -> Result<(), DestError> {
|
||||
match self.state {
|
||||
State::Idle => todo!(),
|
||||
State::BusyClass1Nacked => self.fsm_nacked(cfdp_user),
|
||||
State::BusyClass2Acked => todo!("acknowledged mode not implemented yet"),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn insert_packet(&mut self, packet_info: &PacketInfo) -> Result<(), DestError> {
|
||||
if packet_info.target() != PacketTarget::DestEntity {
|
||||
// Unwrap is okay here, a PacketInfo for a file data PDU should always have the
|
||||
// destination as the target.
|
||||
return Err(DestError::CantProcessPacketType(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
));
|
||||
}
|
||||
match packet_info.pdu_type {
|
||||
PduType::FileDirective => {
|
||||
if packet_info.pdu_directive.is_none() {
|
||||
return Err(DestError::DirectiveExpected);
|
||||
}
|
||||
self.handle_file_directive(
|
||||
packet_info.pdu_directive.unwrap(),
|
||||
packet_info.raw_packet,
|
||||
)
|
||||
}
|
||||
PduType::FileData => self.handle_file_data(packet_info.raw_packet),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn packet_to_send_ready(&self) -> bool {
|
||||
self.packets_to_send_ctx.packet_available
|
||||
}
|
||||
|
||||
pub fn get_next_packet_to_send(
|
||||
&self,
|
||||
buf: &mut [u8],
|
||||
) -> Result<Option<(FileDirectiveType, usize)>, DestError> {
|
||||
if !self.packet_to_send_ready() {
|
||||
return Ok(None);
|
||||
}
|
||||
let directive = self.packets_to_send_ctx.directive.unwrap();
|
||||
let written_size = match directive {
|
||||
FileDirectiveType::FinishedPdu => {
|
||||
let pdu_header = PduHeader::new_no_file_data(self.tparams.pdu_conf, 0);
|
||||
let finished_pdu = if self.tparams.tstate.condition_code == ConditionCode::NoError
|
||||
|| self.tparams.tstate.condition_code == ConditionCode::UnsupportedChecksumType
|
||||
{
|
||||
FinishedPdu::new_default(
|
||||
pdu_header,
|
||||
self.tparams.tstate.delivery_code,
|
||||
self.tparams.tstate.file_status,
|
||||
)
|
||||
} else {
|
||||
// TODO: Are there cases where this ID is actually the source entity ID?
|
||||
let entity_id = EntityIdTlv::new(self.id);
|
||||
FinishedPdu::new_with_error(
|
||||
pdu_header,
|
||||
self.tparams.tstate.condition_code,
|
||||
self.tparams.tstate.delivery_code,
|
||||
self.tparams.tstate.file_status,
|
||||
entity_id,
|
||||
)
|
||||
};
|
||||
finished_pdu.write_to_bytes(buf)?
|
||||
}
|
||||
FileDirectiveType::AckPdu => todo!(),
|
||||
FileDirectiveType::NakPdu => todo!(),
|
||||
FileDirectiveType::KeepAlivePdu => todo!(),
|
||||
_ => {
|
||||
// This should never happen and is considered an internal impl error
|
||||
panic!("invalid file directive {directive:?} for dest handler send packet");
|
||||
}
|
||||
};
|
||||
Ok(Some((directive, written_size)))
|
||||
}
|
||||
|
||||
pub fn handle_file_directive(
|
||||
&mut self,
|
||||
pdu_directive: FileDirectiveType,
|
||||
raw_packet: &[u8],
|
||||
) -> Result<(), DestError> {
|
||||
match pdu_directive {
|
||||
FileDirectiveType::EofPdu => self.handle_eof_pdu(raw_packet)?,
|
||||
FileDirectiveType::FinishedPdu
|
||||
| FileDirectiveType::NakPdu
|
||||
| FileDirectiveType::KeepAlivePdu => {
|
||||
return Err(DestError::CantProcessPacketType(pdu_directive));
|
||||
}
|
||||
FileDirectiveType::AckPdu => {
|
||||
todo!(
|
||||
"check whether ACK pdu handling is applicable by checking the acked directive field"
|
||||
)
|
||||
}
|
||||
FileDirectiveType::MetadataPdu => self.handle_metadata_pdu(raw_packet)?,
|
||||
FileDirectiveType::PromptPdu => self.handle_prompt_pdu(raw_packet)?,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn handle_metadata_pdu(&mut self, raw_packet: &[u8]) -> Result<(), DestError> {
|
||||
if self.state != State::Idle {
|
||||
return Err(DestError::RecvdMetadataButIsBusy);
|
||||
}
|
||||
let metadata_pdu = MetadataPdu::from_bytes(raw_packet)?;
|
||||
self.tparams.reset();
|
||||
self.tparams.tstate.metadata_params = *metadata_pdu.metadata_params();
|
||||
let src_name = metadata_pdu.src_file_name();
|
||||
if src_name.is_empty() {
|
||||
return Err(DestError::EmptySrcFileField);
|
||||
}
|
||||
self.tparams.file_properties.src_file_name[..src_name.len_value()]
|
||||
.copy_from_slice(src_name.value());
|
||||
self.tparams.file_properties.src_file_name_len = src_name.len_value();
|
||||
let dest_name = metadata_pdu.dest_file_name();
|
||||
if dest_name.is_empty() {
|
||||
return Err(DestError::EmptyDestFileField);
|
||||
}
|
||||
self.tparams.file_properties.dest_file_name[..dest_name.len_value()]
|
||||
.copy_from_slice(dest_name.value());
|
||||
self.tparams.file_properties.dest_file_name_len = dest_name.len_value();
|
||||
self.tparams.pdu_conf = *metadata_pdu.pdu_header().common_pdu_conf();
|
||||
self.tparams.msgs_to_user_size = 0;
|
||||
if metadata_pdu.options().is_some() {
|
||||
for option_tlv in metadata_pdu.options_iter().unwrap() {
|
||||
if option_tlv.is_standard_tlv()
|
||||
&& option_tlv.tlv_type().unwrap() == TlvType::MsgToUser
|
||||
{
|
||||
self.tparams
|
||||
.msgs_to_user_buf
|
||||
.copy_from_slice(option_tlv.raw_data().unwrap());
|
||||
self.tparams.msgs_to_user_size += option_tlv.len_full();
|
||||
}
|
||||
}
|
||||
}
|
||||
if self.tparams.pdu_conf.trans_mode == TransmissionMode::Unacknowledged {
|
||||
self.state = State::BusyClass1Nacked;
|
||||
} else {
|
||||
self.state = State::BusyClass2Acked;
|
||||
}
|
||||
self.step = TransactionStep::TransactionStart;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn handle_file_data(&mut self, raw_packet: &[u8]) -> Result<(), DestError> {
|
||||
if self.state == State::Idle || self.step != TransactionStep::ReceivingFileDataPdus {
|
||||
return Err(DestError::WrongStateForFileDataAndEof);
|
||||
}
|
||||
let fd_pdu = FileDataPdu::from_bytes(raw_packet)?;
|
||||
let mut dest_file = File::options()
|
||||
.write(true)
|
||||
.open(&self.tparams.file_properties.dest_path_buf)?;
|
||||
dest_file.seek(SeekFrom::Start(fd_pdu.offset()))?;
|
||||
dest_file.write_all(fd_pdu.file_data())?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(clippy::needless_if)]
|
||||
pub fn handle_eof_pdu(&mut self, raw_packet: &[u8]) -> Result<(), DestError> {
|
||||
if self.state == State::Idle || self.step != TransactionStep::ReceivingFileDataPdus {
|
||||
return Err(DestError::WrongStateForFileDataAndEof);
|
||||
}
|
||||
let eof_pdu = EofPdu::from_bytes(raw_packet)?;
|
||||
let checksum = eof_pdu.file_checksum();
|
||||
// For a standard disk based file system, which is assumed to be used for now, the file
|
||||
// will always be retained. This might change in the future.
|
||||
self.tparams.tstate.file_status = FileStatus::Retained;
|
||||
if self.checksum_check(checksum)? {
|
||||
self.tparams.tstate.condition_code = ConditionCode::NoError;
|
||||
self.tparams.tstate.delivery_code = DeliveryCode::Complete;
|
||||
} else {
|
||||
self.tparams.tstate.condition_code = ConditionCode::FileChecksumFailure;
|
||||
}
|
||||
// TODO: Check progress, and implement transfer completion timer as specified in the
|
||||
// standard. This timer protects against out of order arrival of packets.
|
||||
if self.tparams.tstate.progress != self.tparams.file_size() {}
|
||||
if self.state == State::BusyClass1Nacked {
|
||||
self.step = TransactionStep::TransferCompletion;
|
||||
} else {
|
||||
self.step = TransactionStep::SendingAckPdu;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn handle_prompt_pdu(&mut self, _raw_packet: &[u8]) -> Result<(), DestError> {
|
||||
todo!();
|
||||
}
|
||||
|
||||
fn checksum_check(&mut self, expected_checksum: u32) -> Result<bool, DestError> {
|
||||
let mut digest = CRC_32.digest();
|
||||
let file_to_check = File::open(&self.tparams.file_properties.dest_path_buf)?;
|
||||
let mut buf_reader = BufReader::new(file_to_check);
|
||||
loop {
|
||||
let bytes_read = buf_reader.read(&mut self.tparams.cksum_buf)?;
|
||||
if bytes_read == 0 {
|
||||
break;
|
||||
}
|
||||
digest.update(&self.tparams.cksum_buf[0..bytes_read]);
|
||||
}
|
||||
if digest.finalize() == expected_checksum {
|
||||
return Ok(true);
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
fn fsm_nacked(&mut self, cfdp_user: &mut impl CfdpUser) -> Result<(), DestError> {
|
||||
if self.step == TransactionStep::TransactionStart {
|
||||
self.transaction_start(cfdp_user)?;
|
||||
}
|
||||
if self.step == TransactionStep::TransferCompletion {
|
||||
self.transfer_completion(cfdp_user)?;
|
||||
}
|
||||
if self.step == TransactionStep::SendingAckPdu {
|
||||
todo!("no support for acknowledged mode yet");
|
||||
}
|
||||
if self.step == TransactionStep::SendingFinishedPdu {
|
||||
self.reset();
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the step, which denotes the exact step of a pending CFDP transaction when applicable.
|
||||
pub fn step(&self) -> TransactionStep {
|
||||
self.step
|
||||
}
|
||||
|
||||
/// Get the state, which denotes whether the CFDP handler is active, and which CFDP class
|
||||
/// is used if it is active.
|
||||
pub fn state(&self) -> State {
|
||||
self.state
|
||||
}
|
||||
|
||||
fn transaction_start(&mut self, cfdp_user: &mut impl CfdpUser) -> Result<(), DestError> {
|
||||
let dest_name = from_utf8(
|
||||
&self.tparams.file_properties.dest_file_name
|
||||
[..self.tparams.file_properties.dest_file_name_len],
|
||||
)?;
|
||||
let dest_path = Path::new(dest_name);
|
||||
self.tparams.file_properties.dest_path_buf = dest_path.to_path_buf();
|
||||
let source_id = self.tparams.pdu_conf.source_id();
|
||||
let id = TransactionId::new(source_id, self.tparams.pdu_conf.transaction_seq_num);
|
||||
let src_name = from_utf8(
|
||||
&self.tparams.file_properties.src_file_name
|
||||
[0..self.tparams.file_properties.src_file_name_len],
|
||||
)?;
|
||||
let mut msgs_to_user = SmallVec::<[MsgToUserTlv<'_>; 16]>::new();
|
||||
let mut num_msgs_to_user = 0;
|
||||
if self.tparams.msgs_to_user_size > 0 {
|
||||
let mut index = 0;
|
||||
while index < self.tparams.msgs_to_user_size {
|
||||
// This should never panic as the validity of the options was checked beforehand.
|
||||
let msgs_to_user_tlv =
|
||||
MsgToUserTlv::from_bytes(&self.tparams.msgs_to_user_buf[index..])
|
||||
.expect("message to user creation failed unexpectedly");
|
||||
msgs_to_user.push(msgs_to_user_tlv);
|
||||
index += msgs_to_user_tlv.len_full();
|
||||
num_msgs_to_user += 1;
|
||||
}
|
||||
}
|
||||
let metadata_recvd_params = MetadataReceivedParams {
|
||||
id,
|
||||
source_id,
|
||||
file_size: self.tparams.tstate.metadata_params.file_size,
|
||||
src_file_name: src_name,
|
||||
dest_file_name: dest_name,
|
||||
msgs_to_user: &msgs_to_user[..num_msgs_to_user],
|
||||
};
|
||||
self.tparams.tstate.transaction_id = Some(id);
|
||||
cfdp_user.metadata_recvd_indication(&metadata_recvd_params);
|
||||
|
||||
if dest_path.exists() {
|
||||
let dest_metadata = metadata(dest_path)?;
|
||||
if dest_metadata.is_dir() {
|
||||
// Create the new destination path by concatenating the last part of the source
|
||||
// name and the destination folder. For example, for a source file of /tmp/hello.txt
|
||||
// and a destination name of /home/test, the resulting file name should be
|
||||
// /home/test/hello.txt
|
||||
let source_path = Path::new(from_utf8(
|
||||
&self.tparams.file_properties.src_file_name
|
||||
[..self.tparams.file_properties.src_file_name_len],
|
||||
)?);
|
||||
|
||||
let source_name = source_path.file_name();
|
||||
if source_name.is_none() {
|
||||
return Err(DestError::PathConcatError);
|
||||
}
|
||||
let source_name = source_name.unwrap();
|
||||
self.tparams.file_properties.dest_path_buf.push(source_name);
|
||||
}
|
||||
}
|
||||
// This function does exactly what we require: Create a new file if it does not exist yet
|
||||
// and truncate an existing one.
|
||||
File::create(&self.tparams.file_properties.dest_path_buf)?;
|
||||
self.step = TransactionStep::ReceivingFileDataPdus;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn transfer_completion(&mut self, cfdp_user: &mut impl CfdpUser) -> Result<(), DestError> {
|
||||
let transaction_finished_params = TransactionFinishedParams {
|
||||
id: self.tparams.tstate.transaction_id.unwrap(),
|
||||
condition_code: self.tparams.tstate.condition_code,
|
||||
delivery_code: self.tparams.tstate.delivery_code,
|
||||
file_status: self.tparams.tstate.file_status,
|
||||
};
|
||||
cfdp_user.transaction_finished_indication(&transaction_finished_params);
|
||||
// This function should never be called with metadata parameters not set
|
||||
if self.tparams.metadata_params().closure_requested {
|
||||
self.prepare_finished_pdu()?;
|
||||
self.step = TransactionStep::SendingFinishedPdu;
|
||||
} else {
|
||||
self.reset();
|
||||
self.state = State::Idle;
|
||||
self.step = TransactionStep::Idle;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn reset(&mut self) {
|
||||
self.step = TransactionStep::Idle;
|
||||
self.state = State::Idle;
|
||||
self.packets_to_send_ctx.packet_available = false;
|
||||
self.tparams.reset();
|
||||
}
|
||||
|
||||
fn prepare_finished_pdu(&mut self) -> Result<(), DestError> {
|
||||
self.packets_to_send_ctx.packet_available = true;
|
||||
self.packets_to_send_ctx.directive = Some(FileDirectiveType::FinishedPdu);
|
||||
self.step = TransactionStep::SendingFinishedPdu;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::sync::atomic::{AtomicU8, Ordering};
|
||||
#[allow(unused_imports)]
|
||||
use std::println;
|
||||
use std::{env::temp_dir, fs};
|
||||
|
||||
use alloc::{format, string::String};
|
||||
use rand::Rng;
|
||||
use spacepackets::{
|
||||
cfdp::{lv::Lv, ChecksumType},
|
||||
util::{UbfU16, UnsignedByteFieldU16},
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
const LOCAL_ID: UnsignedByteFieldU16 = UnsignedByteFieldU16::new(1);
|
||||
const REMOTE_ID: UnsignedByteFieldU16 = UnsignedByteFieldU16::new(2);
|
||||
|
||||
const SRC_NAME: &str = "__cfdp__source-file";
|
||||
const DEST_NAME: &str = "__cfdp__dest-file";
|
||||
|
||||
static ATOMIC_COUNTER: AtomicU8 = AtomicU8::new(0);
|
||||
|
||||
#[derive(Default)]
|
||||
struct TestCfdpUser {
|
||||
next_expected_seq_num: u64,
|
||||
expected_full_src_name: String,
|
||||
expected_full_dest_name: String,
|
||||
expected_file_size: usize,
|
||||
}
|
||||
|
||||
impl TestCfdpUser {
|
||||
fn generic_id_check(&self, id: &crate::cfdp::TransactionId) {
|
||||
assert_eq!(id.source_id, LOCAL_ID.into());
|
||||
assert_eq!(id.seq_num().value(), self.next_expected_seq_num);
|
||||
}
|
||||
}
|
||||
|
||||
impl CfdpUser for TestCfdpUser {
|
||||
fn transaction_indication(&mut self, id: &crate::cfdp::TransactionId) {
|
||||
self.generic_id_check(id);
|
||||
}
|
||||
|
||||
fn eof_sent_indication(&mut self, id: &crate::cfdp::TransactionId) {
|
||||
self.generic_id_check(id);
|
||||
}
|
||||
|
||||
fn transaction_finished_indication(
|
||||
&mut self,
|
||||
finished_params: &crate::cfdp::user::TransactionFinishedParams,
|
||||
) {
|
||||
self.generic_id_check(&finished_params.id);
|
||||
}
|
||||
|
||||
fn metadata_recvd_indication(
|
||||
&mut self,
|
||||
md_recvd_params: &crate::cfdp::user::MetadataReceivedParams,
|
||||
) {
|
||||
self.generic_id_check(&md_recvd_params.id);
|
||||
assert_eq!(
|
||||
String::from(md_recvd_params.src_file_name),
|
||||
self.expected_full_src_name
|
||||
);
|
||||
assert_eq!(
|
||||
String::from(md_recvd_params.dest_file_name),
|
||||
self.expected_full_dest_name
|
||||
);
|
||||
assert_eq!(md_recvd_params.msgs_to_user.len(), 0);
|
||||
assert_eq!(md_recvd_params.source_id, LOCAL_ID.into());
|
||||
assert_eq!(md_recvd_params.file_size as usize, self.expected_file_size);
|
||||
}
|
||||
|
||||
fn file_segment_recvd_indication(
|
||||
&mut self,
|
||||
_segment_recvd_params: &crate::cfdp::user::FileSegmentRecvdParams,
|
||||
) {
|
||||
}
|
||||
|
||||
fn report_indication(&mut self, _id: &crate::cfdp::TransactionId) {}
|
||||
|
||||
fn suspended_indication(
|
||||
&mut self,
|
||||
_id: &crate::cfdp::TransactionId,
|
||||
_condition_code: ConditionCode,
|
||||
) {
|
||||
panic!("unexpected suspended indication");
|
||||
}
|
||||
|
||||
fn resumed_indication(&mut self, _id: &crate::cfdp::TransactionId, _progresss: u64) {}
|
||||
|
||||
fn fault_indication(
|
||||
&mut self,
|
||||
_id: &crate::cfdp::TransactionId,
|
||||
_condition_code: ConditionCode,
|
||||
_progress: u64,
|
||||
) {
|
||||
panic!("unexpected fault indication");
|
||||
}
|
||||
|
||||
fn abandoned_indication(
|
||||
&mut self,
|
||||
_id: &crate::cfdp::TransactionId,
|
||||
_condition_code: ConditionCode,
|
||||
_progress: u64,
|
||||
) {
|
||||
panic!("unexpected abandoned indication");
|
||||
}
|
||||
|
||||
fn eof_recvd_indication(&mut self, id: &crate::cfdp::TransactionId) {
|
||||
self.generic_id_check(id);
|
||||
}
|
||||
}
|
||||
|
||||
fn init_check(handler: &DestinationHandler) {
|
||||
assert_eq!(handler.state(), State::Idle);
|
||||
assert_eq!(handler.step(), TransactionStep::Idle);
|
||||
}
|
||||
|
||||
fn init_full_filenames() -> (PathBuf, PathBuf) {
|
||||
let mut file_path = temp_dir();
|
||||
let mut src_path = file_path.clone();
|
||||
// Atomic counter used to allow concurrent tests.
|
||||
let unique_counter = ATOMIC_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
// Create unique test filenames.
|
||||
let src_name_unique = format!("{SRC_NAME}{}.txt", unique_counter);
|
||||
let dest_name_unique = format!("{DEST_NAME}{}.txt", unique_counter);
|
||||
src_path.push(src_name_unique);
|
||||
file_path.push(dest_name_unique);
|
||||
(src_path, file_path)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let dest_handler = DestinationHandler::new(REMOTE_ID);
|
||||
init_check(&dest_handler);
|
||||
}
|
||||
|
||||
fn create_pdu_header(seq_num: impl Into<UnsignedByteField>) -> PduHeader {
|
||||
let mut pdu_conf =
|
||||
CommonPduConfig::new_with_byte_fields(LOCAL_ID, REMOTE_ID, seq_num).unwrap();
|
||||
pdu_conf.trans_mode = TransmissionMode::Unacknowledged;
|
||||
PduHeader::new_no_file_data(pdu_conf, 0)
|
||||
}
|
||||
|
||||
fn create_metadata_pdu<'filename>(
|
||||
pdu_header: &PduHeader,
|
||||
src_name: &'filename Path,
|
||||
dest_name: &'filename Path,
|
||||
file_size: u64,
|
||||
) -> MetadataPdu<'filename, 'filename, 'static> {
|
||||
let metadata_params = MetadataGenericParams::new(false, ChecksumType::Crc32, file_size);
|
||||
MetadataPdu::new(
|
||||
*pdu_header,
|
||||
metadata_params,
|
||||
Lv::new_from_str(src_name.as_os_str().to_str().unwrap()).unwrap(),
|
||||
Lv::new_from_str(dest_name.as_os_str().to_str().unwrap()).unwrap(),
|
||||
None,
|
||||
)
|
||||
}
|
||||
|
||||
fn insert_metadata_pdu(
|
||||
metadata_pdu: &MetadataPdu,
|
||||
buf: &mut [u8],
|
||||
dest_handler: &mut DestinationHandler,
|
||||
) {
|
||||
let written_len = metadata_pdu
|
||||
.write_to_bytes(buf)
|
||||
.expect("writing metadata PDU failed");
|
||||
let packet_info =
|
||||
PacketInfo::new(&buf[..written_len]).expect("generating packet info failed");
|
||||
let insert_result = dest_handler.insert_packet(&packet_info);
|
||||
if let Err(e) = insert_result {
|
||||
panic!("insert result error: {e}");
|
||||
}
|
||||
}
|
||||
|
||||
fn insert_eof_pdu(
|
||||
file_data: &[u8],
|
||||
pdu_header: &PduHeader,
|
||||
buf: &mut [u8],
|
||||
dest_handler: &mut DestinationHandler,
|
||||
) {
|
||||
let mut digest = CRC_32.digest();
|
||||
digest.update(file_data);
|
||||
let crc32 = digest.finalize();
|
||||
let eof_pdu = EofPdu::new_no_error(*pdu_header, crc32, file_data.len() as u64);
|
||||
let result = eof_pdu.write_to_bytes(buf);
|
||||
assert!(result.is_ok());
|
||||
let packet_info = PacketInfo::new(&buf).expect("generating packet info failed");
|
||||
let result = dest_handler.insert_packet(&packet_info);
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_file_transfer() {
|
||||
let (src_name, dest_name) = init_full_filenames();
|
||||
assert!(!Path::exists(&dest_name));
|
||||
let mut buf: [u8; 512] = [0; 512];
|
||||
let mut test_user = TestCfdpUser {
|
||||
next_expected_seq_num: 0,
|
||||
expected_full_src_name: src_name.to_string_lossy().into(),
|
||||
expected_full_dest_name: dest_name.to_string_lossy().into(),
|
||||
expected_file_size: 0,
|
||||
};
|
||||
// We treat the destination handler like it is a remote entity.
|
||||
let mut dest_handler = DestinationHandler::new(REMOTE_ID);
|
||||
init_check(&dest_handler);
|
||||
|
||||
let seq_num = UbfU16::new(0);
|
||||
let pdu_header = create_pdu_header(seq_num);
|
||||
let metadata_pdu =
|
||||
create_metadata_pdu(&pdu_header, src_name.as_path(), dest_name.as_path(), 0);
|
||||
insert_metadata_pdu(&metadata_pdu, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
if let Err(e) = result {
|
||||
panic!("dest handler fsm error: {e}");
|
||||
}
|
||||
assert_ne!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::ReceivingFileDataPdus);
|
||||
|
||||
insert_eof_pdu(&[], &pdu_header, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::Idle);
|
||||
assert!(Path::exists(&dest_name));
|
||||
let read_content = fs::read(&dest_name).expect("reading back string failed");
|
||||
assert_eq!(read_content.len(), 0);
|
||||
assert!(fs::remove_file(dest_name).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_small_file_transfer() {
|
||||
let (src_name, dest_name) = init_full_filenames();
|
||||
assert!(!Path::exists(&dest_name));
|
||||
let file_data_str = "Hello World!";
|
||||
let file_data = file_data_str.as_bytes();
|
||||
let mut buf: [u8; 512] = [0; 512];
|
||||
let mut test_user = TestCfdpUser {
|
||||
next_expected_seq_num: 0,
|
||||
expected_full_src_name: src_name.to_string_lossy().into(),
|
||||
expected_full_dest_name: dest_name.to_string_lossy().into(),
|
||||
expected_file_size: file_data.len(),
|
||||
};
|
||||
// We treat the destination handler like it is a remote entity.
|
||||
let mut dest_handler = DestinationHandler::new(REMOTE_ID);
|
||||
init_check(&dest_handler);
|
||||
|
||||
let seq_num = UbfU16::new(0);
|
||||
let pdu_header = create_pdu_header(seq_num);
|
||||
let metadata_pdu = create_metadata_pdu(
|
||||
&pdu_header,
|
||||
src_name.as_path(),
|
||||
dest_name.as_path(),
|
||||
file_data.len() as u64,
|
||||
);
|
||||
insert_metadata_pdu(&metadata_pdu, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
if let Err(e) = result {
|
||||
panic!("dest handler fsm error: {e}");
|
||||
}
|
||||
assert_ne!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::ReceivingFileDataPdus);
|
||||
|
||||
let offset = 0;
|
||||
let filedata_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, offset, file_data);
|
||||
filedata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
let result = dest_handler.insert_packet(&packet_info);
|
||||
if let Err(e) = result {
|
||||
panic!("destination handler packet insertion error: {e}");
|
||||
}
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
|
||||
insert_eof_pdu(file_data, &pdu_header, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::Idle);
|
||||
|
||||
assert!(Path::exists(&dest_name));
|
||||
let read_content = fs::read_to_string(&dest_name).expect("reading back string failed");
|
||||
assert_eq!(read_content, file_data_str);
|
||||
assert!(fs::remove_file(dest_name).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_segmented_file_transfer() {
|
||||
let (src_name, dest_name) = init_full_filenames();
|
||||
assert!(!Path::exists(&dest_name));
|
||||
let mut rng = rand::thread_rng();
|
||||
let mut random_data = [0u8; 512];
|
||||
rng.fill(&mut random_data);
|
||||
let mut buf: [u8; 512] = [0; 512];
|
||||
let mut test_user = TestCfdpUser {
|
||||
next_expected_seq_num: 0,
|
||||
expected_full_src_name: src_name.to_string_lossy().into(),
|
||||
expected_full_dest_name: dest_name.to_string_lossy().into(),
|
||||
expected_file_size: random_data.len(),
|
||||
};
|
||||
|
||||
// We treat the destination handler like it is a remote entity.
|
||||
let mut dest_handler = DestinationHandler::new(REMOTE_ID);
|
||||
init_check(&dest_handler);
|
||||
|
||||
let seq_num = UbfU16::new(0);
|
||||
let pdu_header = create_pdu_header(seq_num);
|
||||
let metadata_pdu = create_metadata_pdu(
|
||||
&pdu_header,
|
||||
src_name.as_path(),
|
||||
dest_name.as_path(),
|
||||
random_data.len() as u64,
|
||||
);
|
||||
insert_metadata_pdu(&metadata_pdu, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
if let Err(e) = result {
|
||||
panic!("dest handler fsm error: {e}");
|
||||
}
|
||||
assert_ne!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::ReceivingFileDataPdus);
|
||||
|
||||
// First file data PDU
|
||||
let mut offset: usize = 0;
|
||||
let segment_len = 256;
|
||||
let filedata_pdu = FileDataPdu::new_no_seg_metadata(
|
||||
pdu_header,
|
||||
offset as u64,
|
||||
&random_data[0..segment_len],
|
||||
);
|
||||
filedata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
let result = dest_handler.insert_packet(&packet_info);
|
||||
if let Err(e) = result {
|
||||
panic!("destination handler packet insertion error: {e}");
|
||||
}
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
|
||||
// Second file data PDU
|
||||
offset += segment_len;
|
||||
let filedata_pdu = FileDataPdu::new_no_seg_metadata(
|
||||
pdu_header,
|
||||
offset as u64,
|
||||
&random_data[segment_len..],
|
||||
);
|
||||
filedata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
let result = dest_handler.insert_packet(&packet_info);
|
||||
if let Err(e) = result {
|
||||
panic!("destination handler packet insertion error: {e}");
|
||||
}
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
|
||||
insert_eof_pdu(&random_data, &pdu_header, &mut buf, &mut dest_handler);
|
||||
let result = dest_handler.state_machine(&mut test_user);
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(dest_handler.state(), State::Idle);
|
||||
assert_eq!(dest_handler.step(), TransactionStep::Idle);
|
||||
|
||||
// Clean up
|
||||
assert!(Path::exists(&dest_name));
|
||||
let read_content = fs::read(&dest_name).expect("reading back string failed");
|
||||
assert_eq!(read_content, random_data);
|
||||
assert!(fs::remove_file(dest_name).is_ok());
|
||||
}
|
||||
}
|
320
satrs-core/src/cfdp/mod.rs
Normal file
@ -0,0 +1,320 @@
|
||||
use crc::{Crc, CRC_32_CKSUM};
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{FileDirectiveType, PduError, PduHeader},
|
||||
ChecksumType, PduType, TransmissionMode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::boxed::Box;
|
||||
#[cfg(feature = "serde")]
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub mod dest;
|
||||
#[cfg(feature = "std")]
|
||||
pub mod source;
|
||||
pub mod user;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum EntityType {
|
||||
Sending,
|
||||
Receiving,
|
||||
}
|
||||
|
||||
/// Generic abstraction for a check timer which has different functionality depending on whether
|
||||
/// the using entity is the sending entity or the receiving entity for the unacknowledged
|
||||
/// transmission mode.
|
||||
///
|
||||
/// For the sending entity, this timer determines the expiry period for declaring a check limit
|
||||
/// fault after sending an EOF PDU with requested closure. This allows a timeout of the transfer.
|
||||
/// Also see 4.6.3.2 of the CFDP standard.
|
||||
///
|
||||
/// For the receiving entity, this timer determines the expiry period for incrementing a check
|
||||
/// counter after an EOF PDU is received for an incomplete file transfer. This allows out-of-order
|
||||
/// reception of file data PDUs and EOF PDUs. Also see 4.6.3.3 of the CFDP standard.
|
||||
pub trait CheckTimerProvider {
|
||||
fn has_expired(&self) -> bool;
|
||||
}
|
||||
|
||||
/// A generic trait which allows CFDP entities to create check timers which are required to
|
||||
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
|
||||
/// and 4.6.3.3. The [CheckTimerProvider] provides more information about the purpose of the
|
||||
/// check timer.
|
||||
///
|
||||
/// This trait also allows the creation of different check timers depending on
|
||||
/// the ID of the local entity, the ID of the remote entity for a given transaction, and the
|
||||
/// type of entity.
|
||||
#[cfg(feature = "alloc")]
|
||||
pub trait CheckTimerCreator {
|
||||
fn get_check_timer_provider(
|
||||
local_id: &UnsignedByteField,
|
||||
remote_id: &UnsignedByteField,
|
||||
entity_type: EntityType,
|
||||
) -> Box<dyn CheckTimerProvider>;
|
||||
}
|
||||
|
||||
/// Simple implementation of the [CheckTimerProvider] trait assuming a standard runtime.
|
||||
/// It also assumes that one-second accuracy of the check timer period is sufficient.
|
||||
#[cfg(feature = "std")]
|
||||
pub struct StdCheckTimer {
|
||||
expiry_time_seconds: u64,
|
||||
start_time: std::time::Instant,
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl StdCheckTimer {
|
||||
pub fn new(expiry_time_seconds: u64) -> Self {
|
||||
Self {
|
||||
expiry_time_seconds,
|
||||
start_time: std::time::Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
impl CheckTimerProvider for StdCheckTimer {
|
||||
fn has_expired(&self) -> bool {
|
||||
let elapsed_time = self.start_time.elapsed();
|
||||
if elapsed_time.as_secs() > self.expiry_time_seconds {
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct RemoteEntityConfig {
|
||||
pub entity_id: UnsignedByteField,
|
||||
pub max_file_segment_len: usize,
|
||||
pub closure_requeted_by_default: bool,
|
||||
pub crc_on_transmission_by_default: bool,
|
||||
pub default_transmission_mode: TransmissionMode,
|
||||
pub default_crc_type: ChecksumType,
|
||||
pub check_limit: u32,
|
||||
}
|
||||
|
||||
pub trait RemoteEntityConfigProvider {
|
||||
fn get_remote_config(&self, remote_id: &UnsignedByteField) -> Option<&RemoteEntityConfig>;
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub struct TransactionId {
|
||||
source_id: UnsignedByteField,
|
||||
seq_num: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl TransactionId {
|
||||
pub fn new(source_id: UnsignedByteField, seq_num: UnsignedByteField) -> Self {
|
||||
Self { source_id, seq_num }
|
||||
}
|
||||
|
||||
pub fn source_id(&self) -> &UnsignedByteField {
|
||||
&self.source_id
|
||||
}
|
||||
|
||||
pub fn seq_num(&self) -> &UnsignedByteField {
|
||||
&self.seq_num
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum TransactionStep {
|
||||
Idle = 0,
|
||||
TransactionStart = 1,
|
||||
ReceivingFileDataPdus = 2,
|
||||
SendingAckPdu = 3,
|
||||
TransferCompletion = 4,
|
||||
SendingFinishedPdu = 5,
|
||||
}
|
||||
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum State {
|
||||
Idle = 0,
|
||||
BusyClass1Nacked = 2,
|
||||
BusyClass2Acked = 3,
|
||||
}
|
||||
|
||||
pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);
|
||||
|
||||
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
|
||||
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
|
||||
pub enum PacketTarget {
|
||||
SourceEntity,
|
||||
DestEntity,
|
||||
}
|
||||
|
||||
/// This is a helper struct which contains base information about a particular PDU packet.
|
||||
/// This is also necessary information for CFDP packet routing. For example, some packet types
|
||||
/// like file data PDUs can only be used by CFDP source entities.
|
||||
pub struct PacketInfo<'raw_packet> {
|
||||
pdu_type: PduType,
|
||||
pdu_directive: Option<FileDirectiveType>,
|
||||
target: PacketTarget,
|
||||
raw_packet: &'raw_packet [u8],
|
||||
}
|
||||
|
||||
impl<'raw> PacketInfo<'raw> {
|
||||
pub fn new(raw_packet: &'raw [u8]) -> Result<Self, PduError> {
|
||||
let (pdu_header, header_len) = PduHeader::from_bytes(raw_packet)?;
|
||||
if pdu_header.pdu_type() == PduType::FileData {
|
||||
return Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: None,
|
||||
target: PacketTarget::DestEntity,
|
||||
raw_packet,
|
||||
});
|
||||
}
|
||||
if pdu_header.pdu_datafield_len() < 1 {
|
||||
return Err(PduError::FormatError);
|
||||
}
|
||||
// Route depending on PDU type and directive type if applicable. Retrieve directive type
|
||||
// from the raw stream for better performance (with sanity and directive code check).
|
||||
// The routing is based on section 4.5 of the CFDP standard which specifies the PDU forwarding
|
||||
// procedure.
|
||||
let directive = FileDirectiveType::try_from(raw_packet[header_len]).map_err(|_| {
|
||||
PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
}
|
||||
})?;
|
||||
let packet_target = match directive {
|
||||
// Section c) of 4.5.3: These PDUs should always be targeted towards the file sender a.k.a.
|
||||
// the source handler
|
||||
FileDirectiveType::NakPdu
|
||||
| FileDirectiveType::FinishedPdu
|
||||
| FileDirectiveType::KeepAlivePdu => PacketTarget::SourceEntity,
|
||||
// Section b) of 4.5.3: These PDUs should always be targeted towards the file receiver a.k.a.
|
||||
// the destination handler
|
||||
FileDirectiveType::MetadataPdu
|
||||
| FileDirectiveType::EofPdu
|
||||
| FileDirectiveType::PromptPdu => PacketTarget::DestEntity,
|
||||
// Section a): The recipient depends on the type of PDU that is being acknowledged. We can simply
|
||||
// extract the PDU type from the raw stream. If it is an EOF PDU, this packet is passed to
|
||||
// the source handler, for a Finished PDU, it is passed to the destination handler.
|
||||
FileDirectiveType::AckPdu => {
|
||||
let acked_directive = FileDirectiveType::try_from(raw_packet[header_len + 1])
|
||||
.map_err(|_| PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len],
|
||||
expected: None,
|
||||
})?;
|
||||
if acked_directive == FileDirectiveType::EofPdu {
|
||||
PacketTarget::SourceEntity
|
||||
} else if acked_directive == FileDirectiveType::FinishedPdu {
|
||||
PacketTarget::DestEntity
|
||||
} else {
|
||||
// TODO: Maybe a better error? This might be confusing..
|
||||
return Err(PduError::InvalidDirectiveType {
|
||||
found: raw_packet[header_len + 1],
|
||||
expected: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
};
|
||||
Ok(Self {
|
||||
pdu_type: pdu_header.pdu_type(),
|
||||
pdu_directive: Some(directive),
|
||||
target: packet_target,
|
||||
raw_packet,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn pdu_type(&self) -> PduType {
|
||||
self.pdu_type
|
||||
}
|
||||
|
||||
pub fn pdu_directive(&self) -> Option<FileDirectiveType> {
|
||||
self.pdu_directive
|
||||
}
|
||||
|
||||
pub fn target(&self) -> PacketTarget {
|
||||
self.target
|
||||
}
|
||||
|
||||
pub fn raw_packet(&self) -> &[u8] {
|
||||
self.raw_packet
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::cfdp::{
|
||||
lv::Lv,
|
||||
pdu::{
|
||||
eof::EofPdu,
|
||||
file_data::FileDataPdu,
|
||||
metadata::{MetadataGenericParams, MetadataPdu},
|
||||
CommonPduConfig, FileDirectiveType, PduHeader,
|
||||
},
|
||||
PduType,
|
||||
};
|
||||
|
||||
use crate::cfdp::PacketTarget;
|
||||
|
||||
use super::PacketInfo;
|
||||
|
||||
fn generic_pdu_header() -> PduHeader {
|
||||
let pdu_conf = CommonPduConfig::default();
|
||||
PduHeader::new_no_file_data(pdu_conf, 0)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metadata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let metadata_params = MetadataGenericParams::default();
|
||||
let src_file_name = "hello.txt";
|
||||
let dest_file_name = "hello-dest.txt";
|
||||
let src_lv = Lv::new_from_str(src_file_name).unwrap();
|
||||
let dest_lv = Lv::new_from_str(dest_file_name).unwrap();
|
||||
let metadata_pdu = MetadataPdu::new(pdu_header, metadata_params, src_lv, dest_lv, None);
|
||||
metadata_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing metadata PDU failed");
|
||||
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::MetadataPdu
|
||||
);
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filedata_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let file_data_pdu = FileDataPdu::new_no_seg_metadata(pdu_header, 0, &[]);
|
||||
file_data_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileData);
|
||||
assert!(packet_info.pdu_directive().is_none());
|
||||
assert_eq!(packet_info.target(), PacketTarget::DestEntity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_eof_pdu_info() {
|
||||
let mut buf: [u8; 128] = [0; 128];
|
||||
let pdu_header = generic_pdu_header();
|
||||
let eof_pdu = EofPdu::new_no_error(pdu_header, 0, 0);
|
||||
eof_pdu
|
||||
.write_to_bytes(&mut buf)
|
||||
.expect("writing file data PDU failed");
|
||||
let packet_info = PacketInfo::new(&buf).expect("creating packet info failed");
|
||||
assert_eq!(packet_info.pdu_type(), PduType::FileDirective);
|
||||
assert!(packet_info.pdu_directive().is_some());
|
||||
assert_eq!(
|
||||
packet_info.pdu_directive().unwrap(),
|
||||
FileDirectiveType::EofPdu
|
||||
);
|
||||
}
|
||||
}
|
15
satrs-core/src/cfdp/source.rs
Normal file
@ -0,0 +1,15 @@
|
||||
#![allow(dead_code)]
|
||||
use spacepackets::util::UnsignedByteField;
|
||||
|
||||
pub struct SourceHandler {
|
||||
id: UnsignedByteField,
|
||||
}
|
||||
|
||||
impl SourceHandler {
|
||||
pub fn new(id: impl Into<UnsignedByteField>) -> Self {
|
||||
Self { id: id.into() }
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {}
|
65
satrs-core/src/cfdp/user.rs
Normal file
@ -0,0 +1,65 @@
|
||||
use spacepackets::{
|
||||
cfdp::{
|
||||
pdu::{
|
||||
file_data::RecordContinuationState,
|
||||
finished::{DeliveryCode, FileStatus},
|
||||
},
|
||||
tlv::msg_to_user::MsgToUserTlv,
|
||||
ConditionCode,
|
||||
},
|
||||
util::UnsignedByteField,
|
||||
};
|
||||
|
||||
use super::TransactionId;
|
||||
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct TransactionFinishedParams {
|
||||
pub id: TransactionId,
|
||||
pub condition_code: ConditionCode,
|
||||
pub delivery_code: DeliveryCode,
|
||||
pub file_status: FileStatus,
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct MetadataReceivedParams<'src_file, 'dest_file, 'msgs_to_user> {
|
||||
pub id: TransactionId,
|
||||
pub source_id: UnsignedByteField,
|
||||
pub file_size: u64,
|
||||
pub src_file_name: &'src_file str,
|
||||
pub dest_file_name: &'dest_file str,
|
||||
pub msgs_to_user: &'msgs_to_user [MsgToUserTlv<'msgs_to_user>],
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct FileSegmentRecvdParams<'seg_meta> {
|
||||
pub id: TransactionId,
|
||||
pub offset: u64,
|
||||
pub length: usize,
|
||||
pub rec_cont_state: Option<RecordContinuationState>,
|
||||
pub segment_metadata: Option<&'seg_meta [u8]>,
|
||||
}
|
||||
|
||||
pub trait CfdpUser {
|
||||
fn transaction_indication(&mut self, id: &TransactionId);
|
||||
fn eof_sent_indication(&mut self, id: &TransactionId);
|
||||
fn transaction_finished_indication(&mut self, finished_params: &TransactionFinishedParams);
|
||||
fn metadata_recvd_indication(&mut self, md_recvd_params: &MetadataReceivedParams);
|
||||
fn file_segment_recvd_indication(&mut self, segment_recvd_params: &FileSegmentRecvdParams);
|
||||
// TODO: The standard does not strictly specify how the report information looks..
|
||||
fn report_indication(&mut self, id: &TransactionId);
|
||||
fn suspended_indication(&mut self, id: &TransactionId, condition_code: ConditionCode);
|
||||
fn resumed_indication(&mut self, id: &TransactionId, progress: u64);
|
||||
fn fault_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn abandoned_indication(
|
||||
&mut self,
|
||||
id: &TransactionId,
|
||||
condition_code: ConditionCode,
|
||||
progress: u64,
|
||||
);
|
||||
fn eof_recvd_indication(&mut self, id: &TransactionId);
|
||||
}
|
269
satrs-core/src/encoding/ccsds.rs
Normal file
@ -0,0 +1,269 @@
|
||||
#[cfg(feature = "alloc")]
|
||||
use alloc::vec::Vec;
|
||||
#[cfg(feature = "alloc")]
|
||||
use hashbrown::HashSet;
|
||||
use spacepackets::PacketId;
|
||||
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
pub trait PacketIdLookup {
|
||||
fn validate(&self, packet_id: u16) -> bool;
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<u16> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&packet_id)
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [u16] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&packet_id).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for Vec<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "alloc")]
|
||||
impl PacketIdLookup for HashSet<PacketId> {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.contains(&PacketId::from(packet_id))
|
||||
}
|
||||
}
|
||||
|
||||
impl PacketIdLookup for [PacketId] {
|
||||
fn validate(&self, packet_id: u16) -> bool {
|
||||
self.binary_search(&PacketId::from(packet_id)).is_ok()
|
||||
}
|
||||
}
|
||||
|
||||
/// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
|
||||
/// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then
|
||||
/// uses the length field of the packet to extract CCSDS packets.
|
||||
///
|
||||
/// This function is also able to deal with broken tail packets at the end, as long as the parser
/// can read the full 7 bytes which constitute a space packet header plus the minimum packet data size of one byte.
|
||||
/// If broken tail packets are detected, they are moved to the front of the buffer, and the write
|
||||
/// index for future write operations will be written to the `next_write_idx` argument.
|
||||
///
|
||||
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`
|
||||
/// and return the number of packets found. If the [ReceivesTcCore::pass_tc] calls fails, the
|
||||
/// error will be returned.
|
||||
pub fn parse_buffer_for_ccsds_space_packets<E>(
|
||||
buf: &mut [u8],
|
||||
packet_id_lookup: &(impl PacketIdLookup + ?Sized),
|
||||
tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized),
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<u32, E> {
|
||||
*next_write_idx = 0;
|
||||
let mut packets_found = 0;
|
||||
let mut current_idx = 0;
|
||||
let buf_len = buf.len();
|
||||
loop {
|
||||
if current_idx + 7 >= buf.len() {
|
||||
break;
|
||||
}
|
||||
let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap());
|
||||
if packet_id_lookup.validate(packet_id) {
|
||||
let length_field =
|
||||
u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
|
||||
let packet_size = length_field + 7;
|
||||
if (current_idx + packet_size as usize) <= buf_len {
|
||||
tc_receiver.pass_tc(&buf[current_idx..current_idx + packet_size as usize])?;
|
||||
packets_found += 1;
|
||||
} else {
|
||||
// Move packet to start of buffer if applicable.
|
||||
if current_idx > 0 {
|
||||
buf.copy_within(current_idx.., 0);
|
||||
*next_write_idx = buf.len() - current_idx;
|
||||
}
|
||||
}
|
||||
current_idx += packet_size as usize;
|
||||
continue;
|
||||
}
|
||||
current_idx += 1;
|
||||
}
|
||||
Ok(packets_found)
|
||||
}
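A usage sketch derived from the documentation above. The MyTcHandler receiver, the handle_read_data helper and the APID value are purely illustrative; only the parser function and the PacketId slice lookup shown in this file are assumed.

use satrs_core::encoding::parse_buffer_for_ccsds_space_packets;
use satrs_core::tmtc::ReceivesTcCore;
use spacepackets::PacketId;

// Hypothetical receiver which would normally forward packets to a handler task.
#[derive(Default)]
struct MyTcHandler;

impl ReceivesTcCore for MyTcHandler {
    type Error = ();
    fn pass_tc(&mut self, _tc_raw: &[u8]) -> Result<(), Self::Error> {
        Ok(())
    }
}

// Parses the read buffer and returns the number of packets found plus the index at
// which new stream data should be appended (relevant if a broken tail packet was
// moved to the front of the buffer).
fn handle_read_data(read_buf: &mut [u8], handler: &mut MyTcHandler) -> (u32, usize) {
    let valid_packet_ids = [PacketId::const_tc(true, 0x02)];
    let mut next_write_idx = 0;
    let packets_found = parse_buffer_for_ccsds_space_packets(
        read_buf,
        valid_packet_ids.as_slice(),
        handler,
        &mut next_write_idx,
    )
    .expect("TC receiver failed");
    (packets_found, next_write_idx)
}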
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use spacepackets::{
|
||||
ecss::{tc::PusTcCreator, SerializablePusPacket},
|
||||
PacketId, SpHeader,
|
||||
};
|
||||
|
||||
use crate::encoding::tests::TcCacher;
|
||||
|
||||
use super::parse_buffer_for_ccsds_space_packets;
|
||||
|
||||
const TEST_APID_0: u16 = 0x02;
|
||||
const TEST_APID_1: u16 = 0x10;
|
||||
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
|
||||
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);
|
||||
|
||||
#[test]
|
||||
fn test_basic() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_packet() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_multi_apid() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer,
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 2);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 2);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[..packet_len_ping]
|
||||
);
|
||||
assert_eq!(
|
||||
tc_cacher.tc_queue.pop_front().unwrap(),
|
||||
buffer[packet_len_ping..packet_len_ping + packet_len_action]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_packet_multi() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
|
||||
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let packet_len_action = action_tc
|
||||
.write_to_bytes(&mut buffer[packet_len_ping..])
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer[..packet_len_ping + packet_len_action - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 1);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 1);
|
||||
// The broken packet was moved to the start, so the next write index should be after the
|
||||
// last segment missing 4 bytes.
|
||||
assert_eq!(next_write_idx, packet_len_action - 4);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_split_packet() {
|
||||
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
|
||||
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
|
||||
let mut buffer: [u8; 32] = [0; 32];
|
||||
let packet_len_ping = ping_tc
|
||||
.write_to_bytes(&mut buffer)
|
||||
.expect("writing packet failed");
|
||||
let valid_packet_ids = [TEST_PACKET_ID_0, TEST_PACKET_ID_1];
|
||||
let mut tc_cacher = TcCacher::default();
|
||||
let mut next_write_idx = 0;
|
||||
let parse_result = parse_buffer_for_ccsds_space_packets(
|
||||
&mut buffer[..packet_len_ping - 4],
|
||||
valid_packet_ids.as_slice(),
|
||||
&mut tc_cacher,
|
||||
&mut next_write_idx,
|
||||
);
|
||||
assert_eq!(next_write_idx, 0);
|
||||
assert!(parse_result.is_ok());
|
||||
let parsed_packets = parse_result.unwrap();
|
||||
assert_eq!(parsed_packets, 0);
|
||||
assert_eq!(tc_cacher.tc_queue.len(), 0);
|
||||
}
|
||||
}
|
263
satrs-core/src/encoding/cobs.rs
Normal file
@ -0,0 +1,263 @@
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
use cobs::{decode_in_place, encode, max_encoding_length};
|
||||
|
||||
/// This function encodes the given packet with COBS and also wraps the encoded packet with
|
||||
/// the sentinel value 0. It can be called repeatedly on the same encoding buffer: it takes a
/// mutable reference to the current packet index and increments it, which also allows
/// retrieving the total encoded size after the last call.
|
||||
///
|
||||
/// This function will return [false] if the given encoding buffer is not large enough to hold
|
||||
/// the encoded buffer and the two sentinel bytes, and [true] if the encoding was successful.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// ```
|
||||
/// use cobs::decode_in_place_report;
|
||||
/// use satrs_core::encoding::{encode_packet_with_cobs};
|
||||
//
|
||||
/// const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
|
||||
/// const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 2, 1];
|
||||
///
|
||||
/// let mut encoding_buf: [u8; 32] = [0; 32];
|
||||
/// let mut current_idx = 0;
|
||||
/// assert!(encode_packet_with_cobs(&SIMPLE_PACKET, &mut encoding_buf, &mut current_idx));
|
||||
/// assert!(encode_packet_with_cobs(&INVERTED_PACKET, &mut encoding_buf, &mut current_idx));
|
||||
/// assert_eq!(encoding_buf[0], 0);
|
||||
/// let dec_report = decode_in_place_report(&mut encoding_buf[1..]).expect("decoding failed");
|
||||
/// assert_eq!(encoding_buf[1 + dec_report.src_used], 0);
|
||||
/// assert_eq!(dec_report.dst_used, 5);
|
||||
/// assert_eq!(current_idx, 16);
|
||||
/// ```
|
||||
pub fn encode_packet_with_cobs(
|
||||
packet: &[u8],
|
||||
encoded_buf: &mut [u8],
|
||||
current_idx: &mut usize,
|
||||
) -> bool {
|
||||
let max_encoding_len = max_encoding_length(packet.len());
|
||||
if *current_idx + max_encoding_len + 2 > encoded_buf.len() {
|
||||
return false;
|
||||
}
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
*current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
true
|
||||
}
|
||||
|
||||
/// This function parses a given buffer for COBS encoded packets. The packet structure is
|
||||
/// expected to be like this, assuming a sentinel value of 0 as the packet delimiter:
|
||||
///
|
||||
/// 0 | ... Encoded Packet Data ... | 0 | 0 | ... Encoded Packet Data ... | 0
|
||||
///
|
||||
/// This function is also able to deal with broken tail packets at the end. If broken tail
|
||||
/// packets are detected, they are moved to the front of the buffer, and the write index for
|
||||
/// future write operations will be written to the `next_write_idx` argument.
|
||||
///
|
||||
/// The parser will write all packets which were decoded successfully to the given `tc_receiver`.
|
||||
pub fn parse_buffer_for_cobs_encoded_packets<E>(
|
||||
buf: &mut [u8],
|
||||
tc_receiver: &mut dyn ReceivesTcCore<Error = E>,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<u32, E> {
|
||||
let mut start_index_packet = 0;
|
||||
let mut start_found = false;
|
||||
let mut last_byte = false;
|
||||
let mut packets_found = 0;
|
||||
for i in 0..buf.len() {
|
||||
if i == buf.len() - 1 {
|
||||
last_byte = true;
|
||||
}
|
||||
if buf[i] == 0 {
|
||||
if !start_found && !last_byte && buf[i + 1] == 0 {
|
||||
// Special case: Consecutive sentinel values or all zeroes.
|
||||
// Skip.
|
||||
continue;
|
||||
}
|
||||
if start_found {
|
||||
let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
|
||||
if let Ok(packet_len) = decode_result {
|
||||
packets_found += 1;
|
||||
tc_receiver
|
||||
.pass_tc(&buf[start_index_packet..start_index_packet + packet_len])?;
|
||||
}
|
||||
start_found = false;
|
||||
} else {
|
||||
start_index_packet = i + 1;
|
||||
start_found = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Move split frame at the end to the front of the buffer.
|
||||
if start_index_packet > 0 && start_found && packets_found > 0 {
|
||||
buf.copy_within(start_index_packet - 1.., 0);
|
||||
*next_write_idx = buf.len() - start_index_packet + 1;
|
||||
}
|
||||
Ok(packets_found)
|
||||
}
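A round-trip sketch of the framing described above: encode one sentinel-delimited packet and parse it back. The TcCollector type and the cobs_round_trip helper are hypothetical; only functions from this module are used.

use satrs_core::encoding::{encode_packet_with_cobs, parse_buffer_for_cobs_encoded_packets};
use satrs_core::tmtc::ReceivesTcCore;

// Hypothetical receiver which stores all decoded packets.
#[derive(Default)]
struct TcCollector(Vec<Vec<u8>>);

impl ReceivesTcCore for TcCollector {
    type Error = ();
    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
        self.0.push(tc_raw.to_vec());
        Ok(())
    }
}

fn cobs_round_trip() {
    let packet = [1, 2, 3, 4, 5];
    let mut frame_buf: [u8; 32] = [0; 32];
    let mut write_idx = 0;
    // Frame the packet as 0 | COBS encoded data | 0.
    assert!(encode_packet_with_cobs(&packet, &mut frame_buf, &mut write_idx));
    let mut collector = TcCollector::default();
    let mut next_write_idx = 0;
    let num_packets = parse_buffer_for_cobs_encoded_packets(
        &mut frame_buf[..write_idx],
        &mut collector,
        &mut next_write_idx,
    )
    .unwrap();
    assert_eq!(num_packets, 1);
    assert_eq!(collector.0[0], packet);
}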
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use cobs::encode;
|
||||
|
||||
use crate::encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET};
|
||||
|
||||
use super::parse_buffer_for_cobs_encoded_packets;
|
||||
|
||||
#[test]
|
||||
fn test_parsing_simple_packet() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
let packet = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet, &SIMPLE_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parsing_consecutive_packets() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
|
||||
// Second packet
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
current_idx += encode(&INVERTED_PACKET, &mut encoded_buf[current_idx..]);
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 2);
|
||||
assert_eq!(test_sender.tc_queue.len(), 2);
|
||||
let packet0 = &test_sender.tc_queue[0];
|
||||
assert_eq!(packet0, &SIMPLE_PACKET);
|
||||
let packet1 = &test_sender.tc_queue[1];
|
||||
assert_eq!(packet1, &INVERTED_PACKET);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_split_tail_packet_only() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut next_read_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - 1],
|
||||
&mut test_sender,
|
||||
&mut next_read_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
assert_eq!(test_sender.tc_queue.len(), 0);
|
||||
assert_eq!(next_read_idx, 0);
|
||||
}
|
||||
|
||||
fn generic_test_split_packet(cut_off: usize) {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
assert!(cut_off < INVERTED_PACKET.len() + 1);
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
// Second packet
|
||||
encoded_buf[current_idx] = 0;
|
||||
let packet_start = current_idx;
|
||||
current_idx += 1;
|
||||
let encoded_len = encode(&INVERTED_PACKET, &mut encoded_buf[current_idx..]);
|
||||
assert_eq!(encoded_len, 6);
|
||||
current_idx += encoded_len;
|
||||
// We cut off the sentinel byte, so we expect the write index to be the length of the
|
||||
// packet minus the sentinel byte plus the first sentinel byte.
|
||||
let next_expected_write_idx = 1 + encoded_len - cut_off + 1;
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let mut next_write_idx = 0;
|
||||
let expected_at_start = encoded_buf[packet_start..current_idx - cut_off].to_vec();
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Cut off the sentinel byte at the end.
|
||||
&mut encoded_buf[0..current_idx - cut_off],
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, next_expected_write_idx);
|
||||
assert_eq!(encoded_buf[..next_expected_write_idx], expected_at_start);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_0() {
|
||||
generic_test_split_packet(1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_1() {
|
||||
generic_test_split_packet(2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_one_packet_and_split_tail_packet_2() {
|
||||
generic_test_split_packet(3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_zero_at_end() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut next_write_idx = 0;
|
||||
let mut current_idx = 0;
|
||||
encoded_buf[current_idx] = 5;
|
||||
current_idx += 1;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
encoded_buf[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Parse the full buffer, including the trailing sentinel byte.
|
||||
&mut encoded_buf[0..current_idx],
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 1);
|
||||
assert_eq!(test_sender.tc_queue.len(), 1);
|
||||
assert_eq!(&test_sender.tc_queue[0], &SIMPLE_PACKET);
|
||||
assert_eq!(next_write_idx, 1);
|
||||
assert_eq!(encoded_buf[0], 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_all_zeroes() {
|
||||
let mut test_sender = TcCacher::default();
|
||||
let mut all_zeroes: [u8; 5] = [0; 5];
|
||||
let mut next_write_idx = 0;
|
||||
let packets = parse_buffer_for_cobs_encoded_packets(
|
||||
// Parse a buffer which only contains zeroes.
|
||||
&mut all_zeroes,
|
||||
&mut test_sender,
|
||||
&mut next_write_idx,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(packets, 0);
|
||||
assert!(test_sender.tc_queue.is_empty());
|
||||
assert_eq!(next_write_idx, 0);
|
||||
}
|
||||
}
|
40
satrs-core/src/encoding/mod.rs
Normal file
@ -0,0 +1,40 @@
|
||||
pub mod ccsds;
|
||||
pub mod cobs;
|
||||
|
||||
pub use crate::encoding::ccsds::parse_buffer_for_ccsds_space_packets;
|
||||
pub use crate::encoding::cobs::{encode_packet_with_cobs, parse_buffer_for_cobs_encoded_packets};
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use alloc::{collections::VecDeque, vec::Vec};
|
||||
|
||||
use crate::tmtc::ReceivesTcCore;
|
||||
|
||||
use super::cobs::encode_packet_with_cobs;
|
||||
|
||||
pub(crate) const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
|
||||
pub(crate) const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 2, 1];
|
||||
|
||||
#[derive(Default)]
|
||||
pub(crate) struct TcCacher {
|
||||
pub(crate) tc_queue: VecDeque<Vec<u8>>,
|
||||
}
|
||||
|
||||
impl ReceivesTcCore for TcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
self.tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet_with_cobs(&SIMPLE_PACKET, encoded_buf, current_idx);
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) fn encode_inverted_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet_with_cobs(&INVERTED_PACKET, encoded_buf, current_idx);
|
||||
}
|
||||
}
|
@ -29,7 +29,7 @@ pub trait Executable: Send {
|
||||
fn periodic_op(&mut self, op_code: i32) -> Result<OpResult, Self::Error>;
|
||||
}
|
||||
|
||||
/// This function allows executing one task which implements the [Executable][Executable] trait
|
||||
/// This function allows executing one task which implements the [Executable] trait
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
@ -78,7 +78,7 @@ pub fn exec_sched_single<
|
||||
}
|
||||
|
||||
/// This function allows executing multiple tasks as long as the tasks implement the
|
||||
/// [Executable][Executable] trait
|
||||
/// [Executable] trait
|
||||
///
|
||||
/// # Arguments
|
||||
///
|
||||
|
@ -1,2 +0,0 @@
|
||||
//! Helper modules intended to be used on hosts with a full [std] runtime
|
||||
pub mod udp_server;
|
@ -1,4 +1,4 @@
|
||||
//! # Hardware Abstraction Layer module
|
||||
#[cfg(feature = "std")]
|
||||
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
|
||||
pub mod host;
|
||||
pub mod std;
|
||||
|
6
satrs-core/src/hal/std/mod.rs
Normal file
@ -0,0 +1,6 @@
|
||||
//! Helper modules intended to be used on systems with a full [std] runtime.
|
||||
pub mod tcp_server;
|
||||
pub mod udp_server;
|
||||
|
||||
mod tcp_cobs_server;
|
||||
mod tcp_spacepackets_server;
|
369
satrs-core/src/hal/std/tcp_cobs_server.rs
Normal file
@ -0,0 +1,369 @@
|
||||
use alloc::boxed::Box;
|
||||
use alloc::vec;
|
||||
use cobs::encode;
|
||||
use delegate::delegate;
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::net::TcpListener;
|
||||
use std::net::TcpStream;
|
||||
use std::vec::Vec;
|
||||
|
||||
use crate::encoding::parse_buffer_for_cobs_encoded_packets;
|
||||
use crate::tmtc::ReceivesTc;
|
||||
use crate::tmtc::TmPacketSource;
|
||||
|
||||
use crate::hal::std::tcp_server::{
|
||||
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
|
||||
};
|
||||
|
||||
/// Concrete [TcpTcParser] implementation for the [TcpTmtcInCobsServer].
|
||||
#[derive(Default)]
|
||||
pub struct CobsTcParser {}
|
||||
|
||||
impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>> {
|
||||
// Reader vec full, need to parse for packets.
|
||||
conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
|
||||
&mut tc_buffer[..current_write_idx],
|
||||
tc_receiver.upcast_mut(),
|
||||
next_write_idx,
|
||||
)
|
||||
.map_err(|e| TcpTmtcError::TcError(e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Concrete [TcpTmSender] implementation for the [TcpTmtcInCobsServer].
|
||||
pub struct CobsTmSender {
|
||||
tm_encoding_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl CobsTmSender {
|
||||
fn new(tm_buffer_size: usize) -> Self {
|
||||
Self {
|
||||
// The buffer should be large enough to hold the maximum expected TM size encoded with
|
||||
// COBS.
|
||||
tm_encoding_buffer: vec![0; cobs::max_encoding_length(tm_buffer_size)],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
|
||||
let mut tm_was_sent = false;
|
||||
loop {
|
||||
// Write TM until TM source is exhausted. For now, there is no limit for the amount
|
||||
// of TM written this way.
|
||||
let read_tm_len = tm_source
|
||||
.retrieve_packet(tm_buffer)
|
||||
.map_err(|e| TcpTmtcError::TmError(e))?;
|
||||
|
||||
if read_tm_len == 0 {
|
||||
return Ok(tm_was_sent);
|
||||
}
|
||||
tm_was_sent = true;
|
||||
conn_result.num_sent_tms += 1;
|
||||
|
||||
// Encode with COBS and send the frame to the client.
|
||||
let mut current_idx = 0;
|
||||
self.tm_encoding_buffer[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
current_idx += encode(
|
||||
&tm_buffer[..read_tm_len],
|
||||
&mut self.tm_encoding_buffer[current_idx..],
|
||||
);
|
||||
self.tm_encoding_buffer[current_idx] = 0;
|
||||
current_idx += 1;
|
||||
stream.write_all(&self.tm_encoding_buffer[..current_idx])?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// TCP TMTC server implementation for exchange of generic TMTC packets which are framed with the
|
||||
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
|
||||
///
|
||||
/// Telemetry will be encoded with the COBS protocol using [cobs::encode] in addition to being
|
||||
/// wrapped with the sentinel value 0 as the packet delimiter as well before being sent back to
|
||||
/// the client. Please note that the server will send as much data as it can retrieve from the
|
||||
/// [TmPacketSource] in its current implementation.
|
||||
///
|
||||
/// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
|
||||
/// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
|
||||
/// packets even from a data stream which is split up. The server will use the
|
||||
/// [parse_buffer_for_cobs_encoded_packets] function to parse for packets and pass them to a
|
||||
/// generic TC receiver. The user can use [crate::encoding::encode_packet_with_cobs] to encode
|
||||
/// telecommands sent to the server.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
|
||||
/// test also serves as the example application for this module.
|
||||
pub struct TcpTmtcInCobsServer<TmError, TcError: 'static> {
|
||||
generic_server: TcpTmtcGenericServer<TmError, TcError, CobsTmSender, CobsTcParser>,
|
||||
}
|
||||
|
||||
impl<TmError: 'static, TcError: 'static> TcpTmtcInCobsServer<TmError, TcError> {
|
||||
/// Create a new TCP TMTC server which exchanges TMTC packets encoded with
|
||||
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
|
||||
///
|
||||
/// ## Parameter
|
||||
///
|
||||
/// * `cfg` - Configuration of the server.
|
||||
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
|
||||
/// then sent back to the client.
|
||||
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
|
||||
/// forwarded to this TC receiver.
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: Box<dyn TmPacketSource<Error = TmError>>,
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = TcError>>,
|
||||
) -> Result<Self, TcpTmtcError<TmError, TcError>> {
|
||||
Ok(Self {
|
||||
generic_server: TcpTmtcGenericServer::new(
|
||||
cfg,
|
||||
CobsTcParser::default(),
|
||||
CobsTmSender::new(cfg.tm_buffer_size),
|
||||
tm_source,
|
||||
tc_receiver,
|
||||
)?,
|
||||
})
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.generic_server {
|
||||
pub fn listener(&mut self) -> &mut TcpListener;
|
||||
|
||||
/// Can be used to retrieve the local assigned address of the TCP server. This is especially
|
||||
/// useful if using the port number 0 for OS auto-assignment.
|
||||
pub fn local_addr(&self) -> std::io::Result<SocketAddr>;
|
||||
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
}
|
||||
}
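A client-side sketch based on the documentation above: frame a telecommand with encode_packet_with_cobs and send it to the server over a plain TcpStream. The send_tc_to_cobs_server helper, the server address parameter and the buffer size are placeholders.

use satrs_core::encoding::encode_packet_with_cobs;
use std::io::Write;
use std::net::TcpStream;

fn send_tc_to_cobs_server(server_addr: &str, tc: &[u8]) -> std::io::Result<()> {
    let mut encoded_buf: [u8; 1024] = [0; 1024];
    let mut current_idx = 0;
    // Wraps the telecommand with the sentinel value 0 and COBS-encodes it.
    assert!(encode_packet_with_cobs(tc, &mut encoded_buf, &mut current_idx));
    let mut stream = TcpStream::connect(server_addr)?;
    stream.write_all(&encoded_buf[..current_idx])?;
    Ok(())
}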
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use core::{
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
time::Duration,
|
||||
};
|
||||
use std::{
|
||||
io::{Read, Write},
|
||||
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
|
||||
thread,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
|
||||
hal::std::tcp_server::{
|
||||
tests::{SyncTcCacher, SyncTmSource},
|
||||
ServerConfig,
|
||||
},
|
||||
};
|
||||
use alloc::{boxed::Box, sync::Arc};
|
||||
use cobs::encode;
|
||||
|
||||
use super::TcpTmtcInCobsServer;
|
||||
|
||||
fn encode_simple_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet(&SIMPLE_PACKET, encoded_buf, current_idx)
|
||||
}
|
||||
|
||||
fn encode_inverted_packet(encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encode_packet(&INVERTED_PACKET, encoded_buf, current_idx)
|
||||
}
|
||||
|
||||
fn encode_packet(packet: &[u8], encoded_buf: &mut [u8], current_idx: &mut usize) {
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
*current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
|
||||
encoded_buf[*current_idx] = 0;
|
||||
*current_idx += 1;
|
||||
}
|
||||
|
||||
fn generic_tmtc_server(
|
||||
addr: &SocketAddr,
|
||||
tc_receiver: SyncTcCacher,
|
||||
tm_source: SyncTmSource,
|
||||
) -> TcpTmtcInCobsServer<(), ()> {
|
||||
TcpTmtcInCobsServer::new(
|
||||
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
|
||||
Box::new(tm_source),
|
||||
Box::new(tc_receiver),
|
||||
)
|
||||
.expect("TCP server generation failed")
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_basic_no_tm() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let tm_source = SyncTmSource::default();
|
||||
let mut tcp_server = generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source);
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
.expect("retrieving dest addr failed");
|
||||
let conn_handled: Arc<AtomicBool> = Default::default();
|
||||
let set_if_done = conn_handled.clone();
|
||||
// Call the connection handler in a separate thread because the call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let conn_result = result.unwrap();
|
||||
assert_eq!(conn_result.num_received_tcs, 1);
|
||||
assert_eq!(conn_result.num_sent_tms, 0);
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
// Send TC to server now.
|
||||
let mut encoded_buf: [u8; 16] = [0; 16];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
|
||||
stream
|
||||
.write_all(&encoded_buf[..current_idx])
|
||||
.expect("writing to TCP server failed");
|
||||
drop(stream);
|
||||
// A certain amount of time is allowed for the transaction to complete.
|
||||
for _ in 0..3 {
|
||||
if !conn_handled.load(Ordering::Relaxed) {
|
||||
thread::sleep(Duration::from_millis(5));
|
||||
}
|
||||
}
|
||||
if !conn_handled.load(Ordering::Relaxed) {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
// Check that the packet was received and decoded successfully.
|
||||
let mut tc_queue = tc_receiver
|
||||
.tc_queue
|
||||
.lock()
|
||||
.expect("locking tc queue failed");
|
||||
assert_eq!(tc_queue.len(), 1);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
|
||||
drop(tc_queue);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_basic_multi_tm_multi_tc() {
|
||||
let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
|
||||
let tc_receiver = SyncTcCacher::default();
|
||||
let mut tm_source = SyncTmSource::default();
|
||||
tm_source.add_tm(&INVERTED_PACKET);
|
||||
tm_source.add_tm(&SIMPLE_PACKET);
|
||||
let mut tcp_server =
|
||||
generic_tmtc_server(&auto_port_addr, tc_receiver.clone(), tm_source.clone());
|
||||
let dest_addr = tcp_server
|
||||
.local_addr()
|
||||
.expect("retrieving dest addr failed");
|
||||
let conn_handled: Arc<AtomicBool> = Default::default();
|
||||
let set_if_done = conn_handled.clone();
|
||||
// Call the connection handler in a separate thread because the call blocks.
|
||||
thread::spawn(move || {
|
||||
let result = tcp_server.handle_next_connection();
|
||||
if result.is_err() {
|
||||
panic!("handling connection failed: {:?}", result.unwrap_err());
|
||||
}
|
||||
let conn_result = result.unwrap();
|
||||
assert_eq!(conn_result.num_received_tcs, 2, "Not enough TCs received");
|
||||
assert_eq!(conn_result.num_sent_tms, 2, "Not enough TMs received");
|
||||
set_if_done.store(true, Ordering::Relaxed);
|
||||
});
|
||||
// Send TC to server now.
|
||||
let mut encoded_buf: [u8; 32] = [0; 32];
|
||||
let mut current_idx = 0;
|
||||
encode_simple_packet(&mut encoded_buf, &mut current_idx);
|
||||
encode_inverted_packet(&mut encoded_buf, &mut current_idx);
|
||||
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
|
||||
stream
|
||||
.set_read_timeout(Some(Duration::from_millis(10)))
|
||||
.expect("setting reas timeout failed");
|
||||
stream
|
||||
.write_all(&encoded_buf[..current_idx])
|
||||
.expect("writing to TCP server failed");
|
||||
// Done with writing.
|
||||
stream
|
||||
.shutdown(std::net::Shutdown::Write)
|
||||
.expect("shutting down write failed");
|
||||
let mut read_buf: [u8; 16] = [0; 16];
|
||||
let mut read_len_total = 0;
|
||||
// Timeout ensures this does not block forever.
|
||||
while read_len_total < 16 {
|
||||
let read_len = stream.read(&mut read_buf).expect("read failed");
|
||||
read_len_total += read_len;
|
||||
// Read until full expected size is available.
|
||||
if read_len == 16 {
|
||||
// Read first TM packet.
|
||||
current_idx = 0;
|
||||
assert_eq!(read_len, 16);
|
||||
assert_eq!(read_buf[0], 0);
|
||||
current_idx += 1;
|
||||
let mut dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
|
||||
.expect("COBS decoding failed");
|
||||
assert_eq!(dec_report.dst_used, 5);
|
||||
// Skip first sentinel byte.
|
||||
assert_eq!(
|
||||
&read_buf[current_idx..current_idx + INVERTED_PACKET.len()],
|
||||
&INVERTED_PACKET
|
||||
);
|
||||
current_idx += dec_report.src_used;
|
||||
// End sentinel.
|
||||
assert_eq!(read_buf[current_idx], 0, "invalid sentinel end byte");
|
||||
current_idx += 1;
|
||||
|
||||
// Read second TM packet.
|
||||
assert_eq!(read_buf[current_idx], 0);
|
||||
current_idx += 1;
|
||||
dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
|
||||
.expect("COBS decoding failed");
|
||||
assert_eq!(dec_report.dst_used, 5);
|
||||
// Skip first sentinel byte.
|
||||
assert_eq!(
|
||||
&read_buf[current_idx..current_idx + SIMPLE_PACKET.len()],
|
||||
&SIMPLE_PACKET
|
||||
);
|
||||
current_idx += dec_report.src_used;
|
||||
// End sentinel.
|
||||
assert_eq!(read_buf[current_idx], 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
drop(stream);
|
||||
|
||||
// A certain amount of time is allowed for the transaction to complete.
|
||||
for _ in 0..3 {
|
||||
if !conn_handled.load(Ordering::Relaxed) {
|
||||
thread::sleep(Duration::from_millis(5));
|
||||
}
|
||||
}
|
||||
if !conn_handled.load(Ordering::Relaxed) {
|
||||
panic!("connection was not handled properly");
|
||||
}
|
||||
// Check that the packet was received and decoded successfully.
|
||||
let mut tc_queue = tc_receiver
|
||||
.tc_queue
|
||||
.lock()
|
||||
.expect("locking tc queue failed");
|
||||
assert_eq!(tc_queue.len(), 2);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
|
||||
assert_eq!(tc_queue.pop_front().unwrap(), &INVERTED_PACKET);
|
||||
drop(tc_queue);
|
||||
}
|
||||
}
|
378
satrs-core/src/hal/std/tcp_server.rs
Normal file
@ -0,0 +1,378 @@
|
||||
//! Generic TCP TMTC servers with different TMTC format flavours.
|
||||
use alloc::vec;
|
||||
use alloc::{boxed::Box, vec::Vec};
|
||||
use core::time::Duration;
|
||||
use socket2::{Domain, Socket, Type};
|
||||
use std::io::Read;
|
||||
use std::net::TcpListener;
|
||||
use std::net::{SocketAddr, TcpStream};
|
||||
use std::thread;
|
||||
|
||||
use crate::tmtc::{ReceivesTc, TmPacketSource};
|
||||
use thiserror::Error;
|
||||
|
||||
// Re-export the TMTC in COBS server.
|
||||
pub use crate::hal::std::tcp_cobs_server::{CobsTcParser, CobsTmSender, TcpTmtcInCobsServer};
|
||||
pub use crate::hal::std::tcp_spacepackets_server::{
|
||||
SpacepacketsTcParser, SpacepacketsTmSender, TcpSpacepacketsServer,
|
||||
};
|
||||
|
||||
/// Configuration struct for the generic TCP TMTC server
|
||||
///
|
||||
/// ## Parameters
|
||||
///
|
||||
/// * `addr` - Address of the TCP server.
|
||||
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
|
||||
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
|
||||
/// to reduce CPU load.
|
||||
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [TmPacketSource] and
|
||||
/// encoding of that data. This buffer should be large enough to hold the maximum expected
|
||||
/// TM size read from the packet source.
|
||||
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
|
||||
/// the client. It is recommended to make this buffer larger to allow reading multiple
|
||||
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
|
||||
/// bytes. The buffer should at the very least be large enough to hold the maximum expected
|
||||
/// telecommand size.
|
||||
/// * `reuse_addr` - Can be used to set the `SO_REUSEADDR` option on the raw socket. This is
|
||||
/// especially useful if the address and port are static for the server. Set to false by
|
||||
/// default.
|
||||
/// * `reuse_port` - Can be used to set the `SO_REUSEPORT` option on the raw socket. This is
|
||||
/// especially useful if the address and port are static for the server. Set to false by
|
||||
/// default.
|
||||
#[derive(Debug, Copy, Clone)]
|
||||
pub struct ServerConfig {
|
||||
pub addr: SocketAddr,
|
||||
pub inner_loop_delay: Duration,
|
||||
pub tm_buffer_size: usize,
|
||||
pub tc_buffer_size: usize,
|
||||
pub reuse_addr: bool,
|
||||
pub reuse_port: bool,
|
||||
}
|
||||
|
||||
impl ServerConfig {
|
||||
pub fn new(
|
||||
addr: SocketAddr,
|
||||
inner_loop_delay: Duration,
|
||||
tm_buffer_size: usize,
|
||||
tc_buffer_size: usize,
|
||||
) -> Self {
|
||||
Self {
|
||||
addr,
|
||||
inner_loop_delay,
|
||||
tm_buffer_size,
|
||||
tc_buffer_size,
|
||||
reuse_addr: false,
|
||||
reuse_port: false,
|
||||
}
|
||||
}
|
||||
}
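A possible way to build a configuration from the documented defaults and then opt into address reuse via struct update syntax; the example_server_config helper, the port number and the buffer sizes are example values only.

use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::time::Duration;

use satrs_core::hal::std::tcp_server::ServerConfig;

fn example_server_config() -> ServerConfig {
    let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7301);
    ServerConfig {
        // Useful for static address/port combinations, disabled by default.
        reuse_addr: true,
        ..ServerConfig::new(addr, Duration::from_millis(2), 1024, 4096)
    }
}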
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum TcpTmtcError<TmError, TcError> {
|
||||
#[error("TM retrieval error: {0}")]
|
||||
TmError(TmError),
|
||||
#[error("TC retrieval error: {0}")]
|
||||
TcError(TcError),
|
||||
#[error("io error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
|
||||
/// Result of one connection attempt. Contains the client address if a connection was established,
|
||||
/// in addition to the number of telecommands and telemetry packets exchanged.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ConnectionResult {
|
||||
pub addr: Option<SocketAddr>,
|
||||
pub num_received_tcs: u32,
|
||||
pub num_sent_tms: u32,
|
||||
}
|
||||
|
||||
/// Generic parser abstraction for an object which can parse for telecommands given a raw
|
||||
/// bytestream received from a TCP socket and send them to a generic [ReceivesTc] telecommand
|
||||
/// receiver. This allows different encoding schemes for telecommands.
|
||||
pub trait TcpTcParser<TmError, TcError> {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
|
||||
/// Generic sender abstraction for an object which can pull telemetry from a given TM source
|
||||
/// using a [TmPacketSource] and then send them back to a client using a given [TcpStream].
|
||||
/// The concrete implementation can also perform any encoding steps which are necessary before
|
||||
/// sending back the data to a client.
|
||||
pub trait TcpTmSender<TmError, TcError> {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
|
||||
/// TCP TMTC server implementation for exchange of generic TMTC packets in a generic way which
|
||||
/// stays agnostic to the encoding scheme and format used for both telecommands and telemetry.
|
||||
///
|
||||
/// This server implements a generic TMTC handling logic and allows modifying its behaviour
|
||||
/// through the following 4 core abstractions:
|
||||
///
|
||||
/// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
|
||||
/// 2. Parsed telecommands will be sent to the [ReceivesTc] telecommand receiver.
|
||||
/// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
|
||||
/// 4. [TmPacketSource] as a generic TM source used by the [TcpTmSender].
|
||||
///
|
||||
/// It is possible to specify custom abstractions to build a dedicated TCP TMTC server without
|
||||
/// having to re-implement common logic.
|
||||
///
|
||||
/// Currently, this framework offers the following concrete implementations:
|
||||
///
|
||||
/// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
|
||||
pub struct TcpTmtcGenericServer<
|
||||
TmError,
|
||||
TcError,
|
||||
TmHandler: TcpTmSender<TmError, TcError>,
|
||||
TcHandler: TcpTcParser<TmError, TcError>,
|
||||
> {
|
||||
base: TcpTmtcServerBase<TmError, TcError>,
|
||||
tc_handler: TcHandler,
|
||||
tm_handler: TmHandler,
|
||||
}
|
||||
|
||||
impl<
|
||||
TmError: 'static,
|
||||
TcError: 'static,
|
||||
TmSender: TcpTmSender<TmError, TcError>,
|
||||
TcParser: TcpTcParser<TmError, TcError>,
|
||||
> TcpTmtcGenericServer<TmError, TcError, TmSender, TcParser>
|
||||
{
|
||||
/// Create a new generic TMTC server instance.
|
||||
///
|
||||
/// ## Parameter
|
||||
///
|
||||
/// * `cfg` - Configuration of the server.
|
||||
/// * `tc_parser` - Parser which extracts telecommands from the raw bytestream received from
|
||||
/// the client.
|
||||
/// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
|
||||
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
|
||||
/// then sent back to the client.
|
||||
/// * `tc_receiver` - Any received telecommand which was decoded successfully will be forwarded
|
||||
/// to this TC receiver.
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tc_parser: TcParser,
|
||||
tm_sender: TmSender,
|
||||
tm_source: Box<dyn TmPacketSource<Error = TmError>>,
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = TcError>>,
|
||||
) -> Result<TcpTmtcGenericServer<TmError, TcError, TmSender, TcParser>, std::io::Error> {
|
||||
Ok(Self {
|
||||
base: TcpTmtcServerBase::new(cfg, tm_source, tc_receiver)?,
|
||||
tc_handler: tc_parser,
|
||||
tm_handler: tm_sender,
|
||||
})
|
||||
}
|
||||
|
||||
/// Retrieve the internal [TcpListener] class.
|
||||
pub fn listener(&mut self) -> &mut TcpListener {
|
||||
self.base.listener()
|
||||
}
|
||||
|
||||
/// Can be used to retrieve the local assigned address of the TCP server. This is especially
|
||||
/// useful if using the port number 0 for OS auto-assignment.
|
||||
pub fn local_addr(&self) -> std::io::Result<SocketAddr> {
|
||||
self.base.local_addr()
|
||||
}
|
||||
|
||||
/// This call is used to handle the next connection to a client. Right now, it performs
|
||||
/// the following steps:
|
||||
///
|
||||
/// 1. It calls the [std::net::TcpListener::accept] method internally using the blocking API
|
||||
/// until a client connects.
|
||||
/// 2. It reads all the telecommands from the client and parses all received data using the
|
||||
/// user specified [TcpTcParser].
|
||||
/// 3. After reading and parsing all telecommands, it sends back all telemetry using the
|
||||
/// user specified [TcpTmSender].
|
||||
///
|
||||
/// The server will delay for a user-specified period if the client connects to the server
|
||||
/// for prolonged periods and there is no traffic for the server. This is the case if the
|
||||
/// client does not send any telecommands and no telemetry needs to be sent back to the client.
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>> {
|
||||
let mut connection_result = ConnectionResult::default();
|
||||
let mut current_write_idx;
|
||||
let mut next_write_idx = 0;
|
||||
let (mut stream, addr) = self.base.listener.accept()?;
|
||||
stream.set_nonblocking(true)?;
|
||||
connection_result.addr = Some(addr);
|
||||
current_write_idx = next_write_idx;
|
||||
loop {
|
||||
let read_result = stream.read(&mut self.base.tc_buffer[current_write_idx..]);
|
||||
match read_result {
|
||||
Ok(0) => {
|
||||
// Connection closed by client. If any TC was read, parse for complete packets.
|
||||
// After that, break the outer loop.
|
||||
if current_write_idx > 0 {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.base.tc_buffer,
|
||||
self.base.tc_receiver.as_mut(),
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
)?;
|
||||
}
|
||||
break;
|
||||
}
|
||||
Ok(read_len) => {
|
||||
current_write_idx += read_len;
|
||||
// TC buffer is full, we must parse for complete packets now.
|
||||
if current_write_idx == self.base.tc_buffer.capacity() {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.base.tc_buffer,
|
||||
self.base.tc_receiver.as_mut(),
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
)?;
|
||||
current_write_idx = next_write_idx;
|
||||
}
|
||||
}
|
||||
Err(e) => match e.kind() {
|
||||
// As per [TcpStream::set_read_timeout] documentation, this should work for
|
||||
// both UNIX and Windows.
|
||||
std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
|
||||
self.tc_handler.handle_tc_parsing(
|
||||
&mut self.base.tc_buffer,
|
||||
self.base.tc_receiver.as_mut(),
|
||||
&mut connection_result,
|
||||
current_write_idx,
|
||||
&mut next_write_idx,
|
||||
)?;
|
||||
current_write_idx = next_write_idx;
|
||||
|
||||
if !self.tm_handler.handle_tm_sending(
|
||||
&mut self.base.tm_buffer,
|
||||
self.base.tm_source.as_mut(),
|
||||
&mut connection_result,
|
||||
&mut stream,
|
||||
)? {
|
||||
// No TC read, no TM was sent, but the client has not disconnected.
|
||||
// Perform an inner delay to avoid burning CPU time.
|
||||
thread::sleep(self.base.inner_loop_delay);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
return Err(TcpTmtcError::Io(e));
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
self.tm_handler.handle_tm_sending(
|
||||
&mut self.base.tm_buffer,
|
||||
self.base.tm_source.as_mut(),
|
||||
&mut connection_result,
|
||||
&mut stream,
|
||||
)?;
|
||||
Ok(connection_result)
|
||||
}
|
||||
}
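A sketch of how the blocking handle_next_connection call can be driven in a dedicated thread, shown here for the concrete TcpTmtcInCobsServer<(), ()> flavour. The spawn_server_thread helper, the logging and the decision to break on errors are placeholders.

use std::thread;

use satrs_core::hal::std::tcp_server::TcpTmtcInCobsServer;

fn spawn_server_thread(mut server: TcpTmtcInCobsServer<(), ()>) -> thread::JoinHandle<()> {
    thread::spawn(move || loop {
        match server.handle_next_connection() {
            Ok(conn_result) => println!(
                "handled connection from {:?}: {} TC(s) received, {} TM(s) sent",
                conn_result.addr, conn_result.num_received_tcs, conn_result.num_sent_tms
            ),
            Err(e) => {
                eprintln!("TCP server error: {:?}", e);
                break;
            }
        }
    })
}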
|
||||
|
||||
pub(crate) struct TcpTmtcServerBase<TmError, TcError> {
|
||||
pub(crate) listener: TcpListener,
|
||||
pub(crate) inner_loop_delay: Duration,
|
||||
pub(crate) tm_source: Box<dyn TmPacketSource<Error = TmError>>,
|
||||
pub(crate) tm_buffer: Vec<u8>,
|
||||
pub(crate) tc_receiver: Box<dyn ReceivesTc<Error = TcError>>,
|
||||
pub(crate) tc_buffer: Vec<u8>,
|
||||
}
|
||||
|
||||
impl<TmError, TcError> TcpTmtcServerBase<TmError, TcError> {
|
||||
pub(crate) fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: Box<dyn TmPacketSource<Error = TmError>>,
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = TcError>>,
|
||||
) -> Result<Self, std::io::Error> {
|
||||
// Create a raw socket, configure it, and bind it to the given address before
// converting it into a TCP listener.
|
||||
let socket = Socket::new(Domain::IPV4, Type::STREAM, None)?;
|
||||
socket.set_reuse_address(cfg.reuse_addr)?;
|
||||
socket.set_reuse_port(cfg.reuse_port)?;
|
||||
let addr = (cfg.addr).into();
|
||||
socket.bind(&addr)?;
|
||||
socket.listen(128)?;
|
||||
Ok(Self {
|
||||
listener: socket.into(),
|
||||
inner_loop_delay: cfg.inner_loop_delay,
|
||||
tm_source,
|
||||
tm_buffer: vec![0; cfg.tm_buffer_size],
|
||||
tc_receiver,
|
||||
tc_buffer: vec![0; cfg.tc_buffer_size],
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn listener(&mut self) -> &mut TcpListener {
|
||||
&mut self.listener
|
||||
}
|
||||
|
||||
pub(crate) fn local_addr(&self) -> std::io::Result<SocketAddr> {
|
||||
self.listener.local_addr()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) mod tests {
|
||||
use std::sync::Mutex;
|
||||
|
||||
use alloc::{collections::VecDeque, sync::Arc, vec::Vec};
|
||||
|
||||
use crate::tmtc::{ReceivesTcCore, TmPacketSourceCore};
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct SyncTcCacher {
|
||||
pub(crate) tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
impl ReceivesTcCore for SyncTcCacher {
|
||||
type Error = ();
|
||||
|
||||
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
|
||||
let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
|
||||
tc_queue.push_back(tc_raw.to_vec());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Default, Clone)]
|
||||
pub(crate) struct SyncTmSource {
|
||||
tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
|
||||
}
|
||||
|
||||
impl SyncTmSource {
|
||||
pub(crate) fn add_tm(&mut self, tm: &[u8]) {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
tm_queue.push_back(tm.to_vec());
|
||||
}
|
||||
}
|
||||
|
||||
impl TmPacketSourceCore for SyncTmSource {
|
||||
type Error = ();
|
||||
|
||||
fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
|
||||
let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
|
||||
if !tm_queue.is_empty() {
|
||||
let next_vec = tm_queue.front().unwrap();
|
||||
if buffer.len() < next_vec.len() {
|
||||
panic!(
|
||||
"provided buffer too small, must be at least {} bytes",
|
||||
next_vec.len()
|
||||
);
|
||||
}
|
||||
let next_vec = tm_queue.pop_front().unwrap();
|
||||
buffer[0..next_vec.len()].copy_from_slice(&next_vec);
|
||||
return Ok(next_vec.len());
|
||||
}
|
||||
Ok(0)
|
||||
}
|
||||
}
|
||||
}
|
347
satrs-core/src/hal/std/tcp_spacepackets_server.rs
Normal file
@ -0,0 +1,347 @@
|
||||
use delegate::delegate;
|
||||
use std::{
|
||||
io::Write,
|
||||
net::{SocketAddr, TcpListener, TcpStream},
|
||||
};
|
||||
|
||||
use alloc::boxed::Box;
|
||||
|
||||
use crate::{
|
||||
encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets},
|
||||
tmtc::{ReceivesTc, TmPacketSource},
|
||||
};
|
||||
|
||||
use super::tcp_server::{
|
||||
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
|
||||
};
|
||||
|
||||
/// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer].
|
||||
pub struct SpacepacketsTcParser {
|
||||
packet_id_lookup: Box<dyn PacketIdLookup + Send>,
|
||||
}
|
||||
|
||||
impl SpacepacketsTcParser {
|
||||
pub fn new(packet_id_lookup: Box<dyn PacketIdLookup + Send>) -> Self {
|
||||
Self { packet_id_lookup }
|
||||
}
|
||||
}
|
||||
|
||||
impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTcParser {
|
||||
fn handle_tc_parsing(
|
||||
&mut self,
|
||||
tc_buffer: &mut [u8],
|
||||
tc_receiver: &mut (impl ReceivesTc<Error = TcError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
current_write_idx: usize,
|
||||
next_write_idx: &mut usize,
|
||||
) -> Result<(), TcpTmtcError<TmError, TcError>> {
|
||||
// Reader vec full, need to parse for packets.
|
||||
conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
|
||||
&mut tc_buffer[..current_write_idx],
|
||||
self.packet_id_lookup.as_ref(),
|
||||
tc_receiver.upcast_mut(),
|
||||
next_write_idx,
|
||||
)
|
||||
.map_err(|e| TcpTmtcError::TcError(e))?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Concrete [TcpTmSender] implementation for the [TcpSpacepacketsServer].
|
||||
#[derive(Default)]
|
||||
pub struct SpacepacketsTmSender {}
|
||||
|
||||
impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
|
||||
fn handle_tm_sending(
|
||||
&mut self,
|
||||
tm_buffer: &mut [u8],
|
||||
tm_source: &mut (impl TmPacketSource<Error = TmError> + ?Sized),
|
||||
conn_result: &mut ConnectionResult,
|
||||
stream: &mut TcpStream,
|
||||
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
|
||||
let mut tm_was_sent = false;
|
||||
loop {
|
||||
// Write TM until TM source is exhausted. For now, there is no limit for the amount
|
||||
// of TM written this way.
|
||||
let read_tm_len = tm_source
|
||||
.retrieve_packet(tm_buffer)
|
||||
.map_err(|e| TcpTmtcError::TmError(e))?;
|
||||
|
||||
if read_tm_len == 0 {
|
||||
return Ok(tm_was_sent);
|
||||
}
|
||||
tm_was_sent = true;
|
||||
conn_result.num_sent_tms += 1;
|
||||
|
||||
stream.write_all(&tm_buffer[..read_tm_len])?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// TCP TMTC server implementation for exchange of tightly stuffed
|
||||
/// [CCSDS space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf).
|
||||
///
|
||||
/// This server only works if
|
||||
/// [CCSDS 133.0-B-2 space packets](https://public.ccsds.org/Pubs/133x0b2e1.pdf) are the only
|
||||
/// packet type being exchanged. It uses the CCSDS [spacepackets::PacketId] as the packet delimiter
|
||||
/// and start marker when parsing for packets. The user specifies a set of expected
|
||||
/// [spacepackets::PacketId]s as part of the server configuration for that purpose.
|
||||
///
|
||||
/// ## Example
|
||||
///
|
||||
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
|
||||
/// also serves as the example application for this module.
|
||||
pub struct TcpSpacepacketsServer<TmError, TcError: 'static> {
|
||||
generic_server:
|
||||
TcpTmtcGenericServer<TmError, TcError, SpacepacketsTmSender, SpacepacketsTcParser>,
|
||||
}
|
||||
|
||||
impl<TmError: 'static, TcError: 'static> TcpSpacepacketsServer<TmError, TcError> {
|
||||
/// Create a new TCP TMTC server which exchanges CCSDS space packets.
|
||||
///
|
||||
/// ## Parameter
|
||||
///
|
||||
/// * `cfg` - Configuration of the server.
|
||||
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
|
||||
/// then sent back to the client.
|
||||
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
|
||||
/// forwarded to this TC receiver.
|
||||
/// * `packet_id_lookup` - This lookup table contains the relevant packets IDs for packet
|
||||
/// parsing. This mechanism is used to have a start marker for finding CCSDS packets.
|
||||
pub fn new(
|
||||
cfg: ServerConfig,
|
||||
tm_source: Box<dyn TmPacketSource<Error = TmError>>,
|
||||
tc_receiver: Box<dyn ReceivesTc<Error = TcError>>,
|
||||
packet_id_lookup: Box<dyn PacketIdLookup + Send>,
|
||||
) -> Result<Self, TcpTmtcError<TmError, TcError>> {
|
||||
Ok(Self {
|
||||
generic_server: TcpTmtcGenericServer::new(
|
||||
cfg,
|
||||
SpacepacketsTcParser::new(packet_id_lookup),
|
||||
SpacepacketsTmSender::default(),
|
||||
tm_source,
|
||||
tc_receiver,
|
||||
)?,
|
||||
})
|
||||
}
|
||||
|
||||
delegate! {
|
||||
to self.generic_server {
|
||||
pub fn listener(&mut self) -> &mut TcpListener;
|
||||
|
||||
/// Can be used to retrieve the local assigned address of the TCP server. This is especially
|
||||
/// useful if using the port number 0 for OS auto-assignment.
|
||||
pub fn local_addr(&self) -> std::io::Result<SocketAddr>;
|
||||
|
||||
/// Delegation to the [TcpTmtcGenericServer::handle_next_connection] call.
|
||||
pub fn handle_next_connection(
|
||||
&mut self,
|
||||
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
|
||||
}
|
||||
}
|
||||
}
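
// Editor's note: the following module is a hedged usage sketch, not part of the original
// changeset. It shows how a [TcpSpacepacketsServer] could be wired up, mirroring the integration
// tests referenced in the type documentation. `NoopTcReceiver` and `EmptyTmSource` are
// illustrative stand-ins for application specific TC/TM handling types.
#[cfg(test)]
mod usage_sketch {
    use alloc::boxed::Box;
    use core::time::Duration;
    use hashbrown::HashSet;
    use spacepackets::PacketId;
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    use crate::hal::std::tcp_server::ServerConfig;
    use crate::tmtc::{ReceivesTcCore, TmPacketSourceCore};

    use super::TcpSpacepacketsServer;

    // Discards every received TC. A real receiver would forward the packet to the TC handling
    // chain, like the `SyncTcCacher` used by the tests.
    struct NoopTcReceiver;
    impl ReceivesTcCore for NoopTcReceiver {
        type Error = ();
        fn pass_tc(&mut self, _tc_raw: &[u8]) -> Result<(), Self::Error> {
            Ok(())
        }
    }

    // Never has telemetry to offer. A real source would drain a TM store or queue.
    struct EmptyTmSource;
    impl TmPacketSourceCore for EmptyTmSource {
        type Error = ();
        fn retrieve_packet(&mut self, _buffer: &mut [u8]) -> Result<usize, Self::Error> {
            Ok(0)
        }
    }

    #[allow(dead_code)]
    fn serve_one_connection() {
        // The expected packet IDs are used as start markers when parsing the received TC stream.
        let mut packet_id_lookup = HashSet::new();
        packet_id_lookup.insert(PacketId::const_tc(true, 0x02));
        let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let mut server: TcpSpacepacketsServer<(), ()> = TcpSpacepacketsServer::new(
            ServerConfig::new(addr, Duration::from_millis(2), 1024, 1024),
            Box::new(EmptyTmSource),
            Box::new(NoopTcReceiver),
            Box::new(packet_id_lookup),
        )
        .expect("creating TCP server failed");
        // Blocks until one client connects, exchanges packets and closes the connection again.
        let conn_result = server
            .handle_next_connection()
            .expect("handling connection failed");
        // conn_result.num_received_tcs / num_sent_tms hold the connection statistics.
        let _ = (conn_result.num_received_tcs, conn_result.num_sent_tms);
    }
}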

#[cfg(test)]
mod tests {
    use core::{
        sync::atomic::{AtomicBool, Ordering},
        time::Duration,
    };
    #[allow(unused_imports)]
    use std::println;
    use std::{
        io::{Read, Write},
        net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
        thread,
    };

    use alloc::{boxed::Box, sync::Arc};
    use hashbrown::HashSet;
    use spacepackets::{
        ecss::{tc::PusTcCreator, SerializablePusPacket},
        PacketId, SpHeader,
    };

    use crate::hal::std::tcp_server::{
        tests::{SyncTcCacher, SyncTmSource},
        ServerConfig,
    };

    use super::TcpSpacepacketsServer;

    const TEST_APID_0: u16 = 0x02;
    const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);
    const TEST_APID_1: u16 = 0x10;
    const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1);

    fn generic_tmtc_server(
        addr: &SocketAddr,
        tc_receiver: SyncTcCacher,
        tm_source: SyncTmSource,
        packet_id_lookup: HashSet<PacketId>,
    ) -> TcpSpacepacketsServer<(), ()> {
        TcpSpacepacketsServer::new(
            ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
            Box::new(tm_source),
            Box::new(tc_receiver),
            Box::new(packet_id_lookup),
        )
        .expect("TCP server generation failed")
    }

    #[test]
    fn test_basic_tc_only() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let tm_source = SyncTmSource::default();
        let mut packet_id_lookup = HashSet::new();
        packet_id_lookup.insert(TEST_PACKET_ID_0);
        let mut tcp_server = generic_tmtc_server(
            &auto_port_addr,
            tc_receiver.clone(),
            tm_source,
            packet_id_lookup,
        );
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();
        // Call the connection handler in a separate thread, does block.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(conn_result.num_received_tcs, 1);
            assert_eq!(conn_result.num_sent_tms, 0);
            set_if_done.store(true, Ordering::Relaxed);
        });
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
        let tc_0 = ping_tc.to_vec().expect("packet generation failed");
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .write_all(&tc_0)
            .expect("writing to TCP server failed");
        drop(stream);

        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that the TC has arrived.
        let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
        assert_eq!(tc_queue.len(), 1);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
    }

    #[test]
    fn test_multi_tc_multi_tm() {
        let auto_port_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);
        let tc_receiver = SyncTcCacher::default();
        let mut tm_source = SyncTmSource::default();

        // Add telemetry
        let mut total_tm_len = 0;
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
        let tm_0 = verif_tm.to_vec().expect("writing packet failed");
        total_tm_len += tm_0.len();
        tm_source.add_tm(&tm_0);
        let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
        let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true);
        let tm_1 = verif_tm.to_vec().expect("writing packet failed");
        total_tm_len += tm_1.len();
        tm_source.add_tm(&tm_1);

        // Set up server
        let mut packet_id_lookup = HashSet::new();
        packet_id_lookup.insert(TEST_PACKET_ID_0);
        packet_id_lookup.insert(TEST_PACKET_ID_1);
        let mut tcp_server = generic_tmtc_server(
            &auto_port_addr,
            tc_receiver.clone(),
            tm_source,
            packet_id_lookup,
        );
        let dest_addr = tcp_server
            .local_addr()
            .expect("retrieving dest addr failed");
        let conn_handled: Arc<AtomicBool> = Default::default();
        let set_if_done = conn_handled.clone();

        // Call the connection handler in a separate thread, does block.
        thread::spawn(move || {
            let result = tcp_server.handle_next_connection();
            if result.is_err() {
                panic!("handling connection failed: {:?}", result.unwrap_err());
            }
            let conn_result = result.unwrap();
            assert_eq!(
                conn_result.num_received_tcs, 2,
                "wrong number of received TCs"
            );
            assert_eq!(conn_result.num_sent_tms, 2, "wrong number of sent TMs");
            set_if_done.store(true, Ordering::Relaxed);
        });
        let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
        stream
            .set_read_timeout(Some(Duration::from_millis(10)))
            .expect("setting read timeout failed");

        // Send telecommands
        let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
        let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
        let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
        stream
            .write_all(&tc_0)
            .expect("writing to TCP server failed");
        let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap();
        let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true);
        let tc_1 = action_tc.to_vec().expect("action tc creation failed");
        stream
            .write_all(&tc_1)
            .expect("writing to TCP server failed");

        // Done with writing.
        stream
            .shutdown(std::net::Shutdown::Write)
            .expect("shutting down write failed");
        let mut read_buf: [u8; 32] = [0; 32];
        let mut current_idx = 0;
        let mut read_len_total = 0;
        // Timeout ensures this does not block forever.
        while read_len_total < total_tm_len {
            let read_len = stream
                .read(&mut read_buf[current_idx..])
                .expect("read failed");
            current_idx += read_len;
            read_len_total += read_len;
        }
        drop(stream);
        assert_eq!(read_buf[..tm_0.len()], tm_0);
        assert_eq!(read_buf[tm_0.len()..tm_0.len() + tm_1.len()], tm_1);

        // A certain amount of time is allowed for the transaction to complete.
        for _ in 0..3 {
            if !conn_handled.load(Ordering::Relaxed) {
                thread::sleep(Duration::from_millis(5));
            }
        }
        if !conn_handled.load(Ordering::Relaxed) {
            panic!("connection was not handled properly");
        }
        // Check that both TCs have arrived.
        let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
        assert_eq!(tc_queue.len(), 2);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
        assert_eq!(tc_queue.pop_front().unwrap(), tc_1);
    }
}
@ -1,4 +1,4 @@
//! UDP server helper components
//! Generic UDP TC server.
use crate::tmtc::{ReceivesTc, ReceivesTcCore};
use std::boxed::Box;
use std::io::{Error, ErrorKind};
@ -6,7 +6,8 @@ use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
use std::vec;
use std::vec::Vec;

/// This TC server helper can be used to receive raw PUS telecommands thorough a UDP interface.
/// This UDP server can be used to receive CCSDS space packet telecommands or any other telecommand
/// format.
///
/// It caches all received telecommands into a vector. The maximum expected telecommand size should
/// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
@ -19,7 +20,7 @@ use std::vec::Vec;
/// ```
/// use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
/// use spacepackets::ecss::SerializablePusPacket;
/// use satrs_core::hal::host::udp_server::UdpTcServer;
/// use satrs_core::hal::std::udp_server::UdpTcServer;
/// use satrs_core::tmtc::{ReceivesTc, ReceivesTcCore};
/// use spacepackets::SpHeader;
/// use spacepackets::ecss::tc::PusTcCreator;
@ -51,9 +52,9 @@ use std::vec::Vec;
/// .expect("Error sending PUS TC via UDP");
/// ```
///
/// The [fsrc-example crate](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/fsrc-example)
/// The [satrs-example crate](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/satrs-example)
/// server code also includes
/// [example code](https://egit.irs.uni-stuttgart.de/rust/fsrc-launchpad/src/branch/main/fsrc-example/src/bin/obsw/tmtc.rs)
/// [example code](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/tmtc.rs#L67)
/// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
/// and then forwards them to a generic CCSDS packet receiver.
pub struct UdpTcServer<E> {
@ -140,7 +141,7 @@ impl<E: 'static> UdpTcServer<E> {

#[cfg(test)]
mod tests {
    use crate::hal::host::udp_server::{ReceiveResult, UdpTcServer};
    use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
    use crate::tmtc::ReceivesTcCore;
    use spacepackets::ecss::tc::PusTcCreator;
    use spacepackets::ecss::SerializablePusPacket;
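
// Editor's note: hedged sketch, not part of the original changeset. The UDP server documentation
// above mentions forwarding received telecommands to a generic packet receiver; a common pattern
// (also used by the referenced satrs-example code) is a small [crate::tmtc::ReceivesTcCore]
// implementor which pushes the raw bytes into an mpsc channel. The type and channel layout shown
// here are illustrative only.
#[cfg(test)]
mod forwarding_sketch {
    use crate::tmtc::ReceivesTcCore;
    use std::sync::mpsc::{SendError, Sender};
    use std::vec::Vec;

    #[allow(dead_code)]
    struct MpscTcForwarder {
        tc_sender: Sender<Vec<u8>>,
    }

    impl ReceivesTcCore for MpscTcForwarder {
        type Error = SendError<Vec<u8>>;

        fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
            // Copy the datagram payload out of the receive buffer and hand it to the consumer
            // thread via the channel.
            self.tc_sender.send(tc_raw.to_vec())
        }
    }
}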
@ -20,6 +20,8 @@ extern crate downcast_rs;
#[cfg(any(feature = "std", test))]
extern crate std;

pub mod cfdp;
pub mod encoding;
pub mod error;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
@ -72,12 +72,33 @@ pub trait ReceivesTcCore {
/// Extension trait of [ReceivesTcCore] which allows downcasting by implementing [Downcast] and
/// is also sendable.
#[cfg(feature = "alloc")]
pub trait ReceivesTc: ReceivesTcCore + Downcast + Send {}
pub trait ReceivesTc: ReceivesTcCore + Downcast + Send {
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error>;
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error>;
}

/// Blanket implementation to automatically implement [ReceivesTc] when the [alloc] feature
/// is enabled.
#[cfg(feature = "alloc")]
impl<T> ReceivesTc for T where T: ReceivesTcCore + Send + 'static {}
impl<T> ReceivesTc for T
where
    T: ReceivesTcCore + Send + 'static,
{
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast(&self) -> &dyn ReceivesTcCore<Error = Self::Error> {
        self
    }
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast_mut(&mut self) -> &mut dyn ReceivesTcCore<Error = Self::Error> {
        self
    }
}

#[cfg(feature = "alloc")]
impl_downcast!(ReceivesTc assoc Error);
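
// Editor's note: hedged sketch, not part of the original changeset. It illustrates why the
// manual `upcast`/`upcast_mut` helpers exist: a `&mut dyn ReceivesTc` currently cannot be
// coerced into a `&mut dyn ReceivesTcCore` implicitly, so callers holding the extension trait
// object (for example the TCP parser calling `tc_receiver.upcast_mut()` elsewhere in this
// changeset) convert explicitly before handing the receiver to code which only needs the core
// trait.
#[cfg(feature = "alloc")]
#[allow(dead_code)]
fn forward_raw_tc<E>(
    receiver: &mut (impl ReceivesTc<Error = E> + ?Sized),
    tc_raw: &[u8],
) -> Result<(), E> {
    // Explicit upcast; can be removed once trait upcasting coercion is stabilized.
    let core_receiver: &mut dyn ReceivesTcCore<Error = E> = receiver.upcast_mut();
    core_receiver.pass_tc(tc_raw)
}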
@ -92,3 +113,41 @@ pub trait ReceivesCcsdsTc {
    type Error;
    fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error>;
}

/// Generic trait for a TM packet source, with no restrictions on the type of TM.
/// Implementors write the telemetry into the provided buffer and return the size of the telemetry.
pub trait TmPacketSourceCore {
    type Error;
    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error>;
}

/// Extension trait of [TmPacketSourceCore] which allows downcasting by implementing [Downcast] and
/// is also sendable.
#[cfg(feature = "alloc")]
pub trait TmPacketSource: TmPacketSourceCore + Downcast + Send {
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error>;
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error>;
}

/// Blanket implementation to automatically implement [TmPacketSource] when the [alloc] feature
/// is enabled.
#[cfg(feature = "alloc")]
impl<T> TmPacketSource for T
where
    T: TmPacketSourceCore + Send + 'static,
{
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast(&self) -> &dyn TmPacketSourceCore<Error = Self::Error> {
        self
    }
    // Remove this once trait upcasting coercion has been implemented.
    // Tracking issue: https://github.com/rust-lang/rust/issues/65991
    fn upcast_mut(&mut self) -> &mut dyn TmPacketSourceCore<Error = Self::Error> {
        self
    }
}
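
// Editor's note: hedged sketch, not part of the original changeset. A minimal
// [TmPacketSourceCore] implementor which hands out one canned packet exactly once; real sources
// would typically drain a TM store or queue, like the `SyncTmSource` helper used by the TCP
// server integration tests.
#[allow(dead_code)]
struct OneShotTmSource {
    packet: &'static [u8],
    sent: bool,
}

impl TmPacketSourceCore for OneShotTmSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        if self.sent {
            // Source exhausted: report that no bytes were written.
            return Ok(0);
        }
        if buffer.len() < self.packet.len() {
            // The caller provided an undersized buffer.
            return Err(());
        }
        buffer[..self.packet.len()].copy_from_slice(self.packet);
        self.sent = true;
        Ok(self.packet.len())
    }
}
// Because `OneShotTmSource` is `Send + 'static`, the blanket implementation above also provides
// [TmPacketSource] for it, so a boxed instance can be passed directly to the TCP servers.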
satrs-core/tests/tcp_servers.rs (Normal file, 241 lines)
@ -0,0 +1,241 @@
//! This serves as both an integration test and an example application showcasing all major
//! features of the TCP COBS server by performing the following steps:
//!
//! 1. It defines both a TC receiver and a TM source which are [Sync].
//! 2. A telemetry packet is inserted into the TM source. The packet will be handled by the
//!    TCP server after handling all TCs.
//! 3. It instantiates the TCP server on localhost with automatic port assignment and assigns
//!    the TC receiver and TM source created previously.
//! 4. It moves the TCP server to a different thread and calls
//!    [TcpTmtcInCobsServer::handle_next_connection] inside that thread.
//! 5. The main thread connects to the server, sends a test telecommand and then reads back
//!    the test telemetry inserted into the TM source previously.
use core::{
    sync::atomic::{AtomicBool, Ordering},
    time::Duration,
};
use std::{
    io::{Read, Write},
    net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
    sync::Mutex,
    thread,
};

use hashbrown::HashSet;
use satrs_core::{
    encoding::cobs::encode_packet_with_cobs,
    hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer, TcpTmtcInCobsServer},
    tmtc::{ReceivesTcCore, TmPacketSourceCore},
};
use spacepackets::{
    ecss::{tc::PusTcCreator, SerializablePusPacket},
    PacketId, SpHeader,
};
use std::{boxed::Box, collections::VecDeque, sync::Arc, vec::Vec};

#[derive(Default, Clone)]
struct SyncTcCacher {
    tc_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
}
impl ReceivesTcCore for SyncTcCacher {
    type Error = ();

    fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
        let mut tc_queue = self.tc_queue.lock().expect("tc forwarder failed");
        println!("Received TC: {:x?}", tc_raw);
        tc_queue.push_back(tc_raw.to_vec());
        Ok(())
    }
}

#[derive(Default, Clone)]
struct SyncTmSource {
    tm_queue: Arc<Mutex<VecDeque<Vec<u8>>>>,
}

impl SyncTmSource {
    pub(crate) fn add_tm(&mut self, tm: &[u8]) {
        let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
        tm_queue.push_back(tm.to_vec());
    }
}

impl TmPacketSourceCore for SyncTmSource {
    type Error = ();

    fn retrieve_packet(&mut self, buffer: &mut [u8]) -> Result<usize, Self::Error> {
        let mut tm_queue = self.tm_queue.lock().expect("locking tm queue failed");
        if !tm_queue.is_empty() {
            let next_vec = tm_queue.front().unwrap();
            if buffer.len() < next_vec.len() {
                panic!(
                    "provided buffer too small, must be at least {} bytes",
                    next_vec.len()
                );
            }
            println!("Sending and encoding TM: {:x?}", next_vec);
            let next_vec = tm_queue.pop_front().unwrap();
            buffer[0..next_vec.len()].copy_from_slice(&next_vec);
            return Ok(next_vec.len());
        }
        Ok(0)
    }
}

const SIMPLE_PACKET: [u8; 5] = [1, 2, 3, 4, 5];
const INVERTED_PACKET: [u8; 5] = [5, 4, 3, 4, 1];
const AUTO_PORT_ADDR: SocketAddr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 0);

#[test]
fn test_cobs_server() {
    let tc_receiver = SyncTcCacher::default();
    let mut tm_source = SyncTmSource::default();
    // Insert a telemetry packet which will be read back by the client at a later stage.
    tm_source.add_tm(&INVERTED_PACKET);
    let mut tcp_server = TcpTmtcInCobsServer::new(
        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
        Box::new(tm_source),
        Box::new(tc_receiver.clone()),
    )
    .expect("TCP server generation failed");
    let dest_addr = tcp_server
        .local_addr()
        .expect("retrieving dest addr failed");
    let conn_handled: Arc<AtomicBool> = Default::default();
    let set_if_done = conn_handled.clone();

    // Call the connection handler in a separate thread, does block.
    thread::spawn(move || {
        let result = tcp_server.handle_next_connection();
        if result.is_err() {
            panic!("handling connection failed: {:?}", result.unwrap_err());
        }
        let conn_result = result.unwrap();
        assert_eq!(conn_result.num_received_tcs, 1, "No TC received");
        assert_eq!(conn_result.num_sent_tms, 1, "No TM received");
        // Signal the main thread we are done.
        set_if_done.store(true, Ordering::Relaxed);
    });

    // Send TC to server now.
    let mut encoded_buf: [u8; 16] = [0; 16];
    let mut current_idx = 0;
    encode_packet_with_cobs(&SIMPLE_PACKET, &mut encoded_buf, &mut current_idx);
    let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
    stream
        .write_all(&encoded_buf[..current_idx])
        .expect("writing to TCP server failed");
    // Done with writing.
    stream
        .shutdown(std::net::Shutdown::Write)
        .expect("shutting down write failed");
    let mut read_buf: [u8; 16] = [0; 16];
    let read_len = stream.read(&mut read_buf).expect("read failed");
    drop(stream);

    // 5 byte payload + 1 byte COBS encoding overhead + 2 sentinel (0x00) frame delimiters = 8.
    assert_eq!(read_len, 8);
    assert_eq!(read_buf[0], 0);
    assert_eq!(read_buf[read_len - 1], 0);
    let decoded_len =
        cobs::decode_in_place(&mut read_buf[1..read_len]).expect("COBS decoding failed");
    assert_eq!(decoded_len, 5);
    // Skip first sentinel byte.
    assert_eq!(&read_buf[1..1 + INVERTED_PACKET.len()], &INVERTED_PACKET);
    // A certain amount of time is allowed for the transaction to complete.
    for _ in 0..3 {
        if !conn_handled.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(5));
        }
    }
    if !conn_handled.load(Ordering::Relaxed) {
        panic!("connection was not handled properly");
    }
    // Check that the packet was received and decoded successfully.
    let mut tc_queue = tc_receiver
        .tc_queue
        .lock()
        .expect("locking tc queue failed");
    assert_eq!(tc_queue.len(), 1);
    assert_eq!(tc_queue.pop_front().unwrap(), &SIMPLE_PACKET);
    drop(tc_queue);
}

const TEST_APID_0: u16 = 0x02;
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0);

#[test]
fn test_ccsds_server() {
    let tc_receiver = SyncTcCacher::default();
    let mut tm_source = SyncTmSource::default();
    let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
    let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true);
    let tm_0 = verif_tm.to_vec().expect("tm generation failed");
    tm_source.add_tm(&tm_0);
    let mut packet_id_lookup = HashSet::new();
    packet_id_lookup.insert(TEST_PACKET_ID_0);
    let mut tcp_server = TcpSpacepacketsServer::new(
        ServerConfig::new(AUTO_PORT_ADDR, Duration::from_millis(2), 1024, 1024),
        Box::new(tm_source),
        Box::new(tc_receiver.clone()),
        Box::new(packet_id_lookup),
    )
    .expect("TCP server generation failed");
    let dest_addr = tcp_server
        .local_addr()
        .expect("retrieving dest addr failed");
    let conn_handled: Arc<AtomicBool> = Default::default();
    let set_if_done = conn_handled.clone();
    // Call the connection handler in a separate thread, does block.
    thread::spawn(move || {
        let result = tcp_server.handle_next_connection();
        if result.is_err() {
            panic!("handling connection failed: {:?}", result.unwrap_err());
        }
        let conn_result = result.unwrap();
        assert_eq!(conn_result.num_received_tcs, 1);
        assert_eq!(conn_result.num_sent_tms, 1);
        set_if_done.store(true, Ordering::Relaxed);
    });
    let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
    stream
        .set_read_timeout(Some(Duration::from_millis(10)))
        .expect("setting read timeout failed");

    // Send ping telecommand.
    let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap();
    let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
    let tc_0 = ping_tc.to_vec().expect("packet creation failed");
    stream
        .write_all(&tc_0)
        .expect("writing to TCP server failed");
    // Done with writing.
    stream
        .shutdown(std::net::Shutdown::Write)
        .expect("shutting down write failed");

    // Now read all the telemetry from the server.
    let mut read_buf: [u8; 16] = [0; 16];
    let mut read_len_total = 0;
    // Timeout ensures this does not block forever.
    while read_len_total < tm_0.len() {
        let read_len = stream.read(&mut read_buf).expect("read failed");
        read_len_total += read_len;
        assert_eq!(read_buf[..read_len], tm_0);
    }
    drop(stream);

    // A certain amount of time is allowed for the transaction to complete.
    for _ in 0..3 {
        if !conn_handled.load(Ordering::Relaxed) {
            thread::sleep(Duration::from_millis(5));
        }
    }
    if !conn_handled.load(Ordering::Relaxed) {
        panic!("connection was not handled properly");
    }
    // Check that the TC has arrived.
    let mut tc_queue = tc_receiver.tc_queue.lock().unwrap();
    assert_eq!(tc_queue.len(), 1);
    assert_eq!(tc_queue.pop_front().unwrap(), tc_0);
}
@ -15,11 +15,13 @@ crossbeam-channel = "0.5"
delegate = "0.10"
zerocopy = "0.6"
csv = "1"
num_enum = "0.6"
num_enum = "0.7"
thiserror = "1"

[dependencies.satrs-core]
# version = "0.1.0-alpha.0"
path = "../satrs-core"


[dependencies.satrs-mib]
path = "../satrs-mib"
@ -1,5 +1,5 @@
use log::{info, warn};
use satrs_core::hal::host::udp_server::{ReceiveResult, UdpTcServer};
use satrs_core::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use std::net::SocketAddr;
use std::sync::mpsc::{Receiver, SendError, Sender, TryRecvError};
use std::thread;
@ -1,7 +1,17 @@
[package]
name = "satrs-mib"
version = "0.1.0"
version = "0.1.0-alpha.0"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = """
Helper crate of the sat-rs framework to build a mission information base (MIB) from the
On-Board Software (OBSW) code directly."""
homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
license = "Apache-2.0"
keywords = ["no-std", "space", "aerospace"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html

@ -13,10 +23,14 @@ version = "1"
optional = true

[dependencies.satrs-core]
path = "../satrs-core"
# version = "0.1.0-alpha.1"
git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
branch = "main"
# rev = "35e1f7a983f6535c5571186e361fe101d4306b89"

[dependencies.satrs-mib-codegen]
path = "codegen"
version = "0.1.0-alpha.0"

[dependencies.serde]
version = "1"
@ -1,7 +1,11 @@
[package]
name = "satrs-mib-codegen"
version = "0.1.0"
version = "0.1.0-alpha.0"
edition = "2021"
description = "satrs-mib proc macro implementation"
homepage = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
license = "Apache-2.0"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lib]
@ -16,7 +20,10 @@ quote = "1"
proc-macro2 = "1"

[dependencies.satrs-core]
path = "../../satrs-core"
# version = "0.1.0-alpha.1"
git = "https://egit.irs.uni-stuttgart.de/rust/sat-rs.git"
branch = "main"
# rev = "35e1f7a983f6535c5571186e361fe101d4306b89"

[dev-dependencies]
trybuild = { version = "1", features = ["diff"] }
satrs-mib/codegen/LICENSE-APACHE (Symbolic link, 1 line)
@ -0,0 +1 @@
../LICENSE-APACHE
satrs-mib/codegen/NOTICE (Symbolic link, 1 line)
@ -0,0 +1 @@
../NOTICE
satrs-mib/release-checklist.md (Normal file, 25 lines)
@ -0,0 +1,25 @@
Checklist for new releases
=======

# Pre-Release

1. Make sure any new modules are documented sufficiently and check the docs with
   `cargo doc --all-features --open`.
2. Bump version specifier in `Cargo.toml`.
3. Update `CHANGELOG.md`: Convert the `unreleased` section into a version section with date and
   add a new `unreleased` section.
4. Run `cargo test --all-features`.
5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against the MSRV in `Cargo.toml`.
6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
   targets.

# Release

1. `cargo publish`

# Post-Release

1. Create a new annotated tag and push it with `git tag -a satrs-mib-<version>` and
   `git push -u origin satrs-mib-<version>`.
2. Create a new release on `EGit` based on the tag.