38 Commits

Author SHA1 Message Date
00bcc455a4 cargo fmt
2024-03-29 16:22:09 +01:00
16ab198d12 Merge branch 'main' into simplify-low-level-pus-api
2024-03-29 15:49:04 +01:00
fd950c5a94 update all dependencies
2024-03-29 15:47:34 +01:00
b45a219c6d simplify verification reporter core 2024-03-29 12:34:28 +01:00
977e29894b Merge pull request 'STM32 defmt + RTT support' (#144) from stm32-defmt-support into main
Reviewed-on: #144
2024-03-29 12:34:02 +01:00
dd1417a368 Upgrade example to use defmt/RTT/probe-rs
2024-03-28 23:47:07 +01:00
3195cf5111 update config.toml template file
2024-03-28 23:06:16 +01:00
8280c70682 Merge pull request 'Framework to Library' (#143) from framework-to-library into main
Reviewed-on: #143
2024-03-27 14:33:02 +01:00
19c5aa9b83 update rust book as well
2024-03-27 14:17:59 +01:00
713b4e097b update the README 2024-03-27 14:14:45 +01:00
746b31ec5d Merge pull request 'satrs-example RTIC v2' (#142) from satrs-example-rtic-v2 into main
Reviewed-on: #142
2024-03-25 16:09:06 +01:00
2318cd4293 Update satrs-example for the STM32F3
- Update RTIC to v2
- Update Python client version
2024-03-25 14:26:07 +01:00
a6b57d3eb9 Merge pull request 'Update STM32F3 example' (#141) from update-stm32f3-example into main
Reviewed-on: #141
2024-03-22 15:19:58 +01:00
bddd3132d4 added some more instructions for Python
2024-03-22 13:18:20 +01:00
6a6ffba754 why have two files
2024-03-22 13:09:27 +01:00
d27a41e4de Start updating the STM32F3 Discovery example
2024-03-22 13:08:01 +01:00
128df9a813 Merge pull request 'First version of asynchronix based mini simulator' (#139) from init-minisim into main
Reviewed-on: #139
2024-03-11 10:41:24 +01:00
7387be3bc3 new request/reponse API
2024-03-11 10:26:48 +01:00
d3fb504545 clean up manifest file
2024-03-09 15:14:15 +01:00
ae8e39f626 First version of asynchronix based mini simulator
- Basic simulator with 3 devices
- Can be driven via a UDP interface
- Design allows to drive the simulation via different interface in the future
  by using Request/Reply messaging.
2024-03-09 15:11:11 +01:00
ab3d907d4e Merge pull request 'Refactor TMTC distribution modules' (#138) from ccsds-distrib-refactoring into main
Reviewed-on: #138
2024-03-04 16:53:23 +01:00
3de5954898 Refactor TMTC distribution modules
2024-03-04 16:26:34 +01:00
5600aa576c Merge pull request 'use generics for the PUS stack' (#134) from pus-stack-use-generics into main
Reviewed-on: #134
2024-02-26 15:46:47 +01:00
88793cfa87 add some helper types
2024-02-26 15:34:20 +01:00
223b637eb8 use generics for the PUS stack
2024-02-26 15:18:15 +01:00
cf9b115e1e Merge pull request 'Refactored Verification Reporter Module' (#132) from refactor-verification-mod into main
Reviewed-on: #132
2024-02-26 11:58:57 +01:00
eea9b11b39 refactored verification reporter
- Use generics instead of trait objects where applicable.
2024-02-26 11:41:42 +01:00
f21ab0017e Merge pull request 'fixed for scheduler' (#133) from scheduler-fixes into main
Reviewed-on: #133
2024-02-26 11:15:50 +01:00
a7ca00317f cargo fmt
2024-02-26 11:00:48 +01:00
75fda42f4f fixed for scheduler
2024-02-26 10:53:33 +01:00
faf0f6f6c6 Merge pull request 'refactored event manager' (#131) from refactor-event-man into main
Reviewed-on: #131
2024-02-23 14:31:48 +01:00
a690c7720d Refactored event manager
2024-02-23 14:19:30 +01:00
b48b5b8caa Merge pull request 'bump example patch version' (#129) from prepare-example-release into main
Reviewed-on: #129
2024-02-21 13:51:49 +01:00
238c3a8d43 bump example patch version
2024-02-21 11:34:35 +01:00
de8c0bc13e Merge pull request 'Use released package versions again' (#128) from bump-example into main
Reviewed-on: #128
2024-02-21 11:34:07 +01:00
012eb82f42 bump example
2024-02-21 11:22:16 +01:00
d26f6cbe27 Merge pull request 'sat-rs v0.2.0-rc.0' (#127) from prepare-satrs-release-candidate into main
Reviewed-on: #127
2024-02-21 11:13:05 +01:00
82d3215761 changelog
2024-02-21 11:08:22 +01:00
87 changed files with 43704 additions and 2593 deletions

.gitignore vendored

@ -1,5 +1,8 @@
/target
target/
/Cargo.lock
output.log
/.idea/*
!/.idea/runConfigurations


@ -4,6 +4,7 @@ members = [
"satrs",
"satrs-mib",
"satrs-example",
"satrs-minisim",
"satrs-shared",
]


@ -8,12 +8,17 @@
sat-rs
=========
This is the repository of the sat-rs framework. Its primary goal is to provide re-usable components
This is the repository of the sat-rs library. Its primary goal is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements for these systems. You can find an overview of the project and the
link to the [more high-level sat-rs book](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
at the [IRS software projects website](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
This is early-stage software. Important features are missing. New releases
with breaking changes are released regularly, with all changes documented inside respective
changelog files. You should only use this library if you are willing to work in this
environment.
A lot of the architecture and general design considerations are based on the
[FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)


@ -18,15 +18,19 @@ def generate_cov_report(open_report: bool, format: str, package: str):
out_path = "./target/debug/coverage"
if format == "lcov":
out_path = "./target/debug/lcov.info"
os.system(
grcov_cmd = (
f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
f"-o {out_path}"
)
print(f"Running: {grcov_cmd}")
os.system(grcov_cmd)
if format == "lcov":
os.system(
lcov_cmd = (
"genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
"--legend ./target/debug/lcov.info"
)
print(f"Running: {lcov_cmd}")
os.system(lcov_cmd)
if open_report:
coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
webbrowser.open_new_tab(coverage_report_path)
@ -43,7 +47,7 @@ def main():
parser.add_argument(
"-p",
"--package",
choices=["satrs"],
choices=["satrs", "satrs-minisim"],
default="satrs",
help="Choose project to generate coverage for",
)


@ -15,7 +15,7 @@ action commanding could look like.
2. Target ID and Action String based. The target ID is the same as in the first proposal, but
the unique action is identified by a string.
The framework provides an `ActionRequest` abstraction to model both of these cases.
The library provides an `ActionRequest` abstraction to model both of these cases.
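As a concrete illustration of the two addressing schemes above, the following is a hypothetical Rust sketch. The type and field names (`ActionRequestSketch`, `target_id`, `action_id`, `user_data`) are invented for illustration and are not the actual `ActionRequest` API shipped by the library.

```rust
/// Hypothetical sketch of an action request abstraction. The real satrs
/// `ActionRequest` type may look different.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ActionRequestSketch {
    /// Case 1: the unique action is identified by a numeric action ID.
    UnsignedId {
        target_id: u32,
        action_id: u32,
        user_data: Vec<u8>,
    },
    /// Case 2: the unique action is identified by a string.
    StringId {
        target_id: u32,
        action_str: String,
        user_data: Vec<u8>,
    },
}

fn main() {
    // Both variants address the same target but identify the action differently.
    let by_id = ActionRequestSketch::UnsignedId {
        target_id: 0x0102,
        action_id: 5,
        user_data: Vec::new(),
    };
    let by_string = ActionRequestSketch::StringId {
        target_id: 0x0102,
        action_str: "switch-heater-on".to_string(),
        user_data: Vec::new(),
    };
    println!("{:?}\n{:?}", by_id, by_string);
}
```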
## Commanding with ECSS PUS 8


@ -20,7 +20,7 @@ components.
1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/host/udp_server/index.html).
UDP is already packet based which makes it an excellent fit for exchanging space packets.
2. [TCP TMTC Server Components](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/index.html).
TCP is a stream based protocol, so the framework provides building blocks to parse telemetry
TCP is a stream based protocol, so the library provides building blocks to parse telemetry
from an arbitrary bytestream. Two concrete implementations are provided:
- [TCP spacepackets server](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/struct.TcpSpacepacketsServer.html)
to parse tightly packed CCSDS Spacepackets.
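To illustrate the general idea behind these parsing building blocks, here is a minimal, self-contained sketch which extracts zero-delimited (COBS-style) frames from a TCP bytestream. It only uses the Rust standard library, the port number is arbitrary, and it is not the actual sat-rs TCP server implementation.

```rust
use std::io::Read;
use std::net::TcpListener;

/// Extract zero-delimited frames from an accumulation buffer.
/// Bytes after the last delimiter stay in the buffer for the next read call.
fn extract_frames(buf: &mut Vec<u8>) -> Vec<Vec<u8>> {
    let mut frames = Vec::new();
    while let Some(pos) = buf.iter().position(|&b| b == 0) {
        // Remove the frame plus its delimiter, keep only the frame payload.
        let frame: Vec<u8> = buf.drain(..=pos).take(pos).collect();
        if !frame.is_empty() {
            // A real implementation would COBS-decode and parse the frame here.
            frames.push(frame);
        }
    }
    frames
}

fn main() -> std::io::Result<()> {
    let listener = TcpListener::bind("127.0.0.1:7301")?;
    let (mut stream, _addr) = listener.accept()?;
    let mut accumulation = Vec::new();
    let mut read_buf = [0u8; 4096];
    loop {
        let read_len = stream.read(&mut read_buf)?;
        if read_len == 0 {
            break; // connection closed by the client
        }
        accumulation.extend_from_slice(&read_buf[..read_len]);
        for frame in extract_frames(&mut accumulation) {
            println!("received frame with {} bytes", frame.len());
        }
    }
    Ok(())
}
```

The concrete servers listed above take care of this framing (COBS or CCSDS space packet headers) and forward complete packets to the rest of the software.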


@ -1,13 +1,14 @@
# Framework Design
# Library Design
Satellites and space systems in general are complex systems with a wide range of requirements for
both the hardware and the software. Consequently, the general design of the framework is centered
both the hardware and the software. Consequently, the general design of the library is centered
around many light-weight components which try to impose as few restrictions as possible on how to
solve certain problems.
solve certain problems. This is also the reason why sat-rs is explicitly called a library
instead of a framework.
There are still a lot of common patterns and architectures across these systems where guidance
of how to solve a problem and a common structure would still be extremely useful to avoid pitfalls
which were already solved and to avoid boilerplate code. This framework tries to provide this
which were already solved and to avoid boilerplate code. This library tries to provide this
structure and guidance the following way:
1. Providing this book which explains the architecture and design patterns in respect to common
@ -18,7 +19,7 @@ structure and guidance the following way:
3. Providing a good test suite. This includes both unittests and integration tests. The integration
tests can also serve as smaller usage examples than the large `satrs-example` application.
This framework has special support for standards used in the space industry. This especially
This library has special support for standards used in the space industry. This especially
includes standards provided by Consultative Committee for Space Data Systems (CCSDS) and European
Cooperation for Space Standardization (ECSS). It does not enforce using any of those standards,
but it is always recommended to use some sort of standard for interoperability.
@ -30,10 +31,10 @@ Flying Laptop Project by the University of Stuttgart with Airbus Defence and Spa
It has flight heritage through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
Therefore, a lot of the design concepts were ported more or less unchanged to the `sat-rs`
framework.
library.
FLP is a medium-size small satellite with a higher budget and longer development time than EIVE,
which allowed to build a highly reliable system while EIVE is a smaller 6U+ cubesat which had a
shorter development cycle and was built using cheaper COTS components. This framework also tries
shorter development cycle and was built using cheaper COTS components. This library also tries
to accumulate the knowledge of developing the OBSW and operating the satellite for both these
different systems and provide a solution for a wider range of small satellite systems.


@ -1,6 +1,6 @@
# sat-rs Example Application
The `sat-rs` framework includes a monolithic example application which can be found inside
The `sat-rs` library includes a monolithic example application which can be found inside
the [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
subdirectory of the repository. The primary purpose of this example application is to show how
the various components of the sat-rs framework could be used as part of a larger on-board


@ -1,7 +1,7 @@
The sat-rs book
======
This book is the primary information resource for the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
This book is the primary information resource for the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
in addition to the regular API documentation. It contains the following resources:
1. Architecture information and considerations which would exceed the scope of the regular API documentation.
@ -12,10 +12,15 @@ in addition to the regular API documentation. It contains the following resource
# Introduction
The primary goal of the sat-rs framework is to provide re-usable components
The primary goal of the sat-rs library is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements for these systems.
It should be noted that sat-rs is early-stage software. Important features are missing. New releases
with breaking changes are released regularly, with all changes documented inside respective
changelog files. You should only use this library if you are willing to work in this
environment.
A lot of the architecture and general design considerations are based on the
[FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)


@ -0,0 +1,37 @@
[target.'cfg(all(target_arch = "arm", target_os = "none"))']
# uncomment ONE of these three options to make `cargo run` start a GDB session
# which option to pick depends on your system
# You can also replace openocd.gdb by jlink.gdb when using a J-Link.
# runner = "arm-none-eabi-gdb -q -x openocd.gdb"
# runner = "gdb-multiarch -q -x openocd.gdb"
# runner = "gdb -q -x openocd.gdb"
runner = "probe-rs run --chip STM32F303VCTx"
rustflags = [
"-C", "linker=flip-link",
# LLD (shipped with the Rust toolchain) is used as the default linker
"-C", "link-arg=-Tlink.x",
"-C", "link-arg=-Tdefmt.x",
# This is needed if your flash or ram addresses are not aligned to 0x10000 in memory.x
# See https://github.com/rust-embedded/cortex-m-quickstart/pull/95
"-C", "link-arg=--nmagic",
# if you run into problems with LLD switch to the GNU linker by commenting out
# this line
# "-C", "linker=arm-none-eabi-ld",
# if you need to link to pre-compiled C libraries provided by a C toolchain
# use GCC as the linker by commenting out both lines above and then
# uncommenting the three lines below
# "-C", "linker=arm-none-eabi-gcc",
# "-C", "link-arg=-Wl,-Tlink.x",
# "-C", "link-arg=-nostartfiles",
]
[build]
# comment out the following line if you intend to run unit tests on host machine
target = "thumbv7em-none-eabihf" # Cortex-M4F and Cortex-M7F (with FPU)
[env]
DEFMT_LOG = "info"
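The `runner`, the `defmt.x` linker argument and the `DEFMT_LOG` variable above assume a defmt-based firmware flashed with probe-rs. As a rough, hypothetical sketch (not taken from the example sources), a minimal application compatible with this configuration could look as follows:

```rust
#![no_std]
#![no_main]

use cortex_m_rt::entry;
use defmt_brtt as _; // global defmt logger transport (RTT)
use panic_probe as _; // panic handler which reports panics via defmt

#[entry]
fn main() -> ! {
    // Printed via RTT; visible in the `probe-rs run` output.
    defmt::info!("hello from the STM32F3!");
    loop {
        // Sleep until the next interrupt.
        cortex_m::asm::wfi();
    }
}
```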


@ -1,3 +1,4 @@
/target
/itm.txt
/.cargo/config*
/.vscode


@ -1,66 +0,0 @@
{
/*
* Requires the Rust Language Server (RLS) and Cortex-Debug extensions
* https://marketplace.visualstudio.com/items?itemName=rust-lang.rust
* https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug
*/
"version": "0.2.0",
"configurations": [
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Debug",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
},
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Release",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/release/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
}
]
}


@ -1,3 +0,0 @@
{
"cortex-debug.gdbPath.linux": "gdb-multiarch"
}


@ -13,9 +13,9 @@ dependencies = [
[[package]]
name = "atomic-polyfill"
version = "0.1.11"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28"
checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4"
dependencies = [
"critical-section",
]
@ -55,20 +55,21 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bxcan"
version = "0.6.2"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b13b4b2ea9ab2ba924063ebb86ad895cb79f4a79bf90f27949eb20c335b30f9"
checksum = "40ac3d0c0a542d0ab5521211f873f62706a7136df415676f676d347e5a41dd80"
dependencies = [
"bitflags",
"nb 1.0.0",
"embedded-hal 0.2.7",
"nb 1.1.0",
"vcell",
]
[[package]]
name = "byteorder"
version = "1.4.3"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]]
name = "cast"
@ -87,31 +88,42 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "chrono"
version = "0.4.23"
version = "0.4.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f"
checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
dependencies = [
"num-integer",
"num-traits",
]
[[package]]
name = "cortex-m"
version = "0.7.6"
name = "cobs"
version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70858629a458fdfd39f9675c4dc309411f2a3f83bede76988d81bf1a0ecee9e0"
checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
[[package]]
name = "cobs"
version = "0.2.3"
source = "git+https://github.com/robamu/cobs.rs.git?branch=all_features#c70a7f30fd00a7cbdb7666dec12b437977385d40"
[[package]]
name = "cortex-m"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9"
dependencies = [
"bare-metal 0.2.5",
"bitfield",
"embedded-hal",
"critical-section",
"embedded-hal 0.2.7",
"volatile-register",
]
[[package]]
name = "cortex-m-rt"
version = "0.7.2"
version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6d3328b8b5534f0c90acd66b68950f2763b37e0173cac4d8b4937c4a80761f9"
checksum = "ee84e813d593101b1723e13ec38b6ab6abbdbaaa4546553f5395ed274079ddb1"
dependencies = [
"cortex-m-rt-macros",
]
@ -124,48 +136,44 @@ checksum = "f0f6f3e36f203cfedbc78b357fb28730aa2c6dc1ab060ee5c2405e843988d3c7"
dependencies = [
"proc-macro2",
"quote",
"syn",
"syn 1.0.109",
]
[[package]]
name = "cortex-m-rtic"
version = "1.1.3"
name = "cortex-m-semihosting"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6b82f1c39acd6c3a35c2013b6110c20f5bc534522791fabadeed49ccada2dce"
checksum = "c23234600452033cc77e4b761e740e02d2c4168e11dbf36ab14a0f58973592b0"
dependencies = [
"bare-metal 1.0.0",
"cortex-m",
"cortex-m-rtic-macros",
"heapless",
"rtic-core",
"rtic-monotonic",
"version_check",
]
[[package]]
name = "cortex-m-rtic-macros"
version = "1.1.5"
name = "crc"
version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8e9645ef54bec1cf70ac33e9bf9566e6507ab5b41ae6baf3735662194e8607"
checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
dependencies = [
"proc-macro-error",
"proc-macro2",
"quote",
"rtic-syntax",
"syn",
"crc-catalog",
]
[[package]]
name = "crc-catalog"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
[[package]]
name = "critical-section"
version = "1.1.1"
version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52"
checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]]
name = "darling"
version = "0.14.2"
version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa"
checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391"
dependencies = [
"darling_core",
"darling_macro",
@ -173,26 +181,102 @@ dependencies = [
[[package]]
name = "darling_core"
version = "0.14.2"
version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f"
checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
dependencies = [
"fnv",
"ident_case",
"proc-macro2",
"quote",
"syn",
"syn 2.0.53",
]
[[package]]
name = "darling_macro"
version = "0.14.2"
version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e"
checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [
"darling_core",
"quote",
"syn",
"syn 2.0.53",
]
[[package]]
name = "defmt"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3939552907426de152b3c2c6f51ed53f98f448babd26f28694c95f5906194595"
dependencies = [
"bitflags",
"defmt-macros",
]
[[package]]
name = "defmt-brtt"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2f0ac3635d0c89d12b8101fcb44a7625f5f030a1c0491124b74467eb5a58a78"
dependencies = [
"critical-section",
"defmt",
]
[[package]]
name = "defmt-macros"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18bdc7a7b92ac413e19e95240e75d3a73a8d8e78aa24a594c22cbb4d44b4bbda"
dependencies = [
"defmt-parser",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "defmt-parser"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f"
dependencies = [
"thiserror",
]
[[package]]
name = "defmt-test"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290966e8c38f94b11884877242de876280d0eab934900e9642d58868e77c5df1"
dependencies = [
"cortex-m-rt",
"cortex-m-semihosting",
"defmt",
"defmt-test-macros",
]
[[package]]
name = "defmt-test-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "delegate"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee5df75c70b95bd3aacc8e2fd098797692fb1d54121019c4de481e42f04c8a1"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
@ -214,6 +298,12 @@ dependencies = [
"void",
]
[[package]]
name = "embedded-hal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361a90feb7004eca4019fb28352a9465666b24f840f5c3cddf0ff13920590b89"
[[package]]
name = "embedded-time"
version = "0.12.1"
@ -225,25 +315,31 @@ dependencies = [
[[package]]
name = "enumset"
version = "1.0.12"
version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753"
checksum = "226c0da7462c13fb57e5cc9e0dc8f0635e7d27f276a3a7fd30054647f669007d"
dependencies = [
"enumset_derive",
]
[[package]]
name = "enumset_derive"
version = "0.6.1"
version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0"
checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af"
dependencies = [
"darling",
"proc-macro2",
"quote",
"syn",
"syn 2.0.53",
]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]]
name = "fnv"
version = "1.0.7"
@ -252,18 +348,42 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "fugit"
version = "0.3.6"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab17bb279def6720d058cb6c052249938e7f99260ab534879281a95367a87e5"
checksum = "17186ad64927d5ac8f02c1e77ccefa08ccd9eaa314d5a4772278aa204a22f7e7"
dependencies = [
"gcd",
]
[[package]]
name = "gcd"
version = "2.2.0"
name = "futures-core"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4b1b088ad0a967aa29540456b82fc8903f854775d33f71e9709c4efb3dfbfd2"
checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "gcd"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a"
[[package]]
name = "generic-array"
@ -276,9 +396,9 @@ dependencies = [
[[package]]
name = "generic-array"
version = "0.14.6"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
@ -286,29 +406,26 @@ dependencies = [
[[package]]
name = "hash32"
version = "0.2.1"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67"
checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
dependencies = [
"byteorder",
]
[[package]]
name = "hashbrown"
version = "0.12.3"
version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
[[package]]
name = "heapless"
version = "0.7.16"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743"
checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
dependencies = [
"atomic-polyfill",
"hash32",
"rustc_version 0.4.0",
"spin",
"stable_deref_trait",
]
@ -320,41 +437,14 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]]
name = "indexmap"
version = "1.9.2"
version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399"
checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [
"autocfg",
"equivalent",
"hashbrown",
]
[[package]]
name = "itm_logger"
version = "0.1.3-pre.0"
dependencies = [
"cortex-m",
"log",
]
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "lsm303dlhc"
version = "0.2.0"
@ -362,7 +452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5d1a5c290951321d1b0d4a40edd828537de9889134a0e67c5146542ae57706"
dependencies = [
"cast",
"embedded-hal",
"embedded-hal 0.2.7",
"generic-array 0.11.2",
]
@ -372,7 +462,7 @@ version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc4010833aea396656c2f91ee704d51a6f1329ec2ab56ffd00bfd56f7481ea94"
dependencies = [
"generic-array 0.14.6",
"generic-array 0.14.7",
]
[[package]]
@ -381,14 +471,14 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f"
dependencies = [
"nb 1.0.0",
"nb 1.1.0",
]
[[package]]
name = "nb"
version = "1.0.0"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "546c37ac5d9e56f55e73b677106873d9d9f5190605e41a856503623648488cae"
checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d"
[[package]]
name = "num"
@ -414,19 +504,18 @@ dependencies = [
[[package]]
name = "num-integer"
version = "0.1.45"
version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9"
checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [
"autocfg",
"num-traits",
]
[[package]]
name = "num-iter"
version = "0.1.43"
version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252"
checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9"
dependencies = [
"autocfg",
"num-integer",
@ -446,27 +535,60 @@ dependencies = [
[[package]]
name = "num-traits"
version = "0.2.15"
version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd"
checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [
"autocfg",
]
[[package]]
name = "panic-itm"
version = "0.4.2"
name = "num_enum"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d577d97d1b31268087b6dddf2470e6794ef5eee87d9dca7fcd0481695391a4c"
checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "panic-probe"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa6fa5645ef5a760cd340eaa92af9c1ce131c8c09e7f8926d8a24b59d26652b9"
dependencies = [
"cortex-m",
"defmt",
]
[[package]]
name = "paste"
version = "1.0.11"
version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba"
checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]]
name = "proc-macro-error"
@ -477,7 +599,7 @@ dependencies = [
"proc-macro-error-attr",
"proc-macro2",
"quote",
"syn",
"syn 1.0.109",
"version_check",
]
@ -494,31 +616,54 @@ dependencies = [
[[package]]
name = "proc-macro2"
version = "1.0.49"
version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5"
checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.23"
version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b"
checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rtcc"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3623619ce77c09a7d87cf7c61c5c887b9c7dee8805f66af6c4aa5824be4d9930"
checksum = "f4fbd0d5bed2b76e27a7ef872568b34072c1af94c277cd52c17a89d54673b3fe"
dependencies = [
"chrono",
]
[[package]]
name = "rtic"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c443db16326376bdd64377da268f6616d5f804aba8ce799bac7d1f7f244e9d51"
dependencies = [
"atomic-polyfill",
"bare-metal 1.0.0",
"cortex-m",
"critical-section",
"rtic-core",
"rtic-macros",
]
[[package]]
name = "rtic-common"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0786b50b81ef9d2a944a000f60405bb28bf30cd45da2d182f3fe636b2321f35c"
dependencies = [
"critical-section",
]
[[package]]
name = "rtic-core"
version = "1.0.0"
@ -526,21 +671,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9369355b04d06a3780ec0f51ea2d225624db777acbc60abd8ca4832da5c1a42"
[[package]]
name = "rtic-monotonic"
version = "1.0.0"
name = "rtic-macros"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb8b0b822d1a366470b9cea83a1d4e788392db763539dc4ba022bcc787fece82"
[[package]]
name = "rtic-syntax"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ad3ae243dd8d0a1b064615f664d4fa7e63929939074c564cbe5efdc4c503065"
checksum = "54053598ea24b1b74937724e366558412a1777eb2680b91ef646db540982789a"
dependencies = [
"indexmap",
"proc-macro-error",
"proc-macro2",
"quote",
"syn",
"syn 2.0.53",
]
[[package]]
name = "rtic-monotonics"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058c2397dbd5bb4c5650a0e368c3920953e458805ff5097a0511b8147b3619d7"
dependencies = [
"atomic-polyfill",
"cfg-if",
"cortex-m",
"embedded-hal 1.0.0",
"fugit",
"rtic-time",
]
[[package]]
name = "rtic-time"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b232e7aebc045cfea81cdd164bc2727a10aca9a4568d406d0a5661cdfd0f19"
dependencies = [
"critical-section",
"futures-util",
"rtic-common",
]
[[package]]
@ -558,31 +723,56 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [
"semver 1.0.16",
"semver 1.0.22",
]
[[package]]
name = "sat-rs-example-stm32f-disco"
name = "satrs"
version = "0.2.0-rc.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8cb19cba46a45047ff0879ebfbf9d6ae1c5b2e0e38b2e08760b10a441d4dae6"
dependencies = [
"cobs 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"crc",
"delegate",
"num-traits",
"num_enum",
"paste",
"satrs-shared",
"smallvec",
"spacepackets",
]
[[package]]
name = "satrs-example-stm32f3-disco"
version = "0.1.0"
dependencies = [
"cobs 0.2.3 (git+https://github.com/robamu/cobs.rs.git?branch=all_features)",
"cortex-m",
"cortex-m-rt",
"cortex-m-rtic",
"embedded-hal",
"cortex-m-semihosting",
"defmt",
"defmt-brtt",
"defmt-test",
"embedded-hal 0.2.7",
"enumset",
"heapless",
"itm_logger",
"panic-itm",
"panic-probe",
"rtic",
"rtic-monotonics",
"satrs",
"stm32f3-discovery",
"stm32f3xx-hal",
"systick-monotonic",
]
[[package]]
name = "scopeguard"
version = "1.1.0"
name = "satrs-shared"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd"
checksum = "75a402ba556a7f5eef707035b45e64a3259b09674311e98697f3dd0508a1bf51"
dependencies = [
"spacepackets",
]
[[package]]
name = "semver"
@ -595,9 +785,9 @@ dependencies = [
[[package]]
name = "semver"
version = "1.0.16"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
[[package]]
name = "semver-parser"
@ -607,17 +797,28 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]]
name = "slice-group-by"
version = "0.3.0"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec"
checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7"
[[package]]
name = "spin"
version = "0.9.4"
name = "smallvec"
version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09"
checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "spacepackets"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28246ae2451af240c3e3ff3c51363c7b6ad565ca6aa9bad23b8c725687c485e1"
dependencies = [
"lock_api",
"chrono",
"crc",
"delegate",
"num-traits",
"num_enum",
"zerocopy",
]
[[package]]
@ -639,9 +840,9 @@ dependencies = [
[[package]]
name = "stm32f3"
version = "0.14.0"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "265cda62ac13307414de4aca58dbbbd8038ddba85cffbb335823aa216f2e3200"
checksum = "b28b37228ef3fa47956af38c6abd756e912f244c1657f14e66d42fc8d74ea96f"
dependencies = [
"bare-metal 1.0.0",
"cortex-m",
@ -651,7 +852,8 @@ dependencies = [
[[package]]
name = "stm32f3-discovery"
version = "0.8.0-pre.0"
version = "0.8.0-alpha.0"
source = "git+https://github.com/robamu/stm32f3-discovery?branch=complete-dma-update-hal#5ccacae07ceff02d7d3649df67a6a0ba2a144752"
dependencies = [
"accelerometer",
"cortex-m",
@ -663,20 +865,20 @@ dependencies = [
[[package]]
name = "stm32f3xx-hal"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e422c5c044e8f3a068b1e14b83c071449e27c9d4bc0e24f972b552d79f2be03"
version = "0.11.0-alpha.0"
source = "git+https://github.com/robamu/stm32f3xx-hal?branch=complete-dma-update#04fc76b7912649c84b57bd0ab803ea3ccf2aadae"
dependencies = [
"bare-metal 1.0.0",
"bxcan",
"cfg-if",
"cortex-m",
"cortex-m-rt",
"critical-section",
"embedded-dma",
"embedded-hal",
"embedded-hal 0.2.7",
"embedded-time",
"enumset",
"nb 1.0.0",
"nb 1.1.0",
"num-traits",
"paste",
"rtcc",
"slice-group-by",
@ -691,14 +893,14 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90a4adc8cbd1726249b161898e48e0f3f1ce74d34dc784cbbc98fba4ed283fbf"
dependencies = [
"embedded-hal",
"embedded-hal 0.2.7",
]
[[package]]
name = "syn"
version = "1.0.107"
version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5"
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [
"proc-macro2",
"quote",
@ -706,27 +908,47 @@ dependencies = [
]
[[package]]
name = "systick-monotonic"
version = "1.0.1"
name = "syn"
version = "2.0.53"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67fb822d5c615a0ae3a4795ee5b1d06381c7faf488d861c0a4fa8e6a88d5ff84"
checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032"
dependencies = [
"cortex-m",
"fugit",
"rtic-monotonic",
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thiserror"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "typenum"
version = "1.16.0"
version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]]
name = "unicode-ident"
version = "1.0.6"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "usb-device"
@ -754,9 +976,30 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]]
name = "volatile-register"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ee8f19f9d74293faf70901bc20ad067dc1ad390d2cbf1e3f75f721ffee908b6"
checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc"
dependencies = [
"vcell",
]
[[package]]
name = "zerocopy"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]


@ -2,58 +2,81 @@
name = "satrs-example-stm32f3-disco"
version = "0.1.0"
edition = "2021"
default-run = "satrs-example-stm32f3-disco"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
cortex-m = "0.7"
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7"
embedded-hal = "0.2.6"
cortex-m-rtic = "1.0"
enumset = "1.0"
heapless = "0.7"
systick-monotonic = "1.0"
defmt = "0.3"
defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
panic-probe = { version = "0.3", features = ["print-defmt"] }
embedded-hal = "0.2.7"
cortex-m-semihosting = "0.5.0"
enumset = "1"
heapless = "0.8"
[dependencies.rtic]
version = "2"
features = ["thumbv7-backend"]
[dependencies.rtic-monotonics]
version = "1"
features = ["cortex-m-systick"]
[dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git"
branch = "all_features"
default-features = false
[dependencies.panic-itm]
version = "0.4"
[dependencies.itm_logger]
git = "https://github.com/robamu/itm_logger.rs.git"
branch = "all_features"
version = "0.1.3-alpha.0"
[dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.10.0-alpha.0"
version = "0.11.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"]
branch = "all_features"
branch = "complete-dma-update"
# Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0"
branch = "all_features"
branch = "complete-dma-update-hal"
# Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery"
[dependencies.satrs-core]
git = "https://egit.irs.uni-stuttgart.de/rust/satrs-core.git"
version = "0.1.0-alpha.0"
[dependencies.satrs]
version = "0.2.0-rc.0"
default-features = false
# this lets you use `cargo fix`!
# [[bin]]
# name = "stm32f3-blinky"
# test = false
# bench = false
[dev-dependencies]
defmt-test = "0.3"
# cargo test
[profile.test]
codegen-units = 1
debug = 2
debug-assertions = true # <-
incremental = false
opt-level = "s" # <-
overflow-checks = true # <-
# cargo build/run --release
[profile.release]
codegen-units = 1 # better optimizations
debug = true # symbols are nice and they don't increase the size on Flash
lto = true # better optimizations
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-
# cargo test --release
[profile.bench]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-


@ -2,15 +2,25 @@ sat-rs example for the STM32F3-Discovery board
=======
This example application shows how the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad)
can be used on an embedded target. It also shows how a relatively simple OBSW could be built when no
standard runtime is available. It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency
framework.
can be used on an embedded target.
It also shows how a relatively simple OBSW could be built when no standard runtime is available.
It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency framework and the
[defmt](https://defmt.ferrous-systems.com/) framework for logging.
The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also
used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the
[Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction
to embedded Rust.
## Pre-Requisites
Make sure the following tools are installed:
1. [`probe-rs`](https://probe.rs/): Application used to flash and debug the MCU.
2. Optional and recommended: [VS Code](https://code.visualstudio.com/) with
[probe-rs plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger)
for debugging.
## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
@ -40,15 +50,23 @@ you can simply build the application with
cargo build
```
## Flashing and Debugging from the command line
## Flashing from the command line
TODO
You can flash the application from the command line using `probe-rs`:
```sh
probe-rs run --chip STM32F303VCTx
```
## Debugging with VS Code
The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug
the board is a Mini-USB cable. The code in this repository was debugged using `openocd`
and the VS Code [`Cortex-Debug` plugin](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug).
the board is a Mini-USB cable. The code in this repository was debugged using [`probe-rs`](https://probe.rs/docs/tools/debuggerA)
and the VS Code [`probe-rs` plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger).
Make sure to install this plugin first.
Sample configuration files are provided inside the `vscode` folder.
Use `cp vscode .vscode -r` to use them for your project.
Some sample configuration files for VS Code were provided as well. You can simply use `Run` and `Debug`
to automatically rebuild and flash your application.
@ -56,20 +74,32 @@ to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS code or adding it to a workspace.
If you would like to use a custom GDB application, you can specify the gdb binary in the following
configuration variables in your `settings.json`:
- `"cortex-debug.gdbPath"`
- `"cortex-debug.gdbPath.linux"`
- `"cortex-debug.gdbPath.windows"`
- `"cortex-debug.gdbPath.osx"`
## Commanding with Python
When the SW is running on the Discovery board, you can command the MCU via a serial interface,
using COBS encoded CCSDS packets.
TODO:
- How and where to connect serial interface on the MCU
- How to set up Python venv (or at least strongly recommend it) and install deps
- How to copy `def_tmtc_conf.json` to `tmtc_conf.json` and adapt it for custom serial port
using COBS encoded PUS packets.
It is recommended to use a virtual environment to do this. To set up one in the command line,
you can use `python3 -m venv venv` on Unix systems or `py -m venv venv` on Windows systems.
After doing this, you can check the [venv tutorial](https://docs.python.org/3/tutorial/venv.html)
on how to activate the environment and then use the following command to install the required
dependency:
```sh
pip install -r requirements.txt
```
The packets are exchanged using a dedicated serial interface. You can use any generic USB-to-UART
converter device with the TX pin connected to the PA3 pin and the RX pin connected to the PA2 pin.
A default configuration file for the python application is provided and can be used by running
```sh
cp def_tmtc_conf.json tmtc_conf.json
```
After that, you can for example send a ping to the MCU using the following command
```sh
./main.py -p /ping
```
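For background on the framing used above: COBS (Consistent Overhead Byte Stuffing) rewrites a packet so that it contains no zero bytes, which allows a trailing 0 to act as an unambiguous frame delimiter on the serial link. The following Rust sketch only illustrates the encoding; both the firmware and the Python client rely on existing COBS implementations.

```rust
/// Minimal COBS encoder for illustration. The output contains no zero bytes,
/// so a trailing 0 can be appended as the frame delimiter.
fn cobs_encode(input: &[u8]) -> Vec<u8> {
    let mut out = vec![0u8]; // placeholder for the first code byte
    let mut code_idx = 0; // index of the current code byte in `out`
    let mut code = 1u8; // offset to the next zero (or end of block)
    for &byte in input {
        if byte == 0 {
            out[code_idx] = code; // finalize the current block
            code_idx = out.len();
            out.push(0); // placeholder for the next code byte
            code = 1;
        } else {
            out.push(byte);
            code += 1;
            if code == 0xFF {
                // Maximum block length reached, start a new block.
                out[code_idx] = code;
                code_idx = out.len();
                out.push(0);
                code = 1;
            }
        }
    }
    out[code_idx] = code;
    out
}

fn main() {
    let packet = [0x11u8, 0x22, 0x00, 0x33];
    let mut frame = cobs_encode(&packet);
    frame.push(0); // frame delimiter on the wire
    // Expected output: [03, 11, 22, 02, 33, 00]
    println!("{:02x?}", frame);
}
```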

File diff suppressed because it is too large.


@ -1,18 +0,0 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
// Put the linker script somewhere the linker can find it
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
File::create(out.join("memory.x"))
.unwrap()
.write_all(include_bytes!("memory.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out.display());
// Only re-run the build script when memory.x is changed,
// instead of when any part of the source code changes.
println!("cargo:rerun-if-changed=memory.x");
}


@ -24,7 +24,9 @@ break main
# # send captured ITM to the file itm.fifo
# # (the microcontroller SWO pin must be connected to the programmer SWO pin)
# # 8000000 must match the core clock frequency
monitor tpiu config internal itm.txt uart off 8000000
# # 2000000 is the frequency of the SWO pin. This was added for newer
# openocd versions like v0.12.0.
# monitor tpiu config internal itm.txt uart off 8000000 2000000
# # OR: make the microcontroller SWO pin output compatible with UART (8N1)
# # 8000000 must match the core clock frequency
@ -32,7 +34,7 @@ monitor tpiu config internal itm.txt uart off 8000000
# monitor tpiu config external uart off 8000000 2000000
# # enable ITM port 0
monitor itm port 0 on
# monitor itm port 0 on
load


@ -1,4 +1,5 @@
/venv
/.tmtc-history.txt
/log
/.idea/*
!/.idea/runConfigurations


@ -1,4 +1,4 @@
{
"com_if": "serial_cobs",
"serial_baudrate": 115200
}
}

satrs-example-stm32f3-disco/pyclient/main.py Normal file → Executable file

@ -1,39 +1,40 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import enum
import struct
import logging
import sys
import time
from typing import Optional, cast
from typing import Any, Optional, cast
from prompt_toolkit.history import FileHistory, History
from spacepackets.ecss.tm import CdsShortTimestamp
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import CcsdsTmtcBackend, TcHandlerBase, ProcedureParamsWrapper
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.core.ccsds_backend import QueueWrapper
from tmtccmd.logging import add_colorlog_console_logger
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tm import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com_if import ComInterface
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
default_json_path,
SetupParams,
TmTcCfgHookBase,
TmtcDefinitionWrapper,
CoreServiceList,
OpCodeEntry,
HookBase,
params_to_procedure_conversion,
)
from tmtccmd.config.com_if import SerialCfgWrapper
from tmtccmd.config.com import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging import get_console_logger
from tmtccmd.logging.pus import (
RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper,
TimedLogWhen,
)
from tmtccmd.tc import (
from tmtccmd.tmtc import (
TcQueueEntryType,
ProcedureWrapper,
TcProcedureType,
@ -41,27 +42,26 @@ from tmtccmd.tc import (
SendCbParams,
DefaultPusQueueHelper,
)
from tmtccmd.tm.pus_5_event import Service5Tm
from tmtccmd.pus.s5_fsfw_event import Service5Tm
from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
from tmtccmd.util.tmtc_printer import FsfwTmTcPrinter
LOGGER = get_console_logger()
_LOGGER = logging.getLogger()
EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(TmTcCfgHookBase):
class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path)
def assign_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com_if import (
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (
create_com_interface_default,
create_com_interface_cfg_default,
)
assert self.cfg_path is not None
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
@ -76,35 +76,14 @@ class SatRsConfigHook(TmTcCfgHookBase):
cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg)
def get_tmtc_definitions(self) -> TmtcDefinitionWrapper:
from tmtccmd.config.globals import get_default_tmtc_defs
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return create_cmd_definition_tree()
defs = get_default_tmtc_defs()
srv_5 = OpCodeEntry()
srv_5.add("0", "Event Test")
defs.add_service(
name=CoreServiceList.SERVICE_5.value,
info="PUS Service 5 Event",
op_code_entry=srv_5,
)
srv_17 = OpCodeEntry()
srv_17.add("0", "Ping Test")
defs.add_service(
name=CoreServiceList.SERVICE_17_ALT,
info="PUS Service 17 Test",
op_code_entry=srv_17,
)
srv_3 = OpCodeEntry()
defs.add_service(
name=CoreServiceList.SERVICE_3,
info="PUS Service 3 Housekeeping",
op_code_entry=srv_3,
)
return defs
def perform_mode_operation(self, tmtc_backend: CcsdsTmtcBackend, mode: int):
LOGGER.info("Mode operation hook was called")
pass
def get_cmd_history(self) -> Optional[History]:
"""Optionlly return a history class for the past command paths which will be used
when prompting a command path from the user in CLI mode."""
return FileHistory(".tmtc-history.txt")
def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids
@ -112,74 +91,75 @@ class SatRsConfigHook(TmTcCfgHookBase):
return get_core_object_ids()
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
return root_node
class PusHandler(SpecificApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
printer: FsfwTmTcPrinter,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
self.printer = printer
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: any):
def handle_tm(self, packet: bytes, _user_args: Any):
time_reader = CdsShortTimestamp.empty()
try:
tm_packet = PusTelemetry.unpack(packet)
pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
except ValueError as e:
LOGGER.warning("Could not generate PUS TM object from raw data")
LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = tm_packet.service
dedicated_handler = False
service = pus_tm.service
tm_packet = None
if service == 1:
tm_packet = Service1Tm.unpack(data=packet, params=UnpackParams(1, 2))
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(time_reader, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
LOGGER.info(
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
LOGGER.warning(
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
dedicated_handler = True
if service == 3:
LOGGER.info("No handling for HK packets implemented")
LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet)
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, CdsShortTimestamp.empty())
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
dedicated_handler = True
_LOGGER.info("received JSON string: " + json_str.decode("utf-8"))
if service == 5:
tm_packet = Service5Tm.unpack(packet)
tm_packet = Service5Tm.unpack(packet, time_reader)
if service == 17:
tm_packet = Service17Tm.unpack(packet)
dedicated_handler = True
tm_packet = Service17Tm.unpack(packet, time_reader)
if tm_packet.subservice == 2:
self.printer.file_logger.info("Received Ping Reply TM[17,2]")
LOGGER.info("Received Ping Reply TM[17,2]")
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
self.printer.file_logger.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
LOGGER.info(
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
if tm_packet is None:
LOGGER.info(
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(packet)
self.raw_logger.log_tm(tm_packet)
if not dedicated_handler and tm_packet is not None:
self.printer.handle_long_tm_print(packet_if=tm_packet, info_if=tm_packet)
tm_packet = PusTelemetry.unpack(packet, time_reader)
self.raw_logger.log_tm(pus_tm)
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
@ -198,8 +178,11 @@ class TcHandler(TcHandlerBase):
self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=None,
queue_wrapper=QueueWrapper.empty(),
tc_sched_timestamp_len=7,
seq_cnt_provider=seq_count_provider,
pus_verificator=verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
)
def send_cb(self, send_params: SendCbParams):
@ -212,61 +195,55 @@ class TcHandler(TcHandlerBase):
)
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack()
LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
_LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry()
LOGGER.info(log_entry.log_str)
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, helper: ProcedureWrapper):
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
LOGGER.info(
f"Queue handling finished for service {def_proc.service} and "
f"op code {def_proc.op_code}"
)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, helper: ProcedureWrapper, wrapper: FeedWrapper):
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if helper.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure()
service = def_proc.service
op_code = def_proc.op_code
if (
service == CoreServiceList.SERVICE_17
or service == CoreServiceList.SERVICE_17_ALT
):
if info.proc_type == TcProcedureType.DEFAULT:
def_proc = info.to_def_procedure()
cmd_path = def_proc.cmd_path
if cmd_path == "/ping":
q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
q.add_pus_tc(PusTelecommand(service=17, subservice=1))
def main():
add_colorlog_console_logger(_LOGGER)
tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args()
post_args_wrapper = parser_wrapper.parse(hook_obj)
params = SetupParams()
post_args_wrapper = parser_wrapper.parse(hook_obj, params)
proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(params, proc_wrapper)
post_args_wrapper.set_params_without_prompts(proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(params, proc_wrapper)
post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
# Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper()
printer = FsfwTmTcPrinter(tmtc_logger.logger)
file_logger = tmtc_logger.logger
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, LOGGER, printer.file_logger)
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(verification_wrapper, printer, raw_logger)
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
@ -288,7 +265,7 @@ def main():
if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
LOGGER.info("TMTC Client in IDLE mode")
_LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8)

View File

@ -1,2 +1,2 @@
tmtccmd == 4.0.0a0
tmtccmd == 8.0.0rc.0
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd

View File

@ -1,17 +1,15 @@
#![no_std]
#![no_main]
use satrs_example_stm32f3_disco as _;
extern crate panic_itm;
use cortex_m_rt::entry;
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::stm32f3xx_hal::delay::Delay;
use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*};
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch};
#[entry]
fn main()-> ! {
#[cortex_m_rt::entry]
fn main() -> ! {
defmt::println!("STM32F3 Discovery Blinky");
let dp = pac::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
let cp = cortex_m::Peripherals::take().unwrap();
@ -30,49 +28,49 @@ fn main()-> ! {
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper
&mut gpioe.otyper,
);
let delay_ms = 200u16;
loop {
leds.ld3.toggle().ok();
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
leds.ld3.toggle().ok();
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
//explicit on/off
leds.ld4.on().ok();
leds.ld4_nw.on().ok();
delay.delay_ms(delay_ms);
leds.ld4.off().ok();
leds.ld4_nw.off().ok();
delay.delay_ms(delay_ms);
leds.ld5.on().ok();
leds.ld5_ne.on().ok();
delay.delay_ms(delay_ms);
leds.ld5.off().ok();
leds.ld5_ne.off().ok();
delay.delay_ms(delay_ms);
leds.ld6.on().ok();
leds.ld6_w.on().ok();
delay.delay_ms(delay_ms);
leds.ld6.off().ok();
delay.delay_ms(delay_ms);
leds.ld7.on().ok();
delay.delay_ms(delay_ms);
leds.ld7.off().ok();
delay.delay_ms(delay_ms);
leds.ld8.on().ok();
delay.delay_ms(delay_ms);
leds.ld8.off().ok();
delay.delay_ms(delay_ms);
leds.ld9.on().ok();
delay.delay_ms(delay_ms);
leds.ld9.off().ok();
leds.ld6_w.off().ok();
delay.delay_ms(delay_ms);
leds.ld10.on().ok();
leds.ld7_e.on().ok();
delay.delay_ms(delay_ms);
leds.ld10.off().ok();
leds.ld7_e.off().ok();
delay.delay_ms(delay_ms);
leds.ld8_sw.on().ok();
delay.delay_ms(delay_ms);
leds.ld8_sw.off().ok();
delay.delay_ms(delay_ms);
leds.ld9_se.on().ok();
delay.delay_ms(delay_ms);
leds.ld9_se.off().ok();
delay.delay_ms(delay_ms);
leds.ld10_s.on().ok();
delay.delay_ms(delay_ms);
leds.ld10_s.off().ok();
delay.delay_ms(delay_ms);
}
}

View File

@ -0,0 +1,51 @@
#![no_main]
#![no_std]
use cortex_m_semihosting::debug;
use defmt_brtt as _; // global logger
use stm32f3xx_hal as _; // memory layout
use panic_probe as _;
// Same panicking *behavior* as `panic-probe`, but doesn't print a panic message.
// This prevents the panic message from being printed *twice* when `defmt::panic` is invoked.
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(debug::EXIT_SUCCESS);
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(debug::EXIT_FAILURE);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. The module can be in any file, but there can be at most
// one `#[tests]` module in this library crate.
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}

View File

@ -1,38 +1,36 @@
#![no_std]
#![no_main]
extern crate panic_itm;
// global logger + panicking-behavior + memory layout
use satrs_example_stm32f3_disco as _;
use rtic::app;
use heapless::{
mpmc::Q16,
pool,
pool::singleton::{Box, Pool},
};
use heapless::{mpmc::Q8, Vec};
#[allow(unused_imports)]
use itm_logger::{debug, info, logger_init, warn};
use satrs_core::spacepackets::{ecss::PusPacket, tm::PusTm};
use satrs_core::{
pus::{EcssTmErrorWithSend, EcssTmSenderCore},
seq_count::SequenceCountProviderCore,
use rtic_monotonics::systick::fugit::TimerInstantU32;
use rtic_monotonics::systick::ExtU32;
use satrs::seq_count::SequenceCountProviderCore;
use satrs::{
pool::StoreError,
pus::{EcssChannel, EcssTmSenderCore, EcssTmtcError, PusTmWrapper},
spacepackets::{ecss::PusPacket, ecss::WritablePusPacket},
};
use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
use systick_monotonic::{fugit::Duration, Systick};
const UART_BAUD: u32 = 115200;
const BLINK_FREQ_MS: u64 = 1000;
const TX_HANDLER_FREQ_MS: u64 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u16 = 5;
const MAX_TC_LEN: usize = 200;
const MAX_TM_LEN: usize = 200;
const BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u32 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
const MAX_TC_LEN: usize = 128;
const MAX_TM_LEN: usize = 128;
pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type MsDuration = Duration<u64, 1, 1000>;
type InstantFugit = TimerInstantU32<1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
@ -51,10 +49,12 @@ static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
static TX_REQUESTS: Q16<(Box<poolmod::TM>, usize)> = Q16::new();
type TmPacket = Vec<u8, MAX_TM_LEN>;
type TcPacket = Vec<u8, MAX_TC_LEN>;
const TC_POOL_SLOTS: usize = 12;
const TM_POOL_SLOTS: usize = 12;
static TM_REQUESTS: Q8<TmPacket> = Q8::new();
use core::cell::RefCell;
use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef {
@ -88,70 +88,59 @@ impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed);
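
The sequence count provider above wraps an AtomicU16 so that tasks and interrupt handlers can draw CCSDS sequence counts without locking. A minimal, self-contained sketch of the same idea is shown below; it uses a made-up SeqCount trait rather than the satrs SequenceCountProviderCore trait, so the method names are illustrative only.

use core::sync::atomic::{AtomicU16, Ordering};

/// Illustrative stand-in for a sequence count provider trait.
pub trait SeqCount {
    fn get(&self) -> u16;
    fn increment(&self);
    /// Convenience helper; not atomic as a pair, which is fine for a single producer.
    fn get_and_increment(&self) -> u16 {
        let val = self.get();
        self.increment();
        val
    }
}

/// Lock-free counter which wraps around at the 14-bit CCSDS sequence count limit.
pub struct AtomicSeqCount(AtomicU16);

impl AtomicSeqCount {
    pub const fn new() -> Self {
        Self(AtomicU16::new(0))
    }
}

impl SeqCount for AtomicSeqCount {
    fn get(&self) -> u16 {
        self.0.load(Ordering::Relaxed)
    }
    fn increment(&self) {
        // CCSDS sequence counts are 14 bits wide, so wrap at 2^14.
        let _ = self
            .0
            .fetch_update(Ordering::Relaxed, Ordering::Relaxed, |v| {
                Some((v + 1) % (1 << 14))
            });
    }
}

static SEQ_COUNT: AtomicSeqCount = AtomicSeqCount::new();
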
// Otherwise, warnings because of heapless pool macro.
#[allow(non_camel_case_types)]
mod poolmod {
use super::*;
// Must hold full TC length including COBS overhead.
pool!(TC: [u8; TC_BUF_LEN]);
// Only encoded at the end, so no need to account for COBS overhead.
pool!(TM: [u8; MAX_TM_LEN]);
}
pub struct TxIdle {
tx: TxType,
dma_channel: dma1::C7,
}
#[derive(Debug)]
pub enum TmStoreError {
StoreFull,
StoreSlotsTooSmall,
}
impl From<TmStoreError> for EcssTmErrorWithSend<TmStoreError> {
fn from(value: TmStoreError) -> Self {
Self::SendError(value)
}
}
pub struct TmSender {
mem_block: Option<Box<poolmod::TM>>,
ctx: &'static str,
vec: Option<RefCell<Vec<u8, MAX_TM_LEN>>>,
}
impl TmSender {
pub fn new(mem_block: Box<poolmod::TM>, ctx: &'static str) -> Self {
pub fn new(tm_packet: TmPacket) -> Self {
Self {
mem_block: Some(mem_block),
ctx,
vec: Some(RefCell::new(tm_packet)),
}
}
}
impl EcssChannel for TmSender {
fn id(&self) -> satrs::ChannelId {
0
}
}
impl EcssTmSenderCore for TmSender {
type Error = TmStoreError;
fn send_tm(
&mut self,
tm: PusTm,
) -> Result<(), satrs_core::pus::EcssTmErrorWithSend<Self::Error>> {
let mem_block = self.mem_block.take();
if mem_block.is_none() {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
let vec = self.vec.as_ref();
if vec.is_none() {
panic!("send_tm should only be called once");
}
let mut mem_block = mem_block.unwrap();
if tm.len_packed() > MAX_TM_LEN {
return Err(EcssTmErrorWithSend::SendError(
TmStoreError::StoreSlotsTooSmall,
));
let vec_ref = vec.unwrap();
let mut vec = vec_ref.borrow_mut();
match tm {
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => {
if tm.len_written() > MAX_TM_LEN {
return Err(EcssTmtcError::Store(StoreError::DataTooLarge(
tm.len_written(),
)));
}
vec.resize(tm.len_written(), 0).expect("vec resize failed");
tm.write_to_bytes(vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm.service(),
tm.subservice(),
tm.len_written()
);
drop(vec);
TM_REQUESTS
.enqueue(vec_ref.take())
.map_err(|_| EcssTmtcError::Store(StoreError::StoreFull(0)))?;
}
}
tm.write_to_bytes(mem_block.as_mut_slice())
.map_err(|e| EcssTmErrorWithSend::EcssTmError(e.into()))?;
info!(target: self.ctx, "Sending TM[{},{}] with size {}", tm.service(), tm.subservice(), tm.len_packed());
TX_REQUESTS
.enqueue((mem_block, tm.len_packed()))
.map_err(|_| TmStoreError::StoreFull)?;
Ok(())
}
}
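
The reworked TmSender serializes each telemetry packet into a fixed-capacity heapless::Vec and pushes it onto the lock-free TM_REQUESTS queue, which the TX task later drains. A small self-contained sketch of that producer/consumer pattern, using the same heapless types but none of the satrs traits (queue depth and packet size are arbitrary here):

use heapless::{mpmc::Q8, Vec};

const MAX_PKT_LEN: usize = 128;
type Packet = Vec<u8, MAX_PKT_LEN>;

// Lock-free MPMC queue shared between the producer (TM sender) and the consumer (TX task).
static PKT_QUEUE: Q8<Packet> = Q8::new();

/// Producer side: copy the serialized packet into an owned, fixed-capacity vector
/// and enqueue it. Fails if the packet is too large or the queue is full.
fn enqueue_packet(raw: &[u8]) -> Result<(), ()> {
    let mut pkt = Packet::new();
    pkt.extend_from_slice(raw).map_err(|_| ())?;
    PKT_QUEUE.enqueue(pkt).map_err(|_| ())
}

/// Consumer side: drain one packet if available, e.g. from the UART TX task.
fn try_send_next(send: impl FnOnce(&[u8])) -> bool {
    if let Some(pkt) = PKT_QUEUE.dequeue() {
        send(&pkt);
        return true;
    }
    false
}
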
@ -163,33 +152,36 @@ pub enum UartTxState {
Transmitting(Option<TxDmaTransferType>),
}
#[app(device = stm32f3xx_hal::pac, peripherals = true, dispatchers = [TIM20_BRK, TIM20_UP, TIM20_TRG_COM])]
pub struct UartTxShared {
last_completed: Option<InstantFugit>,
state: UartTxState,
}
#[app(device = stm32f3xx_hal::pac, peripherals = true)]
mod app {
use super::*;
use core::slice::Iter;
use cortex_m::iprintln;
use satrs_core::pus::verification::FailParams;
use satrs_core::pus::verification::VerificationReporterCore;
use satrs_core::spacepackets::{
ecss::EcssEnumU16,
tc::PusTc,
time::cds::P_FIELD_BASE,
tm::{PusTm, PusTmSecondaryHeader},
CcsdsPacket, SpHeader,
use rtic_monotonics::systick::Systick;
use rtic_monotonics::Monotonic;
use satrs::pus::verification::FailParams;
use satrs::pus::verification::VerificationReporterCore;
use satrs::spacepackets::{
ecss::tc::PusTcReader, ecss::tm::PusTmCreator, ecss::tm::PusTmSecondaryHeader,
ecss::EcssEnumU16, time::cds::P_FIELD_BASE, CcsdsPacket, SpHeader,
};
#[allow(unused_imports)]
use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*;
use stm32f3xx_hal::Toggle;
use stm32f3_discovery::switch_hal::OutputSwitch;
use stm32f3xx_hal::Switch;
#[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
#[shared]
struct Shared {
tx_transfer: UartTxState,
tx_shared: UartTxShared,
rx_transfer: Option<RxDmaTransferType>,
}
@ -201,18 +193,14 @@ mod app {
curr_dir: Iter<'static, Direction>,
}
#[monotonic(binds = SysTick, default = true)]
type MonoTimer = Systick<1000>;
#[init(local = [
tc_pool_mem: [u8; TC_BUF_LEN * TC_POOL_SLOTS] = [0; TC_BUF_LEN * TC_POOL_SLOTS],
tm_pool_mem: [u8; MAX_TM_LEN * TM_POOL_SLOTS] = [0; MAX_TM_LEN * TM_POOL_SLOTS]
])]
fn init(mut cx: init::Context) -> (Shared, Local, init::Monotonics) {
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
let mut rcc = cx.device.RCC.constrain();
let mono = Systick::new(cx.core.SYST, 8_000_000);
logger_init();
// Initialize the systick interrupt & obtain the token to prove that we did
let systick_mono_token = rtic_monotonics::create_systick_token!();
Systick::start(cx.core.SYST, 8_000_000, systick_mono_token);
let mut flash = cx.device.FLASH.constrain();
let clocks = rcc
.cfgr
@ -220,15 +208,12 @@ mod app {
.sysclk(8.MHz())
.pclk1(8.MHz())
.freeze(&mut flash.acr);
// setup ITM output
iprintln!(
&mut cx.core.ITM.stim[0],
"Starting sat-rs demo application for the STM32F3-Discovery"
);
// Set up monotonic timer.
//let mono_timer = MonoTimer::new(cx.core.DWT, clocks, &mut cx.core.DCB);
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
// Assign memory to the pools.
poolmod::TC::grow(cx.local.tc_pool_mem);
poolmod::TM::grow(cx.local.tm_pool_mem);
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
@ -264,106 +249,138 @@ mod app {
clocks,
&mut rcc.apb1,
);
usart2.configure_rx_interrupt(RxEvent::Idle, Toggle::On);
usart2.configure_rx_interrupt(RxEvent::Idle, Switch::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Toggle::On);
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Switch::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (tx_serial, mut rx_serial) = usart2.split();
let (mut tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, so clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event).
rx_serial.clear_event(RxEvent::Idle);
// For some reason, this is also immediately triggered, so clear it as well.
tx_serial.clear_event(TxEvent::TransmissionComplete);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
info!(target: "init", "Spawning tasks");
defmt::info!("Spawning tasks");
blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap();
(
Shared {
tx_transfer: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
tx_shared: UartTxShared {
last_completed: None,
state: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
},
rx_transfer: Some(rx_transfer),
},
Local {
//timer: mono_timer,
leds,
last_dir: Direction::North,
curr_dir: Direction::iter(),
verif_reporter,
},
init::Monotonics(mono),
)
}
#[task(local = [leds, curr_dir, last_dir])]
fn blink(cx: blink::Context) {
let toggle_leds = |dir: &Direction| {
let leds = cx.local.leds;
let last_led = leds.for_direction(*cx.local.last_dir);
async fn blink(cx: blink::Context) {
let blink::LocalResources {
leds,
curr_dir,
last_dir,
..
} = cx.local;
let mut toggle_leds = |dir: &Direction| {
let last_led = leds.for_direction(*last_dir);
last_led.off().ok();
let led = leds.for_direction(*dir);
led.on().ok();
*cx.local.last_dir = *dir;
*last_dir = *dir;
};
match cx.local.curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*cx.local.curr_dir = Direction::iter();
toggle_leds(cx.local.curr_dir.next().unwrap());
loop {
match curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*curr_dir = Direction::iter();
toggle_leds(curr_dir.next().unwrap());
}
}
Systick::delay(BLINK_FREQ_MS.millis()).await;
}
blink::spawn_after(MsDuration::from_ticks(BLINK_FREQ_MS)).unwrap();
}
#[task(
shared = [tx_transfer],
local = []
shared = [tx_shared],
)]
fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
if let Some((buf, len)) = TX_REQUESTS.dequeue() {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(tx) => {
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
let mut_tx_dma_buf = unsafe { &mut DMA_TX_BUF };
// 0 sentinel value as start marker
mut_tx_dma_buf[0] = 0;
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
let encoded_len = cobs::encode(&buf[0..len], &mut mut_tx_dma_buf[1..]);
// 0 end marker
mut_tx_dma_buf[encoded_len + 1] = 0;
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
let transfer = tx_idle
.tx
.write_all(&mut_tx_dma_buf[0..encoded_len + 2], tx_idle.dma_channel);
*tx_state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => {
// This is a SW configuration error. Only the ISR which
// detects transfer completion should be able to spawn a new
// task, and that ISR should set the state to IDLE.
panic!("invalid internal tx state detected")
}
})
} else {
cx.shared.tx_transfer.lock(|tx_state| {
if let UartTxState::Idle(_) = tx_state {
serial_tx_handler::spawn_after(MsDuration::from_ticks(TX_HANDLER_FREQ_MS))
.unwrap();
async fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
loop {
let is_idle = cx.shared.tx_shared.lock(|tx_shared| {
if let UartTxState::Idle(_) = tx_shared.state {
return true;
}
false
});
if is_idle {
let last_completed = cx.shared.tx_shared.lock(|shared| shared.last_completed);
if let Some(last_completed) = last_completed {
let elapsed_ms = (Systick::now() - last_completed).to_millis();
if elapsed_ms < MIN_DELAY_BETWEEN_TX_PACKETS_MS {
Systick::delay((MIN_DELAY_BETWEEN_TX_PACKETS_MS - elapsed_ms).millis())
.await;
}
}
} else {
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
if let Some(vec) = TM_REQUESTS.dequeue() {
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(tx) => {
let encoded_len;
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
unsafe {
// 0 sentinel value as start marker
DMA_TX_BUF[0] = 0;
encoded_len =
cobs::encode(&vec[0..vec.len()], &mut DMA_TX_BUF[1..]);
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
// 0 end marker
DMA_TX_BUF[encoded_len + 1] = 0;
}
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
// SAFETY: The DMA is the exclusive writer to the DMA buffer now.
let transfer = tx_idle.tx.write_all(
unsafe { &DMA_TX_BUF[0..encoded_len + 2] },
tx_idle.dma_channel,
);
tx_shared.state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => (),
});
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
// Nothing to do, and we are idle.
Systick::delay(TX_HANDLER_FREQ_MS.millis()).await;
}
}
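
The TX handler frames every packet with COBS and surrounds it with 0x00 sentinel bytes so the receiver can resynchronize on packet boundaries. The following sketch shows the same framing on plain byte buffers with the cobs crate used above; it assumes the caller provides a frame buffer large enough for the payload plus COBS overhead, and that cobs::decode tolerates the trailing sentinel, as in the RX handler of this file.

// Frame `payload` as [0x00, COBS(payload), 0x00] into `frame_buf` and return the
// total frame length. COBS guarantees the encoded body contains no zero bytes,
// so the sentinels unambiguously delimit packets on the wire.
fn frame_packet(payload: &[u8], frame_buf: &mut [u8]) -> usize {
    frame_buf[0] = 0; // start sentinel
    let encoded_len = cobs::encode(payload, &mut frame_buf[1..]);
    frame_buf[encoded_len + 1] = 0; // end sentinel
    encoded_len + 2
}

// Strip leading sentinels and decode the COBS body back into `decode_buf`.
fn unframe_packet(frame: &[u8], decode_buf: &mut [u8]) -> Option<usize> {
    // Skip leading zero sentinel(s) to find the start of the encoded body.
    let start = frame.iter().position(|&b| b != 0)?;
    cobs::decode(&frame[start..], decode_buf).ok()
}

// Sizing note: a 200 byte frame buffer comfortably holds a 128 byte payload plus
// COBS overhead (roughly one extra byte per 254 payload bytes) and both sentinels.
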
@ -375,14 +392,13 @@ mod app {
verif_reporter
],
)]
fn serial_rx_handler(
async fn serial_rx_handler(
cx: serial_rx_handler::Context,
received_packet: Box<poolmod::TC>,
rx_len: usize,
received_packet: Vec<u8, MAX_TC_LEN>,
) {
let tgt: &'static str = "serial_rx_handler";
defmt::info!("running rx handler");
cx.local.stamp_buf[0] = P_FIELD_BASE;
info!(target: tgt, "Received packet with {} bytes", rx_len);
defmt::info!("Received packet with {} bytes", received_packet.len());
let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice();
let mut start_idx = None;
@ -393,17 +409,14 @@ mod app {
}
}
if start_idx.is_none() {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames, data is all 0"
);
defmt::warn!("decoding error, can only process cobs encoded frames, data is all 0");
return;
}
let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => {
info!(target: tgt, "Decoded packet length: {}", len);
let pus_tc = PusTc::from_bytes(decode_buf);
defmt::info!("Decoded packet length: {}", len);
let pus_tc = PusTcReader::new(decode_buf);
let verif_reporter = cx.local.verif_reporter;
match pus_tc {
Ok((tc, tc_len)) => handle_tc(
@ -412,32 +425,27 @@ mod app {
verif_reporter,
cx.local.src_data_buf,
cx.local.stamp_buf,
tgt,
),
Err(e) => {
warn!(target: tgt, "Error unpacking PUS TC: {}", e);
Err(_e) => {
// TODO: Print error after API rework.
defmt::warn!("Error unpacking PUS TC");
}
}
}
Err(_) => {
warn!(
target: tgt,
"decoding error, can only process cobs encoded frames"
)
defmt::warn!("decoding error, can only process cobs encoded frames")
}
}
}
fn handle_tc(
tc: PusTc,
tc: PusTcReader,
tc_len: usize,
verif_reporter: &mut VerificationReporterCore,
src_data_buf: &mut [u8; MAX_TM_LEN],
stamp_buf: &[u8; 7],
tgt: &'static str,
) {
info!(
target: tgt,
defmt::info!(
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
@ -446,38 +454,32 @@ mod app {
let token = verif_reporter.add_tc(&tc);
if tc.apid() != PUS_APID {
warn!(target: tgt, "Received tc with unknown APID {}", tc.apid());
defmt::warn!("Received tc with unknown APID {}", tc.apid());
let sendable = verif_reporter
.acceptance_failure(
src_data_buf,
token,
&SEQ_COUNT_PROVIDER,
FailParams::new(stamp_buf, &EcssEnumU16::new(0), None),
SEQ_COUNT_PROVIDER.get(),
0,
FailParams::new(stamp_buf, &EcssEnumU16::new(0), &[]),
)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) =
verif_reporter.send_acceptance_failure(sendable, &SEQ_COUNT_PROVIDER, &mut sender)
{
warn!(target: tgt, "Sending acceptance failure failed: {:?}", e.0);
let sender = TmSender::new(TmPacket::new());
if let Err(_e) = verif_reporter.send_acceptance_failure(sendable, &sender) {
defmt::warn!("Sending acceptance failure failed");
};
return;
}
let sendable = verif_reporter
.acceptance_success(src_data_buf, token, &SEQ_COUNT_PROVIDER, stamp_buf)
.acceptance_success(src_data_buf, token, SEQ_COUNT_PROVIDER.get(), 0, stamp_buf)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let accepted_token = match verif_reporter.send_acceptance_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
let sender = TmSender::new(TmPacket::new());
let accepted_token = match verif_reporter.send_acceptance_success(sendable, &sender) {
Ok(token) => token,
Err(e) => {
warn!(target: "serial_rx_handler", "Sending acceptance success failed: {:?}", e.0);
Err(_e) => {
// TODO: Print the error as soon as EcssTmtcError derives defmt::Format, or rework the API.
defmt::warn!("Sending acceptance success failed");
return;
}
};
@ -485,47 +487,51 @@ mod app {
if tc.service() == 17 {
if tc.subservice() == 1 {
let sendable = verif_reporter
.start_success(src_data_buf, accepted_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.start_success(
src_data_buf,
accepted_token,
SEQ_COUNT_PROVIDER.get(),
0,
stamp_buf,
)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
let started_token = match verif_reporter.send_start_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
// let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let sender = TmSender::new(TmPacket::new());
let started_token = match verif_reporter.send_start_success(sendable, &sender) {
Ok(token) => token,
Err(e) => {
warn!(target: tgt, "Sending acceptance success failed: {:?}", e.0);
Err(_e) => {
// TODO: Print the error as soon as EcssTmtcError derives defmt::Format, or rework the API.
defmt::warn!("Sending acceptance success failed");
return;
}
};
info!(
target: tgt,
"Received PUS ping telecommand, sending ping reply TM[17,2]"
);
defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
let mut sp_header =
SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
let ping_reply = PusTm::new(&mut sp_header, sec_header, None, true);
let mut mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let reply_len = ping_reply.write_to_bytes(mem_block.as_mut_slice()).unwrap();
if TX_REQUESTS.enqueue((mem_block, reply_len)).is_err() {
warn!(target: tgt, "TC queue full");
let ping_reply = PusTmCreator::new(&mut sp_header, sec_header, &[], true);
let mut tm_packet = TmPacket::new();
tm_packet
.resize(ping_reply.len_written(), 0)
.expect("vec resize failed");
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
if TM_REQUESTS.enqueue(tm_packet).is_err() {
defmt::warn!("TC queue full");
return;
}
SEQ_COUNT_PROVIDER.increment();
let sendable = verif_reporter
.completion_success(src_data_buf, started_token, &SEQ_COUNT_PROVIDER, stamp_buf)
.completion_success(
src_data_buf,
started_token,
SEQ_COUNT_PROVIDER.get(),
0,
stamp_buf,
)
.unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt);
if let Err(e) = verif_reporter.send_step_or_completion_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
warn!(target: tgt, "Sending completion success failed: {:?}", e.0);
let sender = TmSender::new(TmPacket::new());
if let Err(_e) = verif_reporter.send_step_or_completion_success(sendable, &sender) {
defmt::warn!("Sending completion success failed");
}
} else {
// TODO: Invalid subservice
@ -535,26 +541,26 @@ mod app {
#[task(binds = DMA1_CH6, shared = [rx_transfer])]
fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
let mut tc_packet = TcPacket::new();
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_ref = rx_transfer.as_ref().unwrap();
if rx_ref.is_complete() {
let uart_rx_owned = rx_transfer.take().unwrap();
let (buf, c, rx) = uart_rx_owned.stop();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// during the interrupt. There are multiple ways to do this; we use a stack-allocated vector here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block.copy_from_slice(buf);
tc_packet.resize(buf.len(), 0).expect("vec resize failed");
tc_packet.copy_from_slice(buf);
// Start the next transfer as soon as possible.
*rx_transfer = Some(rx.read_exact(buf, c));
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, TC_BUF_LEN)
.expect("spawning rx handler task failed");
// Send the vector to a regular task.
serial_rx_handler::spawn(tc_packet).expect("spawning rx handler task failed");
// If this happens, there is a high chance that the maximum packet length was
// exceeded. Circular mode is not used here, so data might be missed.
warn!(
defmt::warn!(
"rx transfer with maximum length {}, might miss data",
TC_BUF_LEN
);
@ -562,23 +568,26 @@ mod app {
});
}
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_transfer])]
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_shared])]
fn serial_isr(mut cx: serial_isr::Context) {
cx.shared.tx_transfer.lock(|tx_state| match tx_state {
UartTxState::Idle(_) => (),
UartTxState::Transmitting(transfer) => {
let transfer_ref = transfer.as_ref().unwrap();
if transfer_ref.is_complete() {
let transfer = transfer.take().unwrap();
let (_, dma_channel, tx) = transfer.stop();
*tx_state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
serial_tx_handler::spawn_after(MsDuration::from_ticks(
MIN_DELAY_BETWEEN_TX_PACKETS_MS.into(),
))
.unwrap();
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(_) => (),
UartTxState::Transmitting(transfer) => {
let transfer_ref = transfer.as_ref().unwrap();
if transfer_ref.is_complete() {
let transfer = transfer.take().unwrap();
let (_, dma_channel, mut tx) = transfer.stop();
tx.clear_event(TxEvent::TransmissionComplete);
tx_shared.state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
// We cache the last completed time to ensure that there is a minimum delay between consecutively
// transferred packets.
tx_shared.last_completed = Some(Systick::now());
}
}
}
});
});
let mut tc_packet = TcPacket::new();
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_transfer_ref = rx_transfer.as_ref().unwrap();
// Received a partial packet.
@ -586,17 +595,14 @@ mod app {
let rx_transfer_owned = rx_transfer.take().unwrap();
let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a memory pool here
// to do this.
let mut mem_block = poolmod::TC::alloc()
.expect("allocating memory block for rx failed")
.init([0u8; TC_BUF_LEN]);
// Copy data into memory pool.
mem_block[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
// during the interrupt. There are multiple ways to do this; we use a stack
// allocated vector to do this.
tc_packet
.resize(rx_len as usize, 0)
.expect("vec resize failed");
tc_packet[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
rx.clear_event(RxEvent::Idle);
// Only send owning pointer to pool memory and the received packet length.
serial_rx_handler::spawn(mem_block, rx_len as usize)
.expect("spawning rx handler task failed");
serial_rx_handler::spawn(tc_packet).expect("spawning rx handler failed");
*rx_transfer = Some(rx.read_exact(buf, ch));
}
});
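
The ping reply above is assembled from an SpHeader and a PUS secondary header before it is queued for transmission. For reference, the 6-byte CCSDS space packet primary header behind SpHeader packs its fields as in this self-contained sketch (layout per CCSDS 133.0-B; the helper function is made up for illustration):

/// Pack a CCSDS space packet primary header (6 bytes).
/// Version: 3 bits, packet type (TC=1/TM=0): 1 bit, secondary header flag: 1 bit,
/// APID: 11 bits, sequence flags: 2 bits, sequence count: 14 bits,
/// data length field: total packet length minus 7.
fn pack_primary_header(
    is_tc: bool,
    sec_hdr: bool,
    apid: u16,
    seq_flags: u8,
    seq_count: u16,
    total_len: usize,
) -> [u8; 6] {
    let mut hdr = [0u8; 6];
    // Packet version number stays 0b000 in the top three bits.
    let packet_id: u16 = ((is_tc as u16) << 12) | ((sec_hdr as u16) << 11) | (apid & 0x7FF);
    let psc: u16 = ((seq_flags as u16 & 0b11) << 14) | (seq_count & 0x3FFF);
    let len_field = (total_len - 7) as u16;
    hdr[0..2].copy_from_slice(&packet_id.to_be_bytes());
    hdr[2..4].copy_from_slice(&psc.to_be_bytes());
    hdr[4..6].copy_from_slice(&len_field.to_be_bytes());
    hdr
}

// Example: an unsegmented TM packet (sequence flags 0b11) with APID 0x02.
// let hdr = pack_primary_header(false, true, 0x02, 0b11, seq_count, total_len);
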

View File

@ -5,8 +5,8 @@
// List of extensions which should be recommended for users of this workspace.
"recommendations": [
"rust-lang.rust",
"marus25.cortex-debug",
"probe-rs.probe-rs-debugger"
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": []
}
}

View File

@ -0,0 +1,22 @@
{
"version": "0.2.0",
"configurations": [
{
"preLaunchTask": "${defaultBuildTask}",
"type": "probe-rs-debug",
"request": "launch",
"name": "probe-rs Debugging ",
"flashingConfig": {
"flashingEnabled": true
},
"chip": "STM32F303VCTx",
"coreConfigs": [
{
"programBinary": "${workspaceFolder}/target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"rttEnabled": true,
"svdFile": "STM32F303.svd"
}
]
}
]
}

View File

@ -11,7 +11,8 @@ proc CDSWOConfigure { CDCPUFreqHz CDSWOFreqHz CDSWOOutput } {
# Alternative option: Pipe ITM output into itm.txt file
# tpiu config internal itm.txt uart off $CDCPUFreqHz
# Default option so SWO display of VS code works.
# Default option so the SWO display of VS Code works. Please note that this might not be required
# anymore starting with openocd v0.12.0.
tpiu config internal $CDSWOOutput uart off $CDCPUFreqHz $CDSWOFreqHz
itm port 0 on
}

View File

@ -1,6 +1,6 @@
[package]
name = "satrs-example"
version = "0.1.0"
version = "0.1.1"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
default-run = "satrs-example"
@ -20,11 +20,11 @@ thiserror = "1"
derive-new = "0.5"
[dependencies.satrs]
# version = "0.1.1"
# version = "0.2.0-rc.0"
path = "../satrs"
[dependencies.satrs-mib]
# version = "0.1.0"
version = "0.1.1"
path = "../satrs-mib"
[features]

View File

@ -1,7 +1,7 @@
use std::sync::mpsc::{self, TryRecvError};
use log::{info, warn};
use satrs::pus::verification::{VerificationReporterWithSender, VerificationReportingProvider};
use satrs::pus::verification::VerificationReportingProvider;
use satrs::pus::{EcssTmSender, PusTmWrapper};
use satrs::request::TargetAndApidId;
use satrs::spacepackets::ecss::hk::Subservice as HkSubservice;
@ -9,7 +9,7 @@ use satrs::{
hk::HkRequest,
spacepackets::{
ecss::tm::{PusTmCreator, PusTmSecondaryHeader},
time::cds::{DaysLen16Bits, TimeProvider},
time::cds::{CdsTime, DaysLen16Bits},
SequenceFlags, SpHeader,
},
};
@ -21,23 +21,23 @@ use crate::{
update_time,
};
pub struct AcsTask {
pub struct AcsTask<VerificationReporter: VerificationReportingProvider> {
timestamp: [u8; 7],
time_provider: TimeProvider<DaysLen16Bits>,
verif_reporter: VerificationReporterWithSender,
time_provider: CdsTime<DaysLen16Bits>,
verif_reporter: VerificationReporter,
tm_sender: Box<dyn EcssTmSender>,
request_rx: mpsc::Receiver<RequestWithToken>,
}
impl AcsTask {
impl<VerificationReporter: VerificationReportingProvider> AcsTask<VerificationReporter> {
pub fn new(
tm_sender: impl EcssTmSender,
request_rx: mpsc::Receiver<RequestWithToken>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporter,
) -> Self {
Self {
timestamp: [0; 7],
time_provider: TimeProvider::new_with_u16_days(0, 0),
time_provider: CdsTime::new_with_u16_days(0, 0),
verif_reporter,
tm_sender: Box::new(tm_sender),
request_rx,

View File

@ -1,68 +1,63 @@
use std::sync::mpsc::{self, SendError};
use std::sync::mpsc::{self};
use satrs::{
event_man::{
EventManager, EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider,
SendEventProvider,
EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded,
MpscEventReceiver,
},
events::EventU32,
params::Params,
pus::{
event_man::{
DefaultPusMgmtBackendProvider, EventReporter, EventRequest, EventRequestWithToken,
PusEventDispatcher,
},
verification::{
TcStateStarted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken,
},
verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
EcssTmSender,
},
spacepackets::time::cds::{self, TimeProvider},
spacepackets::time::cds::{self, CdsTime},
};
use satrs_example::config::PUS_APID;
use crate::update_time;
pub type MpscEventManager = EventManager<SendError<(EventU32, Option<Params>)>>;
pub struct PusEventHandler {
pub struct PusEventHandler<VerificationReporter: VerificationReportingProvider> {
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
pus_event_dispatcher: PusEventDispatcher<(), EventU32>,
pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>,
pus_event_man_rx: mpsc::Receiver<(EventU32, Option<Params>)>,
tm_sender: Box<dyn EcssTmSender>,
time_provider: TimeProvider,
time_provider: CdsTime,
timestamp: [u8; 7],
verif_handler: VerificationReporterWithSender,
verif_handler: VerificationReporter,
}
/*
*/
impl PusEventHandler {
impl<VerificationReporter: VerificationReportingProvider> PusEventHandler<VerificationReporter> {
pub fn new(
verif_handler: VerificationReporterWithSender,
event_manager: &mut MpscEventManager,
verif_handler: VerificationReporter,
event_manager: &mut EventManagerWithBoundedMpsc,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
tm_sender: impl EcssTmSender,
) -> Self {
let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel();
let event_queue_cap = 30;
let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
// All events sent to the manager are routed to the PUS event manager, which generates PUS event
// telemetry for each event.
let event_reporter = EventReporter::new(PUS_APID, 128).unwrap();
let pus_tm_backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
let pus_event_dispatcher =
PusEventDispatcher::new(event_reporter, Box::new(pus_tm_backend));
let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter);
let pus_event_man_send_provider =
EventU32SenderMpscBounded::new(1, pus_event_man_tx, event_queue_cap);
event_manager.subscribe_all(pus_event_man_send_provider.id());
event_manager.subscribe_all(pus_event_man_send_provider.channel_id());
event_manager.add_sender(pus_event_man_send_provider);
Self {
event_request_rx,
pus_event_dispatcher,
pus_event_man_rx,
time_provider: cds::TimeProvider::new_with_u16_days(0, 0),
time_provider: cds::CdsTime::new_with_u16_days(0, 0),
timestamp: [0; 7],
verif_handler,
tm_sender: Box::new(tm_sender),
@ -117,7 +112,7 @@ impl PusEventHandler {
}
pub struct EventManagerWrapper {
event_manager: MpscEventManager,
event_manager: EventManagerWithBoundedMpsc,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
}
@ -128,7 +123,7 @@ impl EventManagerWrapper {
let (event_sender, event_man_rx) = mpsc::channel();
let event_recv = MpscEventReceiver::<EventU32>::new(event_man_rx);
Self {
event_manager: EventManagerWithMpscQueue::new(Box::new(event_recv)),
event_manager: EventManagerWithBoundedMpsc::new(event_recv),
event_sender,
}
}
@ -137,7 +132,7 @@ impl EventManagerWrapper {
self.event_sender.clone()
}
pub fn event_manager(&mut self) -> &mut MpscEventManager {
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
&mut self.event_manager
}
@ -149,15 +144,15 @@ impl EventManagerWrapper {
}
}
pub struct EventHandler {
pub struct EventHandler<VerificationReporter: VerificationReportingProvider> {
pub event_man_wrapper: EventManagerWrapper,
pub pus_event_handler: PusEventHandler,
pub pus_event_handler: PusEventHandler<VerificationReporter>,
}
impl EventHandler {
impl<VerificationReporter: VerificationReportingProvider> EventHandler<VerificationReporter> {
pub fn new(
tm_sender: impl EcssTmSender,
verif_handler: VerificationReporterWithSender,
verif_handler: VerificationReporter,
event_request_rx: mpsc::Receiver<EventRequestWithToken>,
) -> Self {
let mut event_man_wrapper = EventManagerWrapper::new();
@ -178,7 +173,7 @@ impl EventHandler {
}
#[allow(dead_code)]
pub fn event_manager(&mut self) -> &mut MpscEventManager {
pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
self.event_man_wrapper.event_manager()
}
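
The EventHandler above wires the event manager to the PUS event dispatcher through a bounded channel: components report events via cloned senders, the manager fans each event out to its subscribers, and the PUS side turns them into service 5 telemetry. A rough, self-contained sketch of that fan-out pattern with plain std::sync::mpsc channels (not the satrs types):

use std::sync::mpsc;

type Event = u32;

// Components report events through clones of the returned sender. The manager
// forwards each event to all subscribed sinks; one of those sinks would be the
// PUS service 5 telemetry generator.
struct EventManagerSketch {
    rx: mpsc::Receiver<Event>,
    subscribers: Vec<mpsc::SyncSender<Event>>,
}

impl EventManagerSketch {
    fn new() -> (mpsc::Sender<Event>, Self) {
        let (tx, rx) = mpsc::channel();
        (tx, Self { rx, subscribers: Vec::new() })
    }

    fn subscribe(&mut self, cap: usize) -> mpsc::Receiver<Event> {
        // Bounded channel: a slow subscriber exerts back pressure instead of
        // letting its queue grow without limit.
        let (tx, rx) = mpsc::sync_channel(cap);
        self.subscribers.push(tx);
        rx
    }

    fn route_all(&mut self) {
        while let Ok(event) = self.rx.try_recv() {
            for sub in &self.subscribers {
                // Full or disconnected subscribers are simply skipped in this sketch.
                let _ = sub.try_send(event);
            }
        }
    }
}
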

View File

@ -44,8 +44,8 @@ use crate::tmtc::{
use crate::udp::{StaticUdpTmHandler, UdpTmtcServer};
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender};
use satrs::pus::{EcssTmSender, MpscTmAsVecSender, MpscTmInSharedPoolSender};
use satrs::spacepackets::{time::cds::TimeProvider, time::TimeWriter};
use satrs::pus::{EcssTmSender, TmAsVecSenderWithId, TmInSharedPoolSenderWithId};
use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
use satrs::tmtc::CcsdsDistributor;
use satrs::ChannelId;
use std::net::{IpAddr, SocketAddr};
@ -54,11 +54,13 @@ use std::sync::{Arc, RwLock};
use std::thread;
use std::time::Duration;
fn create_verification_reporter(verif_sender: impl EcssTmSender) -> VerificationReporterWithSender {
fn create_verification_reporter<Sender: EcssTmSender + Clone>(
verif_sender: Sender,
) -> VerificationReporterWithSender<Sender> {
let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry gets a cloned
// verification reporter.
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender))
VerificationReporterWithSender::new(&verif_cfg, verif_sender)
}
#[allow(dead_code)]
@ -68,13 +70,13 @@ fn static_tmtc_pool_main() {
let shared_tc_pool = SharedTcPool {
pool: Arc::new(RwLock::new(tc_pool)),
};
let (tc_source_tx, tc_source_rx) = channel();
let (tm_funnel_tx, tm_funnel_rx) = channel();
let (tm_server_tx, tm_server_rx) = channel();
let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50);
let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);
// Every software component which needs to generate verification telemetry receives a cloned
// verification reporter.
let verif_reporter = create_verification_reporter(MpscTmInSharedPoolSender::new(
let verif_reporter = create_verification_reporter(TmInSharedPoolSenderWithId::new(
TmSenderId::PusVerification as ChannelId,
"verif_sender",
shared_tm_pool.clone(),
@ -102,7 +104,7 @@ fn static_tmtc_pool_main() {
// The event task is the core handler to perform the event routing and TM handling as specified
// in the sat-rs documentation.
let mut event_handler = EventHandler::new(
MpscTmInSharedPoolSender::new(
TmInSharedPoolSenderWithId::new(
TmSenderId::AllEvents as ChannelId,
"ALL_EVENTS_TX",
shared_tm_pool.clone(),
@ -180,7 +182,7 @@ fn static_tmtc_pool_main() {
);
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
@ -191,7 +193,7 @@ fn static_tmtc_pool_main() {
},
};
let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
@ -202,7 +204,7 @@ fn static_tmtc_pool_main() {
.expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmInSharedPoolSender::new(
TmInSharedPoolSenderWithId::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
shared_tm_pool.clone(),
@ -303,7 +305,7 @@ fn dyn_tmtc_pool_main() {
let (tm_server_tx, tm_server_rx) = channel();
// Every software component which needs to generate verification telemetry gets a cloned
// verification reporter.
let verif_reporter = create_verification_reporter(MpscTmAsVecSender::new(
let verif_reporter = create_verification_reporter(TmAsVecSenderWithId::new(
TmSenderId::PusVerification as ChannelId,
"verif_sender",
tm_funnel_tx.clone(),
@ -324,7 +326,7 @@ fn dyn_tmtc_pool_main() {
// The event task is the core handler to perform the event routing and TM handling as specified
// in the sat-rs documentation.
let mut event_handler = EventHandler::new(
MpscTmAsVecSender::new(
TmAsVecSenderWithId::new(
TmSenderId::AllEvents as ChannelId,
"ALL_EVENTS_TX",
tm_funnel_tx.clone(),
@ -394,7 +396,7 @@ fn dyn_tmtc_pool_main() {
);
let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let udp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver.clone()));
let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
.expect("creating UDP TMTC server failed");
let mut udp_tmtc_server = UdpTmtcServer {
@ -404,7 +406,7 @@ fn dyn_tmtc_pool_main() {
},
};
let tcp_ccsds_distributor = CcsdsDistributor::new(Box::new(ccsds_receiver));
let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
let sync_tm_tcp_source = SyncTcpTmSource::new(200);
let mut tcp_server = TcpTask::new(
@ -415,7 +417,7 @@ fn dyn_tmtc_pool_main() {
.expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmAsVecSender::new(
TmAsVecSenderWithId::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
tm_funnel_tx.clone(),
@ -511,7 +513,7 @@ fn main() {
dyn_tmtc_pool_main();
}
pub fn update_time(time_provider: &mut TimeProvider, timestamp: &mut [u8]) {
pub fn update_time(time_provider: &mut CdsTime, timestamp: &mut [u8]) {
time_provider
.update_from_now()
.expect("Could not get current time");

View File

@ -2,14 +2,17 @@ use log::{error, warn};
use satrs::action::ActionRequest;
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::action::{PusActionToRequestConverter, PusService8ActionHandler};
use satrs::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper,
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc,
TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId,
};
use satrs::request::TargetAndApidId;
use satrs::spacepackets::ecss::tc::PusTcReader;
@ -74,13 +77,18 @@ impl PusActionToRequestConverter for ExampleActionRequestConverter {
pub fn create_action_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
tc_pool: SharedStaticMemoryPool,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
) -> Pus8Wrapper<EcssTcInSharedStoreConverter> {
let action_srv_tm_sender = MpscTmInSharedPoolSender::new(
) -> Pus8Wrapper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
> {
let action_srv_tm_sender = TmInSharedPoolSenderWithId::new(
TmSenderId::PusAction as ChannelId,
"PUS_8_TM_SENDER",
shared_tm_store.clone(),
@ -93,8 +101,8 @@ pub fn create_action_service_static(
);
let pus_8_handler = PusService8ActionHandler::new(
PusServiceHelper::new(
Box::new(action_srv_receiver),
Box::new(action_srv_tm_sender),
action_srv_receiver,
action_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
@ -108,11 +116,16 @@ pub fn create_action_service_static(
pub fn create_action_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporterWithVecMpscSender,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
) -> Pus8Wrapper<EcssTcInVecConverter> {
let action_srv_tm_sender = MpscTmAsVecSender::new(
) -> Pus8Wrapper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
> {
let action_srv_tm_sender = TmAsVecSenderWithId::new(
TmSenderId::PusAction as ChannelId,
"PUS_8_TM_SENDER",
tm_funnel_tx.clone(),
@ -124,8 +137,8 @@ pub fn create_action_service_dynamic(
);
let pus_8_handler = PusService8ActionHandler::new(
PusServiceHelper::new(
Box::new(action_srv_receiver),
Box::new(action_srv_tm_sender),
action_srv_receiver,
action_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInVecConverter::default(),
@ -137,17 +150,30 @@ pub fn create_action_service_dynamic(
Pus8Wrapper { pus_8_handler }
}
pub struct Pus8Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub struct Pus8Wrapper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub(crate) pus_8_handler: PusService8ActionHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporterWithSender,
VerificationReporter,
ExampleActionRequestConverter,
GenericRequestRouter,
GenericRoutingErrorHandler<8>,
>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus8Wrapper<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_8_handler.handle_one_tc() {
Ok(result) => match result {

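
The service wrapper refactor above replaces boxed trait objects such as Box<dyn EcssTmSenderCore> with generic type parameters, trading shorter type signatures for static dispatch and no heap allocation. A minimal sketch of the pattern with a made-up Sender trait instead of the satrs traits:

// Made-up trait standing in for something like EcssTmSenderCore.
trait Sender {
    fn send(&self, data: &[u8]);
}

// Before: dynamic dispatch through a boxed trait object.
#[allow(dead_code)]
struct ServiceDyn {
    sender: Box<dyn Sender>,
}

// After: static dispatch through a generic parameter. The concrete sender type
// becomes part of the service type, as seen in the Pus8Wrapper signature above.
struct ServiceGeneric<S: Sender> {
    sender: S,
}

impl<S: Sender> ServiceGeneric<S> {
    fn new(sender: S) -> Self {
        Self { sender }
    }
    fn handle_one(&self, packet: &[u8]) {
        // Monomorphized call: no vtable lookup and no heap allocation for the sender.
        self.sender.send(packet);
    }
}
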
View File

@ -4,11 +4,15 @@ use log::{error, warn};
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusService5EventHandler;
use satrs::pus::verification::VerificationReporterWithSender;
use satrs::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::verification::VerificationReportingProvider;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusServiceHelper,
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
TmInSharedPoolSenderWithId,
};
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::ChannelId;
@ -16,13 +20,18 @@ use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
pub fn create_event_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
tc_pool: SharedStaticMemoryPool,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> Pus5Wrapper<EcssTcInSharedStoreConverter> {
let event_srv_tm_sender = MpscTmInSharedPoolSender::new(
) -> Pus5Wrapper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
> {
let event_srv_tm_sender = TmInSharedPoolSenderWithId::new(
TmSenderId::PusEvent as ChannelId,
"PUS_5_TM_SENDER",
shared_tm_store.clone(),
@ -35,8 +44,8 @@ pub fn create_event_service_static(
);
let pus_5_handler = PusService5EventHandler::new(
PusServiceHelper::new(
Box::new(event_srv_receiver),
Box::new(event_srv_tm_sender),
event_srv_receiver,
event_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
@ -48,11 +57,16 @@ pub fn create_event_service_static(
pub fn create_event_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporterWithVecMpscSender,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> Pus5Wrapper<EcssTcInVecConverter> {
let event_srv_tm_sender = MpscTmAsVecSender::new(
) -> Pus5Wrapper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
> {
let event_srv_tm_sender = TmAsVecSenderWithId::new(
TmSenderId::PusEvent as ChannelId,
"PUS_5_TM_SENDER",
tm_funnel_tx,
@ -64,8 +78,8 @@ pub fn create_event_service_dynamic(
);
let pus_5_handler = PusService5EventHandler::new(
PusServiceHelper::new(
Box::new(event_srv_receiver),
Box::new(event_srv_tm_sender),
event_srv_receiver,
event_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInVecConverter::default(),
@ -75,11 +89,23 @@ pub fn create_event_service_dynamic(
Pus5Wrapper { pus_5_handler }
}
pub struct Pus5Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_5_handler: PusService5EventHandler<TcInMemConverter, VerificationReporterWithSender>,
pub struct Pus5Wrapper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub pus_5_handler:
PusService5EventHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus5Wrapper<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_5_handler.handle_one_tc() {
Ok(result) => match result {

@ -2,14 +2,17 @@ use log::{error, warn};
use satrs::hk::{CollectionIntervalFactor, HkRequest};
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::hk::{PusHkToRequestConverter, PusService3HkHandler};
use satrs::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReporterWithSender, VerificationReportingProvider,
VerificationToken,
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper,
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult,
PusPacketHandlingError, PusServiceHelper, TmAsVecSenderWithId, TmAsVecSenderWithMpsc,
TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId,
};
use satrs::request::TargetAndApidId;
use satrs::spacepackets::ecss::tc::PusTcReader;
@ -143,13 +146,18 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
pub fn create_hk_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
tc_pool: SharedStaticMemoryPool,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
) -> Pus3Wrapper<EcssTcInSharedStoreConverter> {
let hk_srv_tm_sender = MpscTmInSharedPoolSender::new(
) -> Pus3Wrapper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
> {
let hk_srv_tm_sender = TmInSharedPoolSenderWithId::new(
TmSenderId::PusHk as ChannelId,
"PUS_3_TM_SENDER",
shared_tm_store.clone(),
@ -159,8 +167,8 @@ pub fn create_hk_service_static(
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
let pus_3_handler = PusService3HkHandler::new(
PusServiceHelper::new(
Box::new(hk_srv_receiver),
Box::new(hk_srv_tm_sender),
hk_srv_receiver,
hk_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
@ -174,11 +182,16 @@ pub fn create_hk_service_static(
pub fn create_hk_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporterWithVecMpscSender,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
) -> Pus3Wrapper<EcssTcInVecConverter> {
let hk_srv_tm_sender = MpscTmAsVecSender::new(
) -> Pus3Wrapper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
> {
let hk_srv_tm_sender = TmAsVecSenderWithId::new(
TmSenderId::PusHk as ChannelId,
"PUS_3_TM_SENDER",
tm_funnel_tx.clone(),
@ -187,8 +200,8 @@ pub fn create_hk_service_dynamic(
MpscTcReceiver::new(TcReceiverId::PusHk as ChannelId, "PUS_8_TC_RECV", pus_hk_rx);
let pus_3_handler = PusService3HkHandler::new(
PusServiceHelper::new(
Box::new(hk_srv_receiver),
Box::new(hk_srv_tm_sender),
hk_srv_receiver,
hk_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInVecConverter::default(),
@ -200,17 +213,30 @@ pub fn create_hk_service_dynamic(
Pus3Wrapper { pus_3_handler }
}
pub struct Pus3Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub struct Pus3Wrapper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub(crate) pus_3_handler: PusService3HkHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporterWithSender,
VerificationReporter,
ExampleHkRequestConverter,
GenericRequestRouter,
GenericRoutingErrorHandler<3>,
>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus3Wrapper<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn handle_next_packet(&mut self) -> bool {
match self.pus_3_handler.handle_one_tc() {
Ok(result) => match result {

@ -1,14 +1,12 @@
use crate::tmtc::MpscStoreAndSendError;
use log::warn;
use satrs::pus::verification::{
FailParams, StdVerifReporterWithSender, VerificationReportingProvider,
};
use satrs::pus::verification::{FailParams, VerificationReportingProvider};
use satrs::pus::{
EcssTcAndToken, GenericRoutingError, PusPacketHandlerResult, PusRoutingErrorHandler, TcInMemory,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusServiceId;
use satrs::spacepackets::time::cds::TimeProvider;
use satrs::spacepackets::time::cds::CdsTime;
use satrs::spacepackets::time::TimeWriter;
use satrs_example::config::{tmtc_err, CustomPusServiceId};
use std::sync::mpsc::Sender;
@ -28,21 +26,21 @@ pub struct PusTcMpscRouter {
pub action_service_receiver: Sender<EcssTcAndToken>,
}
pub struct PusReceiver {
pub verif_reporter: StdVerifReporterWithSender,
pub struct PusReceiver<VerificationReporter: VerificationReportingProvider> {
pub verif_reporter: VerificationReporter,
pub pus_router: PusTcMpscRouter,
stamp_helper: TimeStampHelper,
}
struct TimeStampHelper {
stamper: TimeProvider,
stamper: CdsTime,
time_stamp: [u8; 7],
}
impl TimeStampHelper {
pub fn new() -> Self {
Self {
stamper: TimeProvider::new_with_u16_days(0, 0),
stamper: CdsTime::new_with_u16_days(0, 0),
time_stamp: [0; 7],
}
}
@ -61,8 +59,8 @@ impl TimeStampHelper {
}
}
impl PusReceiver {
pub fn new(verif_reporter: StdVerifReporterWithSender, pus_router: PusTcMpscRouter) -> Self {
impl<VerificationReporter: VerificationReportingProvider> PusReceiver<VerificationReporter> {
pub fn new(verif_reporter: VerificationReporter, pus_router: PusTcMpscRouter) -> Self {
Self {
verif_reporter,
pus_router,
@ -71,7 +69,7 @@ impl PusReceiver {
}
}
impl PusReceiver {
impl<VerificationReporter: VerificationReportingProvider> PusReceiver<VerificationReporter> {
pub fn handle_tc_packet(
&mut self,
tc_in_memory: TcInMemory,

@ -5,11 +5,15 @@ use log::{error, info, warn};
use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusService11SchedHandler;
use satrs::pus::verification::VerificationReporterWithSender;
use satrs::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::verification::VerificationReportingProvider;
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusServiceHelper,
EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
TmInSharedPoolSenderWithId,
};
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::ChannelId;
@ -51,15 +55,31 @@ impl TcReleaser for mpsc::Sender<Vec<u8>> {
}
}
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus_11_handler:
PusService11SchedHandler<TcInMemConverter, VerificationReporterWithSender, PusScheduler>,
pub struct Pus11Wrapper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub pus_11_handler: PusService11SchedHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
PusScheduler,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn release_tcs(&mut self) {
let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
self.tc_releaser.release(enabled, info, tc)
@ -110,13 +130,18 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
pub fn create_scheduler_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
tc_releaser: PusTcSourceProviderSharedPool,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInSharedStoreConverter> {
let sched_srv_tm_sender = MpscTmInSharedPoolSender::new(
) -> Pus11Wrapper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
> {
let sched_srv_tm_sender = TmInSharedPoolSenderWithId::new(
TmSenderId::PusSched as ChannelId,
"PUS_11_TM_SENDER",
shared_tm_store.clone(),
@ -131,8 +156,8 @@ pub fn create_scheduler_service_static(
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new(
PusServiceHelper::new(
Box::new(sched_srv_receiver),
Box::new(sched_srv_tm_sender),
sched_srv_receiver,
sched_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048),
@ -149,12 +174,17 @@ pub fn create_scheduler_service_static(
pub fn create_scheduler_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporterWithVecMpscSender,
tc_source_sender: mpsc::Sender<Vec<u8>>,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInVecConverter> {
let sched_srv_tm_sender = MpscTmAsVecSender::new(
) -> Pus11Wrapper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
> {
let sched_srv_tm_sender = TmAsVecSenderWithId::new(
TmSenderId::PusSched as ChannelId,
"PUS_11_TM_SENDER",
tm_funnel_tx,
@ -168,8 +198,8 @@ pub fn create_scheduler_service_dynamic(
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new(
PusServiceHelper::new(
Box::new(sched_srv_receiver),
Box::new(sched_srv_tm_sender),
sched_srv_receiver,
sched_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInVecConverter::default(),

@ -1,25 +1,44 @@
use satrs::pus::EcssTcInMemConverter;
use satrs::pus::{
verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore,
EcssTmSenderCore,
};
use super::{
action::Pus8Wrapper, event::Pus5Wrapper, hk::Pus3Wrapper, scheduler::Pus11Wrapper,
test::Service17CustomWrapper,
};
pub struct PusStack<TcInMemConverter: EcssTcInMemConverter> {
event_srv: Pus5Wrapper<TcInMemConverter>,
hk_srv: Pus3Wrapper<TcInMemConverter>,
action_srv: Pus8Wrapper<TcInMemConverter>,
schedule_srv: Pus11Wrapper<TcInMemConverter>,
test_srv: Service17CustomWrapper<TcInMemConverter>,
pub struct PusStack<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
event_srv: Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
hk_srv: Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
action_srv: Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
schedule_srv: Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
test_srv: Service17CustomWrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
impl<TcInMemConverter: EcssTcInMemConverter> PusStack<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> PusStack<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
hk_srv: Pus3Wrapper<TcInMemConverter>,
event_srv: Pus5Wrapper<TcInMemConverter>,
action_srv: Pus8Wrapper<TcInMemConverter>,
schedule_srv: Pus11Wrapper<TcInMemConverter>,
test_srv: Service17CustomWrapper<TcInMemConverter>,
hk_srv: Pus3Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
event_srv: Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
action_srv: Pus8Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
schedule_srv: Pus11Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
test_srv: Service17CustomWrapper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
) -> Self {
Self {
event_srv,
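Each of the service wrappers aggregated by PusStack above still exposes a handle_next_packet() method after the refactoring; only the set of generic parameters grew. A minimal polling sketch, assuming (this is not confirmed by the truncated impl blocks above) that handle_next_packet() returns true once the service's TC queue has been drained, and that the module path of Pus5Wrapper matches the example's pus module layout:

// Illustrative sketch only, not part of the diff above.
use crate::pus::event::Pus5Wrapper;
use satrs::pus::{
    verification::VerificationReportingProvider, EcssTcInMemConverter, EcssTcReceiverCore,
    EcssTmSenderCore,
};

fn drain_event_service<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>(
    wrapper: &mut Pus5Wrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
) where
    TcReceiver: EcssTcReceiverCore,
    TmSender: EcssTmSenderCore,
    TcInMemConverter: EcssTcInMemConverter,
    VerificationReporter: VerificationReportingProvider,
{
    // Keep handling telecommands until the wrapper reports an empty queue.
    while !wrapper.handle_next_packet() {}
}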

@ -2,16 +2,19 @@ use log::{info, warn};
use satrs::params::Params;
use satrs::pool::{SharedStaticMemoryPool, StoreAddr};
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReportingProvider};
use satrs::pus::verification::{
FailParams, VerificationReporterWithSender, VerificationReportingProvider,
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender,
MpscTmInSharedPoolSender, PusPacketHandlerResult, PusServiceHelper,
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTcReceiverCore,
EcssTmSenderCore, MpscTcReceiver, PusPacketHandlerResult, PusServiceHelper,
TmAsVecSenderWithId, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
TmInSharedPoolSenderWithId,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::spacepackets::time::cds::TimeProvider;
use satrs::spacepackets::time::cds::CdsTime;
use satrs::spacepackets::time::TimeWriter;
use satrs::tmtc::tm_helper::SharedTmPool;
use satrs::ChannelId;
@ -21,13 +24,18 @@ use std::sync::mpsc::{self, Sender};
pub fn create_test_service_static(
shared_tm_store: SharedTmPool,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tm_funnel_tx: mpsc::SyncSender<StoreAddr>,
verif_reporter: VerificationReporterWithSharedPoolMpscBoundedSender,
tc_pool: SharedStaticMemoryPool,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> Service17CustomWrapper<EcssTcInSharedStoreConverter> {
let test_srv_tm_sender = MpscTmInSharedPoolSender::new(
) -> Service17CustomWrapper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
> {
let test_srv_tm_sender = TmInSharedPoolSenderWithId::new(
TmSenderId::PusTest as ChannelId,
"PUS_17_TM_SENDER",
shared_tm_store.clone(),
@ -39,8 +47,8 @@ pub fn create_test_service_static(
pus_test_rx,
);
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
Box::new(test_srv_receiver),
Box::new(test_srv_tm_sender),
test_srv_receiver,
test_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
@ -53,11 +61,16 @@ pub fn create_test_service_static(
pub fn create_test_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>,
verif_reporter: VerificationReporterWithSender,
verif_reporter: VerificationReporterWithVecMpscSender,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> Service17CustomWrapper<EcssTcInVecConverter> {
let test_srv_tm_sender = MpscTmAsVecSender::new(
) -> Service17CustomWrapper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
> {
let test_srv_tm_sender = TmAsVecSenderWithId::new(
TmSenderId::PusTest as ChannelId,
"PUS_17_TM_SENDER",
tm_funnel_tx.clone(),
@ -68,8 +81,8 @@ pub fn create_test_service_dynamic(
pus_test_rx,
);
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
Box::new(test_srv_receiver),
Box::new(test_srv_tm_sender),
test_srv_receiver,
test_srv_tm_sender,
PUS_APID,
verif_reporter.clone(),
EcssTcInVecConverter::default(),
@ -80,12 +93,24 @@ pub fn create_test_service_dynamic(
}
}
pub struct Service17CustomWrapper<TcInMemConverter: EcssTcInMemConverter> {
pub pus17_handler: PusService17TestHandler<TcInMemConverter, VerificationReporterWithSender>,
pub struct Service17CustomWrapper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub pus17_handler:
PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub test_srv_event_sender: Sender<(EventU32, Option<Params>)>,
}
impl<TcInMemConverter: EcssTcInMemConverter> Service17CustomWrapper<TcInMemConverter> {
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> Service17CustomWrapper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn handle_next_packet(&mut self) -> bool {
let res = self.pus17_handler.handle_one_tc();
if res.is_err() {
@ -114,7 +139,7 @@ impl<TcInMemConverter: EcssTcInMemConverter> Service17CustomWrapper<TcInMemConve
.tc_slice_raw(),
)
.unwrap();
let time_stamper = TimeProvider::from_now_with_u16_days().unwrap();
let time_stamper = CdsTime::now_with_u16_days().unwrap();
let mut stamp_buf: [u8; 7] = [0; 7];
time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
if subservice == 128 {

@ -6,11 +6,14 @@ use std::{
use log::{info, warn};
use satrs::{
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
pus::ReceivesEcssPusTc,
spacepackets::PacketId,
tmtc::{CcsdsDistributor, CcsdsError, TmPacketSourceCore},
tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore},
};
use satrs_example::config::PUS_APID;
use crate::ccsds::CcsdsReceiver;
pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)];
#[derive(Default, Clone)]
@ -69,20 +72,37 @@ impl TmPacketSourceCore for SyncTcpTmSource {
}
}
pub struct TcpTask<MpscErrorType: 'static> {
server: TcpSpacepacketsServer<
(),
CcsdsError<MpscErrorType>,
SyncTcpTmSource,
CcsdsDistributor<MpscErrorType>,
>,
pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer<
(),
CcsdsError<MpscErrorType>,
SyncTcpTmSource,
CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
>;
pub struct TcpTask<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static,
> {
server: TcpServerType<TcSource, MpscErrorType>,
}
impl<MpscErrorType: 'static + core::fmt::Debug> TcpTask<MpscErrorType> {
impl<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static + core::fmt::Debug,
> TcpTask<TcSource, MpscErrorType>
{
pub fn new(
cfg: ServerConfig,
tm_source: SyncTcpTmSource,
tc_receiver: CcsdsDistributor<MpscErrorType>,
tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
) -> Result<Self, std::io::Error> {
Ok(Self {
server: TcpSpacepacketsServer::new(

@ -1,6 +1,6 @@
use std::{
collections::HashMap,
sync::mpsc::{Receiver, Sender},
sync::mpsc::{self},
};
use log::info;
@ -77,16 +77,16 @@ impl TmFunnelCommon {
pub struct TmFunnelStatic {
common: TmFunnelCommon,
shared_tm_store: SharedTmPool,
tm_funnel_rx: Receiver<StoreAddr>,
tm_server_tx: Sender<StoreAddr>,
tm_funnel_rx: mpsc::Receiver<StoreAddr>,
tm_server_tx: mpsc::SyncSender<StoreAddr>,
}
impl TmFunnelStatic {
pub fn new(
shared_tm_store: SharedTmPool,
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: Receiver<StoreAddr>,
tm_server_tx: Sender<StoreAddr>,
tm_funnel_rx: mpsc::Receiver<StoreAddr>,
tm_server_tx: mpsc::SyncSender<StoreAddr>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),
@ -123,15 +123,15 @@ impl TmFunnelStatic {
pub struct TmFunnelDynamic {
common: TmFunnelCommon,
tm_funnel_rx: Receiver<Vec<u8>>,
tm_server_tx: Sender<Vec<u8>>,
tm_funnel_rx: mpsc::Receiver<Vec<u8>>,
tm_server_tx: mpsc::Sender<Vec<u8>>,
}
impl TmFunnelDynamic {
pub fn new(
sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: Receiver<Vec<u8>>,
tm_server_tx: Sender<Vec<u8>>,
tm_funnel_rx: mpsc::Receiver<Vec<u8>>,
tm_server_tx: mpsc::Sender<Vec<u8>>,
) -> Self {
Self {
common: TmFunnelCommon::new(sync_tm_tcp_source),

@ -1,7 +1,10 @@
use log::warn;
use satrs::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use satrs::pus::{EcssTcAndToken, ReceivesEcssPusTc};
use satrs::spacepackets::SpHeader;
use std::sync::mpsc::{self, Receiver, SendError, Sender, TryRecvError};
use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError};
use thiserror::Error;
use crate::pus::PusReceiver;
@ -37,7 +40,7 @@ impl SharedTcPool {
#[derive(Clone)]
pub struct PusTcSourceProviderSharedPool {
pub tc_source: Sender<StoreAddr>,
pub tc_source: SyncSender<StoreAddr>,
pub shared_pool: SharedTcPool,
}
@ -97,14 +100,14 @@ pub struct TcSourceTaskStatic {
shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>,
tc_buf: [u8; 4096],
pus_receiver: PusReceiver,
pus_receiver: PusReceiver<VerificationReporterWithSharedPoolMpscBoundedSender>,
}
impl TcSourceTaskStatic {
pub fn new(
shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>,
pus_receiver: PusReceiver,
pus_receiver: PusReceiver<VerificationReporterWithSharedPoolMpscBoundedSender>,
) -> Self {
Self {
shared_tc_pool,
@ -161,11 +164,14 @@ impl TcSourceTaskStatic {
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver,
pus_receiver: PusReceiver<VerificationReporterWithVecMpscSender>,
}
impl TcSourceTaskDynamic {
pub fn new(tc_receiver: Receiver<Vec<u8>>, pus_receiver: PusReceiver) -> Self {
pub fn new(
tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver<VerificationReporterWithVecMpscSender>,
) -> Self {
Self {
tc_receiver,
pus_receiver,
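Several signatures in this and the preceding diffs switch the StoreAddr telemetry channels from mpsc::Sender to mpsc::SyncSender, i.e. from unbounded to bounded queues. The required SyncSender is produced by std's mpsc::sync_channel; a minimal sketch, where the queue depth is a made-up value rather than the one the example actually uses:

use std::sync::mpsc;

use satrs::pool::StoreAddr;

// Hypothetical queue depth, purely for illustration.
const TM_FUNNEL_QUEUE_DEPTH: usize = 50;

fn create_bounded_store_addr_channel() -> (mpsc::SyncSender<StoreAddr>, mpsc::Receiver<StoreAddr>) {
    // sync_channel returns a bounded channel; send() blocks once the queue
    // already holds TM_FUNNEL_QUEUE_DEPTH store addresses.
    mpsc::sync_channel(TM_FUNNEL_QUEUE_DEPTH)
}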

@ -23,6 +23,7 @@ version = "1"
optional = true
[dependencies.satrs-shared]
path = "../satrs-shared"
version = "0.1.2"
features = ["serde"]

@ -26,7 +26,10 @@ features = ["full"]
[dev-dependencies]
trybuild = { version = "1", features = ["diff"] }
satrs-shared = "0.1.2"
[dev-dependencies.satrs-shared]
version = "0.1.2"
path = "../../satrs-shared"
[dev-dependencies.satrs-mib]
path = ".."

satrs-minisim/Cargo.toml (new file)
@ -0,0 +1,21 @@
[package]
name = "satrs-minisim"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
thiserror = "1"
[dependencies.asynchronix]
version = "0.2.1"
[dependencies.satrs]
path = "../satrs"
[dev-dependencies]
delegate = "0.12"

satrs-minisim/src/acs.rs (new file)
@ -0,0 +1,338 @@
use std::{f32::consts::PI, sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
acs::{MgmReply, MgmSensorValues, MgtDipole, MgtHkSet, MgtReply, MGT_GEN_MAGNETIC_FIELD},
SimReply,
};
use crate::time::current_millis;
// The Earth's magnetic field varies between -30 uT and 30 uT.
const AMPLITUDE_MGM: f32 = 0.03;
// Let's start with a simple frequency here.
const FREQUENCY_MGM: f32 = 1.0;
const PHASE_X: f32 = 0.0;
// Different phases to have different values on the other axes.
const PHASE_Y: f32 = 0.1;
const PHASE_Z: f32 = 0.2;
/// Simple model for a magnetometer where the measured magnetic fields are modeled with sine waves.
///
/// Please note that a more realistic MGM model would include the following components
/// which are not included here to simplify the model:
///
/// 1. It would probably generate signed [i16] values which need to be converted to SI units
/// because it is a digital sensor.
/// 2. It would sample the magnetic field at a high fixed rate. This might not be possible for
/// a general purpose OS, but self-sampling at a relatively high rate (20-40 ms) might
/// still be possible.
pub struct MagnetometerModel {
pub switch_state: SwitchStateBinary,
pub periodicity: Duration,
pub external_mag_field: Option<MgmSensorValues>,
pub reply_sender: mpsc::Sender<SimReply>,
}
impl MagnetometerModel {
pub fn new(periodicity: Duration, reply_sender: mpsc::Sender<SimReply>) -> Self {
Self {
switch_state: SwitchStateBinary::Off,
periodicity,
external_mag_field: None,
reply_sender,
}
}
pub async fn switch_device(&mut self, switch_state: SwitchStateBinary) {
self.switch_state = switch_state;
}
pub async fn send_sensor_values(&mut self, _: (), scheduler: &Scheduler<Self>) {
self.reply_sender
.send(SimReply::new(MgmReply {
switch_state: self.switch_state,
sensor_values: self.calculate_current_mgm_tuple(current_millis(scheduler.time())),
}))
.expect("sending MGM sensor values failed");
}
// Devices like magnetorquers generate a strong magnetic field which overrides the default
// model for the measured magnetic field.
pub async fn apply_external_magnetic_field(&mut self, field: MgmSensorValues) {
self.external_mag_field = Some(field);
}
fn calculate_current_mgm_tuple(&self, time_ms: u64) -> MgmSensorValues {
if SwitchStateBinary::On == self.switch_state {
if let Some(ext_field) = self.external_mag_field {
return ext_field;
}
let base_sin_val = 2.0 * PI * FREQUENCY_MGM * (time_ms as f32 / 1000.0);
return MgmSensorValues {
x: AMPLITUDE_MGM * (base_sin_val + PHASE_X).sin(),
y: AMPLITUDE_MGM * (base_sin_val + PHASE_Y).sin(),
z: AMPLITUDE_MGM * (base_sin_val + PHASE_Z).sin(),
};
}
MgmSensorValues {
x: 0.0,
y: 0.0,
z: 0.0,
}
}
}
impl Model for MagnetometerModel {}
pub struct MagnetorquerModel {
switch_state: SwitchStateBinary,
torquing: bool,
torque_dipole: MgtDipole,
pub gen_magnetic_field: Output<MgmSensorValues>,
reply_sender: mpsc::Sender<SimReply>,
}
impl MagnetorquerModel {
pub fn new(reply_sender: mpsc::Sender<SimReply>) -> Self {
Self {
switch_state: SwitchStateBinary::Off,
torquing: false,
torque_dipole: MgtDipole::default(),
gen_magnetic_field: Output::new(),
reply_sender,
}
}
pub async fn apply_torque(
&mut self,
duration_and_dipole: (Duration, MgtDipole),
scheduler: &Scheduler<Self>,
) {
self.torque_dipole = duration_and_dipole.1;
self.torquing = true;
if scheduler
.schedule_event(duration_and_dipole.0, Self::clear_torque, ())
.is_err()
{
log::warn!("torque clearing can only be set for a future time.");
}
self.generate_magnetic_field(()).await;
}
pub async fn clear_torque(&mut self, _: ()) {
self.torque_dipole = MgtDipole::default();
self.torquing = false;
self.generate_magnetic_field(()).await;
}
pub async fn switch_device(&mut self, switch_state: SwitchStateBinary) {
self.switch_state = switch_state;
self.generate_magnetic_field(()).await;
}
pub async fn request_housekeeping_data(&mut self, _: (), scheduler: &Scheduler<Self>) {
if self.switch_state != SwitchStateBinary::On {
return;
}
scheduler
.schedule_event(Duration::from_millis(15), Self::send_housekeeping_data, ())
.expect("requesting housekeeping data failed")
}
pub fn send_housekeeping_data(&mut self) {
self.reply_sender
.send(SimReply::new(MgtReply::Hk(MgtHkSet {
dipole: self.torque_dipole,
torquing: self.torquing,
})))
.unwrap();
}
fn calc_magnetic_field(&self, _: MgtDipole) -> MgmSensorValues {
// Simplified model: Just returns some fixed magnetic field for now.
// Later, we could make this more fancy by incorporating the commanded dipole.
MGT_GEN_MAGNETIC_FIELD
}
/// A torquing magnetorquer generates a magnetic field. This function can be used to apply
/// the magnetic field.
async fn generate_magnetic_field(&mut self, _: ()) {
if self.switch_state != SwitchStateBinary::On || !self.torquing {
return;
}
self.gen_magnetic_field
.send(self.calc_magnetic_field(self.torque_dipole))
.await;
}
}
impl Model for MagnetorquerModel {}
#[cfg(test)]
pub mod tests {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
acs::{MgmReply, MgmRequest, MgtDipole, MgtHkSet, MgtReply, MgtRequest},
eps::PcduSwitch,
SerializableSimMsgPayload, SimMessageProvider, SimRequest, SimTarget,
};
use crate::{eps::tests::switch_device_on, test_helpers::SimTestbench};
#[test]
fn test_basic_mgm_request() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Mgm);
let reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
assert_eq!(reply.switch_state, SwitchStateBinary::Off);
assert_eq!(reply.sensor_values.x, 0.0);
assert_eq!(reply.sensor_values.y, 0.0);
assert_eq!(reply.sensor_values.z, 0.0);
}
#[test]
fn test_basic_mgm_request_switched_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgm);
let mut request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let mut sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let mut sim_reply = sim_reply_res.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Mgm);
let first_reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
sim_testbench.step_by(Duration::from_millis(50));
request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
sim_reply = sim_reply_res.unwrap();
let second_reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
// Check that the values are changing.
assert!(first_reply != second_reply);
}
#[test]
fn test_basic_mgt_request_is_off() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_none());
}
#[test]
fn test_basic_mgt_request_is_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
let mgt_reply = MgtReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
match mgt_reply {
MgtReply::Hk(hk) => {
assert_eq!(hk.dipole, MgtDipole::default());
assert!(!hk.torquing);
}
_ => panic!("unexpected reply"),
}
}
fn check_mgt_hk(sim_testbench: &mut SimTestbench, expected_hk_set: MgtHkSet) {
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
let mgt_reply = MgtReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
match mgt_reply {
MgtReply::Hk(hk) => {
assert_eq!(hk, expected_hk_set);
}
_ => panic!("unexpected reply"),
}
}
#[test]
fn test_basic_mgt_request_is_on_and_torquing() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
let commanded_dipole = MgtDipole {
x: -200,
y: 200,
z: 1000,
};
let request = SimRequest::new(MgtRequest::ApplyTorque {
duration: Duration::from_millis(100),
dipole: commanded_dipole,
});
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step_by(Duration::from_millis(5));
check_mgt_hk(
&mut sim_testbench,
MgtHkSet {
dipole: commanded_dipole,
torquing: true,
},
);
sim_testbench.step_by(Duration::from_millis(100));
check_mgt_hk(
&mut sim_testbench,
MgtHkSet {
dipole: MgtDipole::default(),
torquing: false,
},
);
}
}
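The magnetometer model's documentation above notes that a real MGM would deliver signed 16-bit raw values which have to be converted to physical units. A sketch of such a conversion, kept deliberately outside the new file above; the scale factor is a placeholder, not a real datasheet value:

// Illustrative only, not part of the minisim sources.
use satrs_minisim::acs::MgmSensorValues;

// Placeholder scale factor mapping the full i16 range to roughly +/- 0.03 (the
// sine amplitude used by the model above).
const MGM_RAW_TO_FIELD: f32 = 0.03 / i16::MAX as f32;

fn raw_mgm_to_field(raw_x: i16, raw_y: i16, raw_z: i16) -> MgmSensorValues {
    MgmSensorValues {
        x: raw_x as f32 * MGM_RAW_TO_FIELD,
        y: raw_y as f32 * MGM_RAW_TO_FIELD,
        z: raw_z as f32 * MGM_RAW_TO_FIELD,
    }
}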

@ -0,0 +1,189 @@
use std::{sync::mpsc, time::Duration};
use asynchronix::{
simulation::{Address, Simulation},
time::{Clock, MonotonicTime, SystemClock},
};
use satrs_minisim::{
acs::{MgmRequest, MgtRequest},
eps::PcduRequest,
SerializableSimMsgPayload, SimCtrlReply, SimCtrlRequest, SimMessageProvider, SimReply,
SimRequest, SimRequestError, SimTarget,
};
use crate::{
acs::{MagnetometerModel, MagnetorquerModel},
eps::PcduModel,
};
// The simulation controller processes requests and drives the simulation.
pub struct SimController {
pub sys_clock: SystemClock,
pub request_receiver: mpsc::Receiver<SimRequest>,
pub reply_sender: mpsc::Sender<SimReply>,
pub simulation: Simulation,
pub mgm_addr: Address<MagnetometerModel>,
pub pcdu_addr: Address<PcduModel>,
pub mgt_addr: Address<MagnetorquerModel>,
}
impl SimController {
pub fn new(
sys_clock: SystemClock,
request_receiver: mpsc::Receiver<SimRequest>,
reply_sender: mpsc::Sender<SimReply>,
simulation: Simulation,
mgm_addr: Address<MagnetometerModel>,
pcdu_addr: Address<PcduModel>,
mgt_addr: Address<MagnetorquerModel>,
) -> Self {
Self {
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
}
}
pub fn run(&mut self, start_time: MonotonicTime, udp_polling_interval_ms: u64) {
let mut t = start_time + Duration::from_millis(udp_polling_interval_ms);
self.sys_clock.synchronize(t);
loop {
// Check for UDP requests once per polling interval. Shift the simulator ahead here to
// prevent replies from lying in the past.
t += Duration::from_millis(udp_polling_interval_ms);
self.simulation
.step_until(t)
.expect("simulation step failed");
self.handle_sim_requests();
self.sys_clock.synchronize(t);
}
}
pub fn handle_sim_requests(&mut self) {
loop {
match self.request_receiver.try_recv() {
Ok(request) => {
if let Err(e) = match request.target() {
SimTarget::SimCtrl => self.handle_ctrl_request(&request),
SimTarget::Mgm => self.handle_mgm_request(&request),
SimTarget::Mgt => self.handle_mgt_request(&request),
SimTarget::Pcdu => self.handle_pcdu_request(&request),
} {
self.handle_invalid_request_with_valid_target(e, &request)
}
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
panic!("all request sender disconnected")
}
},
}
}
}
fn handle_ctrl_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let sim_ctrl_request = SimCtrlRequest::from_sim_message(request)?;
match sim_ctrl_request {
SimCtrlRequest::Ping => {
self.reply_sender
.send(SimReply::new(SimCtrlReply::Pong))
.expect("sending reply from sim controller failed");
}
}
Ok(())
}
fn handle_mgm_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgm_request = MgmRequest::from_sim_message(request)?;
match mgm_request {
MgmRequest::RequestSensorData => {
self.simulation.send_event(
MagnetometerModel::send_sensor_values,
(),
&self.mgm_addr,
);
}
}
Ok(())
}
fn handle_pcdu_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let pcdu_request = PcduRequest::from_sim_message(request)?;
match pcdu_request {
PcduRequest::RequestSwitchInfo => {
self.simulation
.send_event(PcduModel::request_switch_info, (), &self.pcdu_addr);
}
PcduRequest::SwitchDevice { switch, state } => {
self.simulation.send_event(
PcduModel::switch_device,
(switch, state),
&self.pcdu_addr,
);
}
}
Ok(())
}
fn handle_mgt_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgt_request = MgtRequest::from_sim_message(request)?;
match mgt_request {
MgtRequest::ApplyTorque { duration, dipole } => self.simulation.send_event(
MagnetorquerModel::apply_torque,
(duration, dipole),
&self.mgt_addr,
),
MgtRequest::RequestHk => self.simulation.send_event(
MagnetorquerModel::request_housekeeping_data,
(),
&self.mgt_addr,
),
}
Ok(())
}
fn handle_invalid_request_with_valid_target(
&self,
error: SimRequestError,
request: &SimRequest,
) {
log::warn!(
"received invalid {:?} request: {:?}",
request.target(),
error
);
self.reply_sender
.send(SimReply::new(SimCtrlReply::from(error)))
.expect("sending reply from sim controller failed");
}
}
#[cfg(test)]
mod tests {
use crate::test_helpers::SimTestbench;
use super::*;
#[test]
fn test_basic_ping() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(SimCtrlRequest::Ping);
sim_testbench
.send_request(request)
.expect("sending sim ctrl request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::SimCtrl);
let reply = SimCtrlReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
assert_eq!(reply, SimCtrlReply::Pong);
}
}

satrs-minisim/src/eps.rs (new file)
@ -0,0 +1,185 @@
use std::{collections::HashMap, sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
eps::{PcduReply, PcduSwitch, SwitchMap},
SimReply,
};
pub const SWITCH_INFO_DELAY_MS: u64 = 10;
pub struct PcduModel {
pub switcher_map: SwitchMap,
pub mgm_switch: Output<SwitchStateBinary>,
pub mgt_switch: Output<SwitchStateBinary>,
pub reply_sender: mpsc::Sender<SimReply>,
}
impl PcduModel {
pub fn new(reply_sender: mpsc::Sender<SimReply>) -> Self {
let mut switcher_map = HashMap::new();
switcher_map.insert(PcduSwitch::Mgm, SwitchStateBinary::Off);
switcher_map.insert(PcduSwitch::Mgt, SwitchStateBinary::Off);
Self {
switcher_map,
mgm_switch: Output::new(),
mgt_switch: Output::new(),
reply_sender,
}
}
pub async fn request_switch_info(&mut self, _: (), scheduler: &Scheduler<Self>) {
scheduler
.schedule_event(
Duration::from_millis(SWITCH_INFO_DELAY_MS),
Self::send_switch_info,
(),
)
.expect("requesting switch info failed");
}
pub fn send_switch_info(&mut self) {
let reply = SimReply::new(PcduReply::SwitchInfo(self.switcher_map.clone()));
self.reply_sender.send(reply).unwrap();
}
pub async fn switch_device(
&mut self,
switch_and_target_state: (PcduSwitch, SwitchStateBinary),
) {
let val = self
.switcher_map
.get_mut(&switch_and_target_state.0)
.unwrap_or_else(|| panic!("switch {:?} not found", switch_and_target_state.0));
*val = switch_and_target_state.1;
match switch_and_target_state.0 {
PcduSwitch::Mgm => {
self.mgm_switch.send(switch_and_target_state.1).await;
}
PcduSwitch::Mgt => {
self.mgt_switch.send(switch_and_target_state.1).await;
}
}
}
}
impl Model for PcduModel {}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use std::time::Duration;
use satrs_minisim::{
eps::PcduRequest, SerializableSimMsgPayload, SimMessageProvider, SimRequest, SimTarget,
};
use crate::test_helpers::SimTestbench;
fn switch_device(
sim_testbench: &mut SimTestbench,
switch: PcduSwitch,
target: SwitchStateBinary,
) {
let request = SimRequest::new(PcduRequest::SwitchDevice {
switch,
state: target,
});
sim_testbench
.send_request(request)
.expect("sending MGM switch request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
}
#[allow(dead_code)]
pub(crate) fn switch_device_off(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
switch_device(sim_testbench, switch, SwitchStateBinary::Off);
}
pub(crate) fn switch_device_on(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
switch_device(sim_testbench, switch, SwitchStateBinary::On);
}
pub(crate) fn get_all_off_switch_map() -> SwitchMap {
let mut switcher_map = SwitchMap::new();
switcher_map.insert(super::PcduSwitch::Mgm, super::SwitchStateBinary::Off);
switcher_map.insert(super::PcduSwitch::Mgt, super::SwitchStateBinary::Off);
switcher_map
}
fn check_switch_state(sim_testbench: &mut SimTestbench, expected_switch_map: &SwitchMap) {
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Pcdu);
let pcdu_reply = PcduReply::from_sim_message(&sim_reply)
.expect("failed to deserialize PCDU switch info");
match pcdu_reply {
PcduReply::SwitchInfo(switch_map) => {
assert_eq!(switch_map, *expected_switch_map);
}
}
}
fn test_pcdu_switching_single_switch(switch: PcduSwitch, target: SwitchStateBinary) {
let mut sim_testbench = SimTestbench::new();
switch_device(&mut sim_testbench, switch, target);
let mut switcher_map = get_all_off_switch_map();
*switcher_map.get_mut(&switch).unwrap() = target;
check_switch_state(&mut sim_testbench, &switcher_map);
}
#[test]
fn test_pcdu_switcher_request() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step_by(Duration::from_millis(1));
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_none());
// The switch info reply is only sent after SWITCH_INFO_DELAY_MS (10 ms).
sim_testbench.step_by(Duration::from_millis(25));
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Pcdu);
let pcdu_reply = PcduReply::from_sim_message(&sim_reply)
.expect("failed to deserialize PCDU switch info");
match pcdu_reply {
PcduReply::SwitchInfo(switch_map) => {
assert_eq!(switch_map, get_all_off_switch_map());
}
}
}
#[test]
fn test_pcdu_switching_mgm_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgm, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_off() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::Off);
}
}

satrs-minisim/src/lib.rs (new file)
@ -0,0 +1,383 @@
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimTarget {
SimCtrl,
Mgm,
Mgt,
Pcdu,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimMessage {
pub target: SimTarget,
pub payload: String,
}
/// A generic simulation request type. Right now, the payload data is expected to be
/// JSON, which might be changed in the future.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimRequest {
inner: SimMessage,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimMessageType {
Request,
Reply,
}
/// Generic trait implemented by simulation request or reply payloads. It ties the request or
/// reply to a specific target and provides an API which does boilerplate tasks like checking the
/// validity of the target.
pub trait SerializableSimMsgPayload<P: SimMessageProvider>:
Serialize + DeserializeOwned + Sized
{
const TARGET: SimTarget;
fn from_sim_message(sim_message: &P) -> Result<Self, SimMessageError<P>> {
if sim_message.target() == Self::TARGET {
return Ok(serde_json::from_str(sim_message.payload())?);
}
Err(SimMessageError::TargetRequestMissmatch(sim_message.clone()))
}
}
pub trait SimMessageProvider: Serialize + DeserializeOwned + Clone + Sized {
fn msg_type(&self) -> SimMessageType;
fn target(&self) -> SimTarget;
fn payload(&self) -> &String;
fn from_raw_data(data: &[u8]) -> serde_json::Result<Self> {
serde_json::from_slice(data)
}
}
impl SimRequest {
pub fn new<T: SerializableSimMsgPayload<SimRequest>>(serializable_request: T) -> Self {
Self {
inner: SimMessage {
target: T::TARGET,
payload: serde_json::to_string(&serializable_request).unwrap(),
},
}
}
}
impl SimMessageProvider for SimRequest {
fn target(&self) -> SimTarget {
self.inner.target
}
fn payload(&self) -> &String {
&self.inner.payload
}
fn msg_type(&self) -> SimMessageType {
SimMessageType::Request
}
}
/// A generic simulation reply type. Right now, the payload data is expected to be
/// JSON, which might be changed in the future.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimReply {
inner: SimMessage,
}
impl SimReply {
pub fn new<T: SerializableSimMsgPayload<SimReply>>(serializable_reply: T) -> Self {
Self {
inner: SimMessage {
target: T::TARGET,
payload: serde_json::to_string(&serializable_reply).unwrap(),
},
}
}
}
impl SimMessageProvider for SimReply {
fn target(&self) -> SimTarget {
self.inner.target
}
fn payload(&self) -> &String {
&self.inner.payload
}
fn msg_type(&self) -> SimMessageType {
SimMessageType::Reply
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimCtrlRequest {
Ping,
}
impl SerializableSimMsgPayload<SimRequest> for SimCtrlRequest {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
pub type SimReplyError = SimMessageError<SimReply>;
pub type SimRequestError = SimMessageError<SimRequest>;
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimMessageError<P> {
SerdeJson(String),
TargetRequestMissmatch(P),
}
impl<P> From<serde_json::Error> for SimMessageError<P> {
fn from(error: serde_json::Error) -> SimMessageError<P> {
SimMessageError::SerdeJson(error.to_string())
}
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimCtrlReply {
Pong,
InvalidRequest(SimRequestError),
}
impl SerializableSimMsgPayload<SimReply> for SimCtrlReply {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
impl From<SimRequestError> for SimCtrlReply {
fn from(error: SimRequestError) -> Self {
SimCtrlReply::InvalidRequest(error)
}
}
pub mod eps {
use super::*;
use std::collections::HashMap;
use satrs::power::SwitchStateBinary;
pub type SwitchMap = HashMap<PcduSwitch, SwitchStateBinary>;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub enum PcduSwitch {
Mgm = 0,
Mgt = 1,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum PcduRequest {
SwitchDevice {
switch: PcduSwitch,
state: SwitchStateBinary,
},
RequestSwitchInfo,
}
impl SerializableSimMsgPayload<SimRequest> for PcduRequest {
const TARGET: SimTarget = SimTarget::Pcdu;
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PcduReply {
SwitchInfo(SwitchMap),
}
impl SerializableSimMsgPayload<SimReply> for PcduReply {
const TARGET: SimTarget = SimTarget::Pcdu;
}
}
pub mod acs {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use super::*;
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgmRequest {
RequestSensorData,
}
impl SerializableSimMsgPayload<SimRequest> for MgmRequest {
const TARGET: SimTarget = SimTarget::Mgm;
}
// Normally, small magnetometers generate their output in a signed 16-bit raw format or something
// similar which needs to be converted to a signed float value with physical units. We
// simplify this here and generate the signed float values directly.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MgmSensorValues {
pub x: f32,
pub y: f32,
pub z: f32,
}
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MgmReply {
pub switch_state: SwitchStateBinary,
pub sensor_values: MgmSensorValues,
}
impl SerializableSimMsgPayload<SimReply> for MgmReply {
const TARGET: SimTarget = SimTarget::Mgm;
}
pub const MGT_GEN_MAGNETIC_FIELD: MgmSensorValues = MgmSensorValues {
x: 0.03,
y: -0.03,
z: 0.03,
};
// Simple model using i16 values.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MgtDipole {
pub x: i16,
pub y: i16,
pub z: i16,
}
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub enum MgtRequestType {
ApplyTorque,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgtRequest {
ApplyTorque {
duration: Duration,
dipole: MgtDipole,
},
RequestHk,
}
impl SerializableSimMsgPayload<SimRequest> for MgtRequest {
const TARGET: SimTarget = SimTarget::Mgt;
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MgtHkSet {
pub dipole: MgtDipole,
pub torquing: bool,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgtReply {
Ack(MgtRequestType),
Nak(MgtRequestType),
Hk(MgtHkSet),
}
impl SerializableSimMsgPayload<SimReply> for MgtReply {
const TARGET: SimTarget = SimTarget::Mgm;
}
}
pub mod udp {
use std::{
net::{SocketAddr, UdpSocket},
time::Duration,
};
use thiserror::Error;
use crate::{SimReply, SimRequest};
#[derive(Error, Debug)]
pub enum ReceptionError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serde JSON error: {0}")]
SerdeJson(#[from] serde_json::Error),
}
pub struct SimUdpClient {
socket: UdpSocket,
pub reply_buf: [u8; 4096],
}
impl SimUdpClient {
pub fn new(
server_addr: &SocketAddr,
non_blocking: bool,
read_timeot_ms: Option<u64>,
) -> std::io::Result<Self> {
let socket = UdpSocket::bind("127.0.0.1:0")?;
socket.set_nonblocking(non_blocking)?;
socket
.connect(server_addr)
.expect("could not connect to server addr");
if let Some(read_timeout) = read_timeot_ms {
// Set a read timeout so the test does not hang on failures.
socket.set_read_timeout(Some(Duration::from_millis(read_timeout)))?;
}
Ok(Self {
socket,
reply_buf: [0; 4096],
})
}
pub fn set_nonblocking(&self, non_blocking: bool) -> std::io::Result<()> {
self.socket.set_nonblocking(non_blocking)
}
pub fn set_read_timeout(&self, read_timeout_ms: u64) -> std::io::Result<()> {
self.socket
.set_read_timeout(Some(Duration::from_millis(read_timeout_ms)))
}
pub fn send_request(&self, sim_request: &SimRequest) -> std::io::Result<usize> {
self.socket.send(
&serde_json::to_vec(sim_request).expect("conversion of request to vector failed"),
)
}
pub fn recv_raw(&mut self) -> std::io::Result<usize> {
self.socket.recv(&mut self.reply_buf)
}
pub fn recv_sim_reply(&mut self) -> Result<SimReply, ReceptionError> {
let read_len = self.recv_raw()?;
Ok(serde_json::from_slice(&self.reply_buf[0..read_len])?)
}
}
}
#[cfg(test)]
pub mod tests {
use super::*;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DummyRequest {
Ping,
}
impl SerializableSimMsgPayload<SimRequest> for DummyRequest {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DummyReply {
Pong,
}
impl SerializableSimMsgPayload<SimReply> for DummyReply {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
#[test]
fn test_basic_request() {
let sim_request = SimRequest::new(DummyRequest::Ping);
assert_eq!(sim_request.target(), SimTarget::SimCtrl);
assert_eq!(sim_request.msg_type(), SimMessageType::Request);
let dummy_request =
DummyRequest::from_sim_message(&sim_request).expect("deserialization failed");
assert_eq!(dummy_request, DummyRequest::Ping);
}
#[test]
fn test_basic_reply() {
let sim_reply = SimReply::new(DummyReply::Pong);
assert_eq!(sim_reply.target(), SimTarget::SimCtrl);
assert_eq!(sim_reply.msg_type(), SimMessageType::Reply);
let dummy_request =
DummyReply::from_sim_message(&sim_reply).expect("deserialization failed");
assert_eq!(dummy_request, DummyReply::Pong);
}
}
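The udp client helper defined above can be used to ping a running minisim over its JSON/UDP interface. A minimal sketch, assuming the server's socket address is already known; the 200 ms read timeout is an arbitrary choice:

use std::net::SocketAddr;

use satrs_minisim::{
    udp::SimUdpClient, SerializableSimMsgPayload, SimCtrlReply, SimCtrlRequest, SimRequest,
};

fn ping_simulator(server_addr: &SocketAddr) {
    // Blocking client with a 200 ms read timeout so a missing reply does not hang forever.
    let mut client =
        SimUdpClient::new(server_addr, false, Some(200)).expect("creating UDP client failed");
    client
        .send_request(&SimRequest::new(SimCtrlRequest::Ping))
        .expect("sending ping request failed");
    let sim_reply = client.recv_sim_reply().expect("receiving reply failed");
    let ctrl_reply = SimCtrlReply::from_sim_message(&sim_reply)
        .expect("deserializing simulation controller reply failed");
    assert_eq!(ctrl_reply, SimCtrlReply::Pong);
}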

satrs-minisim/src/main.rs (new file)
@ -0,0 +1,103 @@
use acs::{MagnetometerModel, MagnetorquerModel};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, SystemClock};
use controller::SimController;
use eps::PcduModel;
use satrs_minisim::{SimReply, SimRequest};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration, SystemTime};
use udp::SimUdpServer;
mod acs;
mod controller;
mod eps;
#[cfg(test)]
mod test_helpers;
mod time;
mod udp;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ThreadingModel {
Default = 0,
Single = 1,
}
fn create_sim_controller(
threading_model: ThreadingModel,
start_time: MonotonicTime,
reply_sender: mpsc::Sender<SimReply>,
request_receiver: mpsc::Receiver<SimRequest>,
) -> SimController {
// Instantiate models and their mailboxes.
let mgm_model = MagnetometerModel::new(Duration::from_millis(50), reply_sender.clone());
let mgm_mailbox = Mailbox::new();
let mgm_addr = mgm_mailbox.address();
let pcdu_mailbox = Mailbox::new();
let pcdu_addr = pcdu_mailbox.address();
let mgt_mailbox = Mailbox::new();
let mgt_addr = mgt_mailbox.address();
let mut pcdu_model = PcduModel::new(reply_sender.clone());
pcdu_model
.mgm_switch
.connect(MagnetometerModel::switch_device, &mgm_addr);
let mut mgt_model = MagnetorquerModel::new(reply_sender.clone());
// Input connections.
pcdu_model
.mgt_switch
.connect(MagnetorquerModel::switch_device, &mgt_addr);
// Output connections.
mgt_model
.gen_magnetic_field
.connect(MagnetometerModel::apply_external_magnetic_field, &mgm_addr);
// Instantiate the simulator
let sys_clock = SystemClock::from_system_time(start_time, SystemTime::now());
let sim_init = if threading_model == ThreadingModel::Single {
SimInit::with_num_threads(1)
} else {
SimInit::new()
};
let simulation = sim_init
.add_model(mgm_model, mgm_mailbox)
.add_model(pcdu_model, pcdu_mailbox)
.add_model(mgt_model, mgt_mailbox)
.init(start_time);
SimController::new(
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
)
}
fn main() {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let t0 = MonotonicTime::EPOCH;
let mut sim_ctrl =
create_sim_controller(ThreadingModel::Default, t0, reply_sender, request_receiver);
// This thread schedules the simulator.
let sim_thread = thread::spawn(move || {
sim_ctrl.run(t0, 1);
});
let mut udp_server = SimUdpServer::new(0, request_sender, reply_receiver, 200, None)
.expect("could not create UDP request server");
// This thread manages the simulator UDP server.
let udp_tc_thread = thread::spawn(move || {
udp_server.run();
});
sim_thread.join().expect("joining simulation thread failed");
udp_tc_thread
.join()
.expect("joining UDP server thread failed");
}

@ -0,0 +1,56 @@
use delegate::delegate;
use std::{sync::mpsc, time::Duration};
use asynchronix::time::MonotonicTime;
use satrs_minisim::{SimReply, SimRequest};
use crate::{controller::SimController, create_sim_controller, ThreadingModel};
pub struct SimTestbench {
pub sim_controller: SimController,
pub reply_receiver: mpsc::Receiver<SimReply>,
pub request_sender: mpsc::Sender<SimRequest>,
}
impl SimTestbench {
pub fn new() -> Self {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let t0 = MonotonicTime::EPOCH;
let sim_ctrl =
create_sim_controller(ThreadingModel::Single, t0, reply_sender, request_receiver);
Self {
sim_controller: sim_ctrl,
reply_receiver,
request_sender,
}
}
delegate! {
to self.sim_controller {
pub fn handle_sim_requests(&mut self);
}
to self.sim_controller.simulation {
pub fn step(&mut self);
pub fn step_by(&mut self, duration: Duration);
}
}
pub fn send_request(&self, request: SimRequest) -> Result<(), mpsc::SendError<SimRequest>> {
self.request_sender.send(request)
}
pub fn try_receive_next_reply(&self) -> Option<SimReply> {
match self.reply_receiver.try_recv() {
Ok(reply) => Some(reply),
Err(e) => {
if e == mpsc::TryRecvError::Empty {
None
} else {
panic!("reply_receiver disconnected");
}
}
}
}
}
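
A short usage sketch for this testbench, not taken from the change set itself. It assumes the relevant satrs_minisim items (SimRequest, SimCtrlRequest, SimCtrlReply, SimTarget) are imported in this module, that the controller answers a SimCtrlRequest::Ping with a reply targeted at SimTarget::SimCtrl, and that SimCtrlReply offers a from_sim_message helper analogous to the DummyReply one used in the library tests above:

#[test]
fn sim_ctrl_ping_pong() {
    let mut sim_testbench = SimTestbench::new();
    sim_testbench
        .send_request(SimRequest::new(SimCtrlRequest::Ping))
        .expect("sending sim ctrl request failed");
    // Let the controller consume the queued request and advance the simulation once.
    sim_testbench.handle_sim_requests();
    sim_testbench.step();
    let sim_reply = sim_testbench
        .try_receive_next_reply()
        .expect("no reply from the sim controller");
    assert_eq!(sim_reply.target(), SimTarget::SimCtrl);
    // Assumed helper, mirroring DummyReply::from_sim_message shown earlier.
    let ctrl_reply = SimCtrlReply::from_sim_message(&sim_reply).expect("deserialization failed");
    assert_eq!(ctrl_reply, SimCtrlReply::Pong);
}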


@ -0,0 +1,5 @@
use asynchronix::time::MonotonicTime;
pub fn current_millis(time: MonotonicTime) -> u64 {
(time.as_secs() as u64 * 1000) + (time.subsec_nanos() as u64 / 1_000_000)
}
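
As a quick check of the conversion: a monotonic time of 2 s and 500 000 000 ns past the epoch yields 2 * 1000 + 500_000_000 / 1_000_000 = 2500 ms; any sub-millisecond remainder is truncated by the integer division.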

satrs-minisim/src/udp.rs

@ -0,0 +1,390 @@
use std::{
collections::VecDeque,
io::ErrorKind,
net::{SocketAddr, UdpSocket},
sync::{atomic::AtomicBool, mpsc, Arc},
time::Duration,
};
use satrs_minisim::{SimMessageProvider, SimReply, SimRequest};
// A UDP server which handles all TCs received from a client application.
pub struct SimUdpServer {
socket: UdpSocket,
request_sender: mpsc::Sender<SimRequest>,
// shared_last_sender: SharedSocketAddr,
reply_receiver: mpsc::Receiver<SimReply>,
reply_queue: VecDeque<SimReply>,
max_num_replies: usize,
// Stop signal to stop the server. Required for unit tests and useful to allow a clean shutdown
// of the application.
stop_signal: Option<Arc<AtomicBool>>,
idle_sleep_period_ms: u64,
req_buf: [u8; 4096],
sender_addr: Option<SocketAddr>,
}
impl SimUdpServer {
pub fn new(
local_port: u16,
request_sender: mpsc::Sender<SimRequest>,
reply_receiver: mpsc::Receiver<SimReply>,
max_num_replies: usize,
stop_signal: Option<Arc<AtomicBool>>,
) -> std::io::Result<Self> {
let socket = UdpSocket::bind(SocketAddr::from(([0, 0, 0, 0], local_port)))?;
socket.set_nonblocking(true)?;
Ok(Self {
socket,
request_sender,
reply_receiver,
reply_queue: VecDeque::new(),
max_num_replies,
stop_signal,
idle_sleep_period_ms: 3,
req_buf: [0; 4096],
sender_addr: None,
})
}
#[allow(dead_code)]
pub fn server_addr(&self) -> std::io::Result<SocketAddr> {
self.socket.local_addr()
}
pub fn run(&mut self) {
loop {
if let Some(stop_signal) = &self.stop_signal {
if stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
let processed_requests = self.process_requests();
let processed_replies = self.process_replies();
let sent_replies = self.send_replies();
// Sleep for a bit if there is nothing to do to prevent burning CPU cycles. Delay
// should be kept short to ensure responsiveness of the system.
if !processed_requests && !processed_replies && !sent_replies {
std::thread::sleep(Duration::from_millis(self.idle_sleep_period_ms));
}
}
}
fn process_requests(&mut self) -> bool {
let mut processed_requests = false;
loop {
// The socket is non-blocking: if no data is pending, recv_from returns WouldBlock and we
// fall through so that periodic work like checking the stop signal can still happen.
let (bytes_read, src) = match self.socket.recv_from(&mut self.req_buf) {
Ok((bytes_read, src)) => (bytes_read, src),
Err(e) if e.kind() == ErrorKind::WouldBlock => {
// Continue to perform regular checks like the stop signal.
break;
}
Err(e) => {
// Handle unexpected errors (e.g., socket closed) here.
log::error!("unexpected request server error: {e}");
break;
}
};
self.sender_addr = Some(src);
let sim_req = SimRequest::from_raw_data(&self.req_buf[..bytes_read]);
if sim_req.is_err() {
log::warn!(
"received UDP request with invalid format: {}",
sim_req.unwrap_err()
);
return processed_requests;
}
self.request_sender.send(sim_req.unwrap()).unwrap();
processed_requests = true;
}
processed_requests
}
fn process_replies(&mut self) -> bool {
let mut processed_replies = false;
loop {
match self.reply_receiver.try_recv() {
Ok(reply) => {
if self.reply_queue.len() >= self.max_num_replies {
self.reply_queue.pop_front();
}
self.reply_queue.push_back(reply);
processed_replies = true;
}
Err(e) => match e {
mpsc::TryRecvError::Empty => return processed_replies,
mpsc::TryRecvError::Disconnected => {
log::error!("all UDP reply senders disconnected")
}
},
}
}
}
fn send_replies(&mut self) -> bool {
if self.sender_addr.is_none() {
return false;
}
let mut sent_replies = false;
while !self.reply_queue.is_empty() {
let next_reply_to_send = self.reply_queue.pop_front().unwrap();
self.socket
.send_to(
serde_json::to_string(&next_reply_to_send)
.unwrap()
.as_bytes(),
self.sender_addr.unwrap(),
)
.expect("sending reply failed");
sent_replies = true;
}
sent_replies
}
}
#[cfg(test)]
mod tests {
use std::{
io::ErrorKind,
sync::{
atomic::{AtomicBool, Ordering},
mpsc, Arc,
},
time::Duration,
};
use satrs_minisim::{
eps::{PcduReply, PcduRequest},
udp::{ReceptionError, SimUdpClient},
SimCtrlReply, SimCtrlRequest, SimReply, SimRequest,
};
use crate::eps::tests::get_all_off_switch_map;
use delegate::delegate;
use super::SimUdpServer;
// Wait time to ensure even possibly laggy systems like CI servers can run the tests.
const SERVER_WAIT_TIME_MS: u64 = 50;
struct UdpTestbench {
client: SimUdpClient,
stop_signal: Arc<AtomicBool>,
request_receiver: mpsc::Receiver<SimRequest>,
reply_sender: mpsc::Sender<SimReply>,
}
impl UdpTestbench {
pub fn new(
client_non_blocking: bool,
client_read_timeout_ms: Option<u64>,
max_num_replies: usize,
) -> std::io::Result<(Self, SimUdpServer)> {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let stop_signal = Arc::new(AtomicBool::new(false));
let server = SimUdpServer::new(
0,
request_sender,
reply_receiver,
max_num_replies,
Some(stop_signal.clone()),
)?;
let server_addr = server.server_addr()?;
Ok((
Self {
client: SimUdpClient::new(
&server_addr,
client_non_blocking,
client_read_timeout_ms,
)?,
stop_signal,
request_receiver,
reply_sender,
},
server,
))
}
pub fn try_recv_request(&self) -> Result<SimRequest, mpsc::TryRecvError> {
self.request_receiver.try_recv()
}
pub fn stop(&self) {
self.stop_signal.store(true, Ordering::Relaxed);
}
pub fn send_reply(&self, sim_reply: &SimReply) {
self.reply_sender
.send(sim_reply.clone())
.expect("sending sim reply failed");
}
delegate! {
to self.client {
pub fn send_request(&self, sim_request: &SimRequest) -> std::io::Result<usize>;
pub fn recv_sim_reply(&mut self) -> Result<SimReply, ReceptionError>;
}
}
pub fn check_no_sim_reply_available(&mut self) {
if let Err(ReceptionError::Io(ref io_error)) = self.recv_sim_reply() {
if io_error.kind() == ErrorKind::WouldBlock {
// A WouldBlock error means no reply is available, which is the expected outcome here.
return;
} else {
// Handle unexpected errors (e.g., socket closed) here.
panic!("unexpected request server error: {io_error}");
}
}
panic!("unexpected reply available");
}
pub fn check_next_sim_reply(&mut self, expected_reply: &SimReply) {
match self.recv_sim_reply() {
Ok(received_sim_reply) => assert_eq!(expected_reply, &received_sim_reply),
Err(e) => match e {
ReceptionError::Io(ref io_error) => {
if io_error.kind() == ErrorKind::WouldBlock {
// A WouldBlock error means no reply arrived within the client read timeout.
panic!("no simulation reply received");
} else {
// Handle unexpected errors (e.g., socket closed) here.
panic!("unexpected request server error: {e}");
}
}
ReceptionError::SerdeJson(json_error) => {
panic!("unexpected JSON error: {json_error}");
}
},
}
}
}
#[test]
fn test_basic_udp_request_reception() {
let (udp_testbench, mut udp_server) =
UdpTestbench::new(true, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
let sim_request = SimRequest::new(PcduRequest::RequestSwitchInfo);
udp_testbench
.send_request(&sim_request)
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
// Check that the sim request has arrived and was forwarded.
let received_sim_request = udp_testbench
.try_recv_request()
.expect("did not receive request");
assert_eq!(sim_request, received_sim_request);
// Stop the server.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_reply_server() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(false, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
udp_testbench.check_next_sim_reply(&sim_reply);
// Stop the server.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_req_server_and_reply_sender() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(false, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// Send a ping so that the server knows the address of the client.
// Do not check that the request arrives on the receiver side; that is covered by another test.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
// Send a reply to the server, ensure it gets forwarded to the client.
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
// Now we check that the reply server can send back replies to the client.
udp_testbench.check_next_sim_reply(&sim_reply);
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_replies_client_unconnected() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(true, None, 10).expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// Send a reply to the server. The client is not connected, so it won't get forwarded.
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
std::thread::sleep(Duration::from_millis(10));
udp_testbench.check_no_sim_reply_available();
// Connect by sending a ping.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
udp_testbench.check_next_sim_reply(&sim_reply);
// Now we check that the reply server can send back replies to the client.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_reply_server_old_replies_overwritten() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(true, None, 3).expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// The server only caches up to 3 replies.
let sim_reply = SimReply::new(SimCtrlReply::Pong);
for _ in 0..4 {
udp_testbench.send_reply(&sim_reply);
}
std::thread::sleep(Duration::from_millis(20));
udp_testbench.check_no_sim_reply_available();
// Connect by sending a ping.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
for _ in 0..3 {
udp_testbench.check_next_sim_reply(&sim_reply);
}
udp_testbench.check_no_sim_reply_available();
udp_testbench.stop();
server_thread.join().unwrap();
}
}
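
Outside of the test module, the same round trip can be driven through the SimUdpClient helper from the library crate. A minimal sketch, assuming a running server and its known socket address; the meaning of the constructor arguments (blocking mode, read timeout in milliseconds) is inferred from the testbench above:

use satrs_minisim::udp::SimUdpClient;
use satrs_minisim::{SimCtrlRequest, SimRequest};
use std::net::SocketAddr;

fn ping_simulator(server_addr: &SocketAddr) {
    // Blocking client with a 200 ms read timeout.
    let mut client =
        SimUdpClient::new(server_addr, false, Some(200)).expect("creating UDP client failed");
    client
        .send_request(&SimRequest::new(SimCtrlRequest::Ping))
        .expect("sending request failed");
    // The server sends replies to the address it last received a request from.
    match client.recv_sim_reply() {
        Ok(reply) => println!("received sim reply: {reply:?}"),
        Err(_) => println!("no sim reply received"),
    }
}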


@ -18,7 +18,9 @@ default-features = false
optional = true
[dependencies.spacepackets]
version = "0.10"
git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
version = "0.11.0-rc.0"
branch = "main"
default-features = false
[features]


@ -8,7 +8,28 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.0]
## Changed
- Refactored `EventManager` to heavily use generics instead of trait objects.
- `SendEventProvider` -> `EventSendProvider`. `id` trait method renamed to `channel_id`.
- `ListenerTable` -> `ListenerMapProvider`
- `SenderTable` -> `SenderMapProvider`
- There is an `EventManagerWithMpsc` and a `EventManagerWithBoundedMpsc` helper type now.
- Refactored ECSS TM sender abstractions to be generic over different message queue backends.
- Refactored Verification Reporter abstractions and implementation to be generic over the sender
instead of using trait objects.
- `PusServiceProvider` renamed to `PusServiceDistributor` to make the purpose of the object
clearer.
- `PusServiceProvider::handle_pus_tc_packet` renamed to `PusServiceDistributor::distribute_packet`.
- `PusServiceDistributor` and `CcsdsDistributor` now use generics instead of trait objects.
This also makes it easier to access the concrete trait implementations.
## Fixed
- Update deprecated API for `PusScheduler::insert_wrapped_tc_cds_short`
and `PusScheduler::insert_wrapped_tc_cds_long`.
# [v0.2.0-rc.0] 2024-02-21
## Added


@ -17,7 +17,10 @@ delegate = ">0.7, <=0.10"
paste = "1"
smallvec = "1"
crc = "3"
satrs-shared = "0.1.2"
[dependencies.satrs-shared]
version = "0.1.2"
path = "../satrs-shared"
[dependencies.num_enum]
version = ">0.5, <=0.7"
@ -68,7 +71,9 @@ features = ["all"]
optional = true
[dependencies.spacepackets]
version = "0.10"
git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
version = "0.11.0-rc.0"
branch = "main"
default-features = false
[dependencies.cobs]


@ -40,3 +40,24 @@ impl TargetedActionRequest {
}
}
}
/// A reply to an action request.
#[non_exhaustive]
#[derive(Clone, Eq, PartialEq, Debug)]
pub enum ActionReply {
CompletionFailed(ActionId),
StepFailed {
id: ActionId,
step: u32,
},
Completed(ActionId),
#[cfg(feature = "alloc")]
CompletedStringId(alloc::string::String),
#[cfg(feature = "alloc")]
CompletionFailedStringId(alloc::string::String),
#[cfg(feature = "alloc")]
StepFailedStringId {
id: alloc::string::String,
step: u32,
},
}


@ -7,7 +7,7 @@ use spacepackets::ByteConversionError;
use std::error::Error;
use std::path::Path;
#[cfg(feature = "std")]
pub use stdmod::*;
pub use std_mod::*;
pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);
@ -148,12 +148,11 @@ pub trait VirtualFilestore {
}
#[cfg(feature = "std")]
pub mod stdmod {
pub mod std_mod {
use super::*;
use std::{
fs::{self, File, OpenOptions},
io::{BufReader, Read, Seek, SeekFrom, Write},
path::Path,
};
#[derive(Default)]


@ -10,27 +10,27 @@
//! [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/events.html)
//! about events first:
//!
//! The event manager has a listener table abstracted by the [ListenerTable], which maps
//! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps
//! listener groups identified by [ListenerKey]s to a [sender ID][ChannelId].
//! It also contains a sender table abstracted by the [SenderTable] which maps these sender IDs
//! to a concrete [SendEventProvider]s. A simple approach would be to use one send event provider
//! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender
//! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider
//! for each OBSW thread and then subscribe for all interesting events for a particular thread
//! using the send event provider ID.
//!
//! This can be done with the [EventManager] like this:
//!
//! 1. Provide a concrete [EventReceiver] implementation. This abstraction allows using different
//! 1. Provide a concrete [EventReceiveProvider] implementation. This abstraction allows using different
//! message queue backends. A straightforward implementation where dynamic memory allocation is
//! not a big concern could use [std::sync::mpsc::channel] to do this and is provided in the
//! form of the [MpscEventReceiver].
//! 2. To set up event creators, create channel pairs using some message queue implementation.
//! Each event creator gets a (cloned) sender component which allows it to send events to the
//! manager.
//! 3. The event manager receives the receiver component as part of a [EventReceiver]
//! 3. The event manager receives the receiver component as part of a [EventReceiveProvider]
//! implementation so all events are routed to the manager.
//! 4. Create the [send event providers][SendEventProvider]s which allow routing events to
//! subscribers. You can now use their [sender IDs][SendEventProvider::id] to subscribe for
//! event groups, for example by using the [EventManager::subscribe_single] method.
//! 4. Create the [send event providers][EventSendProvider]s which allow routing events to
//! subscribers. You can now use their [sender IDs][EventSendProvider::channel_id] to subscribe
//! for event groups, for example by using the [EventManager::subscribe_single] method.
//! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
//! manager can route listener groups to the send provider.
//!
@ -41,24 +41,22 @@
//!
//! # Examples
//!
//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/pus_events.rs)
//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/pus_events.rs)
//! for a concrete example using multi-threading where events are routed to
//! different threads.
use crate::events::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::params::{Params, ParamsHeapless};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use alloc::vec;
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use crate::queue::GenericSendError;
use core::marker::PhantomData;
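A brief sketch of the SwitchStateBinary conversions added above, written as a test that could sit next to the definitions so that no imports are needed:

#[test]
fn switch_state_binary_conversions() {
    // Any unsigned integer maps to a binary state: zero is Off, everything else is On.
    assert_eq!(SwitchStateBinary::from(0_u8), SwitchStateBinary::Off);
    assert_eq!(SwitchStateBinary::from(2_u16), SwitchStateBinary::On);
    // The fallible direction only accepts the two unambiguous states.
    assert_eq!(
        SwitchStateBinary::try_from(SwitchState::On),
        Ok(SwitchStateBinary::On)
    );
    assert!(SwitchStateBinary::try_from(SwitchState::Faulty).is_err());
    // A binary state can always be widened back into the full switch state.
    assert!(matches!(
        SwitchState::from(SwitchStateBinary::Off),
        SwitchState::Off
    ));
}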
use core::slice::Iter;
#[cfg(feature = "alloc")]
use hashbrown::HashMap;
use crate::ChannelId;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "std")]
pub use stdmod::*;
pub use std_mod::*;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum ListenerKey {
@ -75,108 +73,110 @@ pub type EventWithAuxData<Event> = (Event, Option<Params>);
pub type EventU32WithAuxData = EventWithAuxData<EventU32>;
pub type EventU16WithAuxData = EventWithAuxData<EventU16>;
pub trait SendEventProvider<Provider: GenericEvent, AuxDataProvider = Params> {
type Error;
pub trait EventSendProvider<EV: GenericEvent, AuxDataProvider = Params> {
fn channel_id(&self) -> ChannelId;
fn id(&self) -> ChannelId;
fn send_no_data(&self, event: Provider) -> Result<(), Self::Error> {
fn send_no_data(&self, event: EV) -> Result<(), GenericSendError> {
self.send(event, None)
}
fn send(&self, event: Provider, aux_data: Option<AuxDataProvider>) -> Result<(), Self::Error>;
fn send(&self, event: EV, aux_data: Option<AuxDataProvider>) -> Result<(), GenericSendError>;
}
/// Generic abstraction for an event receiver.
pub trait EventReceiver<Event: GenericEvent, AuxDataProvider = Params> {
/// This function has to be provided by any event receiver. A receive call may or may not return
/// an event.
///
/// To allow returning arbitrary additional auxiliary data, a mutable slice is passed to the
/// [Self::receive] call as well. Receivers can write data to this slice, but care must be taken
/// to avoid panics due to size missmatches or out of bound writes.
fn receive(&self) -> Option<(Event, Option<AuxDataProvider>)>;
pub trait EventReceiveProvider<Event: GenericEvent, AuxDataProvider = Params> {
/// This function has to be provided by any event receiver. A call may or may not return
/// an event and optional auxiliary data.
fn try_recv_event(&self) -> Option<(Event, Option<AuxDataProvider>)>;
}
pub trait ListenerTable {
fn get_listeners(&self) -> Vec<ListenerKey>;
pub trait ListenerMapProvider {
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
fn get_listeners(&self) -> alloc::vec::Vec<ListenerKey>;
fn contains_listener(&self, key: &ListenerKey) -> bool;
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>>;
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool;
fn remove_duplicates(&mut self, key: &ListenerKey);
}
pub trait SenderTable<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params> {
pub trait SenderMapProvider<
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent = EventU32,
AUX = Params,
>
{
fn contains_send_event_provider(&self, id: &ChannelId) -> bool;
fn get_send_event_provider(
&self,
id: &ChannelId,
) -> Option<&dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>;
fn add_send_event_provider(
&mut self,
send_provider: Box<
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
>,
) -> bool;
fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP>;
fn add_send_event_provider(&mut self, send_provider: SP) -> bool;
}
/// Generic event manager implementation.
///
/// # Generics
///
/// * `SendProviderError`: [SendEventProvider] error type
/// * `Event`: Concrete event provider, currently either [EventU32] or [EventU16]
/// * `AuxDataProvider`: Concrete auxiliary data provider, currently either [Params] or
/// [ParamsHeapless]
pub struct EventManager<SendProviderError, Event: GenericEvent = EventU32, AuxDataProvider = Params>
{
listener_table: Box<dyn ListenerTable>,
sender_table: Box<dyn SenderTable<SendProviderError, Event, AuxDataProvider>>,
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
/// * `ERP`: [EventReceiveProvider] used to receive all events.
/// * `SMP`: [SenderMapProvider] which maps channel IDs to send providers.
/// * `LTR`: [ListenerMapProvider] which maps listener keys to channel IDs.
/// * `SP`: [EventSendProvider] contained within the sender map which sends the events.
/// * `EV`: The event type. This type must implement the [GenericEvent] trait. Currently only
/// [EventU32] and [EventU16] are supported.
/// * `AUX`: Auxiliary data which is sent with the event to provide optional context information.
pub struct EventManager<
ERP: EventReceiveProvider<EV, AUX>,
SMP: SenderMapProvider<SP, EV, AUX>,
LTR: ListenerMapProvider,
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent = EventU32,
AUX = Params,
> {
event_receiver: ERP,
sender_map: SMP,
listener_map: LTR,
phantom: core::marker::PhantomData<(SP, EV, AUX)>,
}
/// Safety: It is safe to implement [Send] because all fields in the [EventManager] are [Send]
/// as well
#[cfg(feature = "std")]
unsafe impl<E, Event: GenericEvent + Send, AuxDataProvider: Send> Send
for EventManager<E, Event, AuxDataProvider>
{
}
#[cfg(feature = "std")]
pub type EventManagerWithMpscQueue<Event, AuxDataProvider> = EventManager<
std::sync::mpsc::SendError<(Event, Option<AuxDataProvider>)>,
Event,
AuxDataProvider,
>;
#[derive(Debug)]
pub enum EventRoutingResult<Event: GenericEvent, AuxDataProvider> {
pub enum EventRoutingResult<EV: GenericEvent, AUX> {
/// No event was received
Empty,
/// An event was received and routed.
/// The first tuple entry will contain the number of recipients.
Handled(u32, Event, Option<AuxDataProvider>),
/// An event was received and routed to listeners.
Handled {
num_recipients: u32,
event: EV,
aux_data: Option<AUX>,
},
}
#[derive(Debug)]
pub enum EventRoutingError<E> {
SendError(E),
pub enum EventRoutingError {
Send(GenericSendError),
NoSendersForKey(ListenerKey),
NoSenderForId(ChannelId),
}
#[derive(Debug)]
pub struct EventRoutingErrorsWithResult<Event: GenericEvent, AuxDataProvider, E> {
pub result: EventRoutingResult<Event, AuxDataProvider>,
pub errors: [Option<EventRoutingError<E>>; 3],
pub struct EventRoutingErrorsWithResult<EV: GenericEvent, AUX> {
pub result: EventRoutingResult<EV, AUX>,
pub errors: [Option<EventRoutingError>; 3],
}
impl<E, Event: GenericEvent + Copy> EventManager<E, Event> {
impl<
ER: EventReceiveProvider<EV, AUX>,
S: SenderMapProvider<SP, EV, AUX>,
L: ListenerMapProvider,
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent + Copy,
AUX: Clone,
> EventManager<ER, S, L, SP, EV, AUX>
{
pub fn remove_duplicates(&mut self, key: &ListenerKey) {
self.listener_table.remove_duplicates(key)
self.listener_map.remove_duplicates(key)
}
/// Subscribe for a unique event.
pub fn subscribe_single(&mut self, event: &Event, sender_id: ChannelId) {
pub fn subscribe_single(&mut self, event: &EV, sender_id: ChannelId) {
self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
}
@ -194,49 +194,37 @@ impl<E, Event: GenericEvent + Copy> EventManager<E, Event> {
}
}
impl<E: 'static, Event: GenericEvent + Copy + 'static, AuxDataProvider: Clone + 'static>
EventManager<E, Event, AuxDataProvider>
impl<
ERP: EventReceiveProvider<EV, AUX>,
SMP: SenderMapProvider<SP, EV, AUX>,
LTR: ListenerMapProvider,
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent + Copy,
AUX: Clone,
> EventManager<ERP, SMP, LTR, SP, EV, AUX>
{
/// Create an event manager where the sender table will be the [DefaultSenderTableProvider]
/// and the listener table will be the [DefaultListenerTableProvider].
pub fn new(event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>) -> Self {
let listener_table: Box<DefaultListenerTableProvider> = Box::default();
let sender_table: Box<DefaultSenderTableProvider<E, Event, AuxDataProvider>> =
Box::default();
Self::new_custom_tables(listener_table, sender_table, event_receiver)
}
}
impl<E, Event: GenericEvent + Copy, AuxDataProvider: Clone>
EventManager<E, Event, AuxDataProvider>
{
pub fn new_custom_tables(
listener_table: Box<dyn ListenerTable>,
sender_table: Box<dyn SenderTable<E, Event, AuxDataProvider>>,
event_receiver: Box<dyn EventReceiver<Event, AuxDataProvider>>,
) -> Self {
pub fn new_with_custom_maps(event_receiver: ERP, sender_map: SMP, listener_map: LTR) -> Self {
EventManager {
listener_table,
sender_table,
listener_map,
sender_map,
event_receiver,
phantom: PhantomData,
}
}
pub fn add_sender(
&mut self,
send_provider: impl SendEventProvider<Event, AuxDataProvider, Error = E> + 'static,
) {
/// Add a new sender component which can be used to send events to subscribers.
pub fn add_sender(&mut self, send_provider: SP) {
if !self
.sender_table
.contains_send_event_provider(&send_provider.id())
.sender_map
.contains_send_event_provider(&send_provider.channel_id())
{
self.sender_table
.add_send_event_provider(Box::new(send_provider));
self.sender_map.add_send_event_provider(send_provider);
}
}
/// Generic function to update the event subscribers.
fn update_listeners(&mut self, key: ListenerKey, sender_id: ChannelId) {
self.listener_table.add_listener(key, sender_id);
self.listener_map.add_listener(key, sender_id);
}
/// This function will use the cached event receiver and try to receive one event.
@ -248,40 +236,36 @@ impl<E, Event: GenericEvent + Copy, AuxDataProvider: Clone>
/// [EventRoutingErrorsWithResult] error struct.
pub fn try_event_handling(
&self,
) -> Result<
EventRoutingResult<Event, AuxDataProvider>,
EventRoutingErrorsWithResult<Event, AuxDataProvider, E>,
> {
) -> Result<EventRoutingResult<EV, AUX>, EventRoutingErrorsWithResult<EV, AUX>> {
let mut err_idx = 0;
let mut err_slice = [None, None, None];
let mut num_recipients = 0;
let mut add_error = |error: EventRoutingError<E>| {
let mut add_error = |error: EventRoutingError| {
if err_idx < 3 {
err_slice[err_idx] = Some(error);
err_idx += 1;
}
};
let mut send_handler =
|key: &ListenerKey, event: Event, aux_data: &Option<AuxDataProvider>| {
if self.listener_table.contains_listener(key) {
if let Some(ids) = self.listener_table.get_listener_ids(key) {
for id in ids {
if let Some(sender) = self.sender_table.get_send_event_provider(id) {
if let Err(e) = sender.send(event, aux_data.clone()) {
add_error(EventRoutingError::SendError(e));
} else {
num_recipients += 1;
}
let mut send_handler = |key: &ListenerKey, event: EV, aux_data: &Option<AUX>| {
if self.listener_map.contains_listener(key) {
if let Some(ids) = self.listener_map.get_listener_ids(key) {
for id in ids {
if let Some(sender) = self.sender_map.get_send_event_provider(id) {
if let Err(e) = sender.send(event, aux_data.clone()) {
add_error(EventRoutingError::Send(e));
} else {
add_error(EventRoutingError::NoSenderForId(*id));
num_recipients += 1;
}
} else {
add_error(EventRoutingError::NoSenderForId(*id));
}
} else {
add_error(EventRoutingError::NoSendersForKey(*key));
}
} else {
add_error(EventRoutingError::NoSendersForKey(*key));
}
};
if let Some((event, aux_data)) = self.event_receiver.receive() {
}
};
if let Some((event, aux_data)) = self.event_receiver.try_recv_event() {
let single_key = ListenerKey::Single(event.raw_as_largest_type());
send_handler(&single_key, event, &aux_data);
let group_key = ListenerKey::Group(event.group_id_as_largest_type());
@ -289,130 +273,177 @@ impl<E, Event: GenericEvent + Copy, AuxDataProvider: Clone>
send_handler(&ListenerKey::All, event, &aux_data);
if err_idx > 0 {
return Err(EventRoutingErrorsWithResult {
result: EventRoutingResult::Handled(num_recipients, event, aux_data),
result: EventRoutingResult::Handled {
num_recipients,
event,
aux_data,
},
errors: err_slice,
});
}
return Ok(EventRoutingResult::Handled(num_recipients, event, aux_data));
return Ok(EventRoutingResult::Handled {
num_recipients,
event,
aux_data,
});
}
Ok(EventRoutingResult::Empty)
}
}
#[derive(Default)]
pub struct DefaultListenerTableProvider {
listeners: HashMap<ListenerKey, Vec<ChannelId>>,
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use alloc::vec::Vec;
use hashbrown::HashMap;
pub struct DefaultSenderTableProvider<
SendProviderError,
Event: GenericEvent = EventU32,
AuxDataProvider = Params,
> {
senders: HashMap<
ChannelId,
Box<dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>>,
>,
}
use super::*;
impl<SendProviderError, Event: GenericEvent, AuxDataProvider> Default
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
{
fn default() -> Self {
Self {
senders: HashMap::new(),
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses regular mpsc channels as the message queue backend.
pub type EventManagerWithMpsc<EV = EventU32, AUX = Params> = EventManager<
MpscEventReceiver,
DefaultSenderMap<EventSenderMpsc<EV>, EV, AUX>,
DefaultListenerMap,
EventSenderMpsc<EV>,
>;
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses
/// [bounded mpsc senders](https://doc.rust-lang.org/std/sync/mpsc/struct.SyncSender.html) as the
/// message queue backend.
pub type EventManagerWithBoundedMpsc<EV = EventU32, AUX = Params> = EventManager<
MpscEventReceiver,
DefaultSenderMap<EventSenderMpscBounded<EV>, EV, AUX>,
DefaultListenerMap,
EventSenderMpscBounded<EV>,
>;
impl<
ER: EventReceiveProvider<EV, AUX>,
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent + Copy,
AUX: 'static,
> EventManager<ER, DefaultSenderMap<SP, EV, AUX>, DefaultListenerMap, SP, EV, AUX>
{
/// Create an event manager where the sender table will be the [DefaultSenderMap]
/// and the listener table will be the [DefaultListenerMap].
pub fn new(event_receiver: ER) -> Self {
Self {
listener_map: DefaultListenerMap::default(),
sender_map: DefaultSenderMap::default(),
event_receiver,
phantom: PhantomData,
}
}
}
}
impl ListenerTable for DefaultListenerTableProvider {
fn get_listeners(&self) -> Vec<ListenerKey> {
let mut key_list = Vec::new();
for key in self.listeners.keys() {
key_list.push(*key);
/// Default listener map.
///
/// Simple implementation which uses a [HashMap] and a [Vec] internally.
#[derive(Default)]
pub struct DefaultListenerMap {
listeners: HashMap<ListenerKey, Vec<ChannelId>>,
}
impl ListenerMapProvider for DefaultListenerMap {
fn get_listeners(&self) -> Vec<ListenerKey> {
let mut key_list = Vec::new();
for key in self.listeners.keys() {
key_list.push(*key);
}
key_list
}
key_list
}
fn contains_listener(&self, key: &ListenerKey) -> bool {
self.listeners.contains_key(key)
}
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>> {
self.listeners.get(key).map(|vec| vec.iter())
}
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool {
if let Some(existing_list) = self.listeners.get_mut(&key) {
existing_list.push(sender_id);
} else {
let new_list = vec![sender_id];
self.listeners.insert(key, new_list);
fn contains_listener(&self, key: &ListenerKey) -> bool {
self.listeners.contains_key(key)
}
true
}
fn remove_duplicates(&mut self, key: &ListenerKey) {
if let Some(list) = self.listeners.get_mut(key) {
list.sort_unstable();
list.dedup();
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<ChannelId>> {
self.listeners.get(key).map(|vec| vec.iter())
}
fn add_listener(&mut self, key: ListenerKey, sender_id: ChannelId) -> bool {
if let Some(existing_list) = self.listeners.get_mut(&key) {
existing_list.push(sender_id);
} else {
let new_list = alloc::vec![sender_id];
self.listeners.insert(key, new_list);
}
true
}
fn remove_duplicates(&mut self, key: &ListenerKey) {
if let Some(list) = self.listeners.get_mut(key) {
list.sort_unstable();
list.dedup();
}
}
}
}
impl<SendProviderError, Event: GenericEvent, AuxDataProvider>
SenderTable<SendProviderError, Event, AuxDataProvider>
for DefaultSenderTableProvider<SendProviderError, Event, AuxDataProvider>
{
fn contains_send_event_provider(&self, id: &ChannelId) -> bool {
self.senders.contains_key(id)
/// Default sender map.
///
/// Simple implementation which uses a [HashMap] internally.
pub struct DefaultSenderMap<
SP: EventSendProvider<EV, AUX>,
EV: GenericEvent = EventU32,
AUX = Params,
> {
senders: HashMap<ChannelId, SP>,
phantom: PhantomData<(EV, AUX)>,
}
fn get_send_event_provider(
&self,
id: &ChannelId,
) -> Option<&dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>> {
self.senders
.get(id)
.filter(|sender| sender.id() == *id)
.map(|v| v.as_ref())
}
fn add_send_event_provider(
&mut self,
send_provider: Box<
dyn SendEventProvider<Event, AuxDataProvider, Error = SendProviderError>,
>,
) -> bool {
let id = send_provider.id();
if self.senders.contains_key(&id) {
return false;
impl<SP: EventSendProvider<EV, AUX>, EV: GenericEvent, AUX> Default
for DefaultSenderMap<SP, EV, AUX>
{
fn default() -> Self {
Self {
senders: Default::default(),
phantom: Default::default(),
}
}
}
impl<SP: EventSendProvider<EV, AUX>, EV: GenericEvent, AUX> SenderMapProvider<SP, EV, AUX>
for DefaultSenderMap<SP, EV, AUX>
{
fn contains_send_event_provider(&self, id: &ChannelId) -> bool {
self.senders.contains_key(id)
}
fn get_send_event_provider(&self, id: &ChannelId) -> Option<&SP> {
self.senders
.get(id)
.filter(|sender| sender.channel_id() == *id)
}
fn add_send_event_provider(&mut self, send_provider: SP) -> bool {
let id = send_provider.channel_id();
if self.senders.contains_key(&id) {
return false;
}
self.senders.insert(id, send_provider).is_none()
}
self.senders.insert(id, send_provider).is_none()
}
}
#[cfg(feature = "std")]
pub mod stdmod {
pub mod std_mod {
use super::*;
use crate::event_man::{EventReceiver, EventWithAuxData};
use crate::events::{EventU16, EventU32, GenericEvent};
use crate::params::Params;
use std::sync::mpsc::{Receiver, SendError, Sender};
use std::sync::mpsc;
pub struct MpscEventReceiver<Event: GenericEvent + Send = EventU32> {
mpsc_receiver: Receiver<(Event, Option<Params>)>,
mpsc_receiver: mpsc::Receiver<(Event, Option<Params>)>,
}
impl<Event: GenericEvent + Send> MpscEventReceiver<Event> {
pub fn new(receiver: Receiver<(Event, Option<Params>)>) -> Self {
pub fn new(receiver: mpsc::Receiver<(Event, Option<Params>)>) -> Self {
Self {
mpsc_receiver: receiver,
}
}
}
impl<Event: GenericEvent + Send> EventReceiver<Event> for MpscEventReceiver<Event> {
fn receive(&self) -> Option<EventWithAuxData<Event>> {
impl<Event: GenericEvent + Send> EventReceiveProvider<Event> for MpscEventReceiver<Event> {
fn try_recv_event(&self) -> Option<EventWithAuxData<Event>> {
if let Ok(event_and_data) = self.mpsc_receiver.try_recv() {
return Some(event_and_data);
}
@ -423,31 +454,75 @@ pub mod stdmod {
pub type MpscEventU32Receiver = MpscEventReceiver<EventU32>;
pub type MpscEventU16Receiver = MpscEventReceiver<EventU16>;
/// Generic event sender which uses a regular [mpsc::Sender] as the messaging backend to
/// send events.
#[derive(Clone)]
pub struct MpscEventSendProvider<Event: GenericEvent + Send> {
pub struct EventSenderMpsc<Event: GenericEvent + Send> {
id: u32,
sender: Sender<(Event, Option<Params>)>,
sender: mpsc::Sender<(Event, Option<Params>)>,
}
impl<Event: GenericEvent + Send> MpscEventSendProvider<Event> {
pub fn new(id: u32, sender: Sender<(Event, Option<Params>)>) -> Self {
impl<Event: GenericEvent + Send> EventSenderMpsc<Event> {
pub fn new(id: u32, sender: mpsc::Sender<(Event, Option<Params>)>) -> Self {
Self { id, sender }
}
}
impl<Event: GenericEvent + Send> SendEventProvider<Event> for MpscEventSendProvider<Event> {
type Error = SendError<(Event, Option<Params>)>;
fn id(&self) -> u32 {
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpsc<Event> {
fn channel_id(&self) -> u32 {
self.id
}
fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), Self::Error> {
self.sender.send((event, aux_data))
fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), GenericSendError> {
self.sender
.send((event, aux_data))
.map_err(|_| GenericSendError::RxDisconnected)
}
}
pub type MpscEventU32SendProvider = MpscEventSendProvider<EventU32>;
pub type MpscEventU16SendProvider = MpscEventSendProvider<EventU16>;
/// Generic event sender which uses the [mpsc::SyncSender] as the messaging backend to send
/// events. This has the advantage that the channel is bounded and thus more deterministic.
#[derive(Clone)]
pub struct EventSenderMpscBounded<Event: GenericEvent + Send> {
channel_id: u32,
sender: mpsc::SyncSender<(Event, Option<Params>)>,
capacity: usize,
}
impl<Event: GenericEvent + Send> EventSenderMpscBounded<Event> {
pub fn new(
channel_id: u32,
sender: mpsc::SyncSender<(Event, Option<Params>)>,
capacity: usize,
) -> Self {
Self {
channel_id,
sender,
capacity,
}
}
}
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpscBounded<Event> {
fn channel_id(&self) -> u32 {
self.channel_id
}
fn send(&self, event: Event, aux_data: Option<Params>) -> Result<(), GenericSendError> {
if let Err(e) = self.sender.try_send((event, aux_data)) {
return match e {
mpsc::TrySendError::Full(_) => {
Err(GenericSendError::QueueFull(Some(self.capacity as u32)))
}
mpsc::TrySendError::Disconnected(_) => Err(GenericSendError::RxDisconnected),
};
}
Ok(())
}
}
pub type EventU32SenderMpsc = EventSenderMpsc<EventU32>;
pub type EventU16SenderMpsc = EventSenderMpsc<EventU16>;
pub type EventU32SenderMpscBounded = EventSenderMpscBounded<EventU32>;
pub type EventU16SenderMpscBounded = EventSenderMpscBounded<EventU16>;
}
#[cfg(test)]
@ -456,32 +531,10 @@ mod tests {
use crate::event_man::EventManager;
use crate::events::{EventU32, GenericEvent, Severity};
use crate::params::ParamsRaw;
use alloc::boxed::Box;
use std::format;
use std::sync::mpsc::{channel, Receiver, SendError, Sender};
use std::sync::mpsc::{self, channel, Receiver, Sender};
#[derive(Clone)]
struct MpscEventSenderQueue {
id: u32,
mpsc_sender: Sender<EventU32WithAuxData>,
}
impl MpscEventSenderQueue {
fn new(id: u32, mpsc_sender: Sender<EventU32WithAuxData>) -> Self {
Self { id, mpsc_sender }
}
}
impl SendEventProvider<EventU32> for MpscEventSenderQueue {
type Error = SendError<EventU32WithAuxData>;
fn id(&self) -> u32 {
self.id
}
fn send(&self, event: EventU32, aux_data: Option<Params>) -> Result<(), Self::Error> {
self.mpsc_sender.send((event, aux_data))
}
}
const TEST_EVENT: EventU32 = EventU32::const_new(Severity::INFO, 0, 5);
fn check_next_event(
expected: EventU32,
@ -500,22 +553,21 @@ mod tests {
expected_num_sent: u32,
) {
assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled(num_recipients, event, _aux_data) = res {
if let EventRoutingResult::Handled {
num_recipients,
event,
..
} = res
{
assert_eq!(event, expected);
assert_eq!(num_recipients, expected_num_sent);
}
}
fn generic_event_man() -> (
Sender<EventU32WithAuxData>,
EventManager<SendError<EventU32WithAuxData>>,
) {
fn generic_event_man() -> (Sender<EventU32WithAuxData>, EventManagerWithMpsc) {
let (event_sender, manager_queue) = channel();
let event_man_receiver = MpscEventReceiver::new(manager_queue);
(
event_sender,
EventManager::new(Box::new(event_man_receiver)),
)
(event_sender, EventManager::new(event_man_receiver))
}
#[test]
@ -524,15 +576,12 @@ mod tests {
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (single_event_sender, single_event_receiver) = channel();
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id());
event_man.add_sender(single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = channel();
let group_event_listener = MpscEventSenderQueue {
id: 1,
mpsc_sender: group_event_sender_0,
};
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.id());
let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0);
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.channel_id());
event_man.add_sender(group_event_listener);
// Test event with one listener
@ -559,8 +608,8 @@ mod tests {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let (single_event_sender, single_event_receiver) = channel();
let single_event_listener = MpscEventSenderQueue::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.id());
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.channel_id());
event_man.add_sender(single_event_listener);
event_sender
.send((event_grp_0, Some(Params::Heapless((2_u32, 3_u32).into()))))
@ -591,12 +640,15 @@ mod tests {
let event_grp_0 = EventU32::new(Severity::INFO, 0, 0).unwrap();
let event_grp_1_0 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_grp_0_sender, event_grp_0_receiver) = channel();
let event_grp_0_and_1_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_grp_0_sender,
};
event_man.subscribe_group(event_grp_0.group_id(), event_grp_0_and_1_listener.id());
event_man.subscribe_group(event_grp_1_0.group_id(), event_grp_0_and_1_listener.id());
let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender);
event_man.subscribe_group(
event_grp_0.group_id(),
event_grp_0_and_1_listener.channel_id(),
);
event_man.subscribe_group(
event_grp_1_0.group_id(),
event_grp_0_and_1_listener.channel_id(),
);
event_man.add_sender(event_grp_0_and_1_listener);
event_sender
@ -625,18 +677,12 @@ mod tests {
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_0_tx_0, event_0_rx_0) = channel();
let (event_0_tx_1, event_0_rx_1) = channel();
let event_listener_0 = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_0_tx_0,
};
let event_listener_1 = MpscEventSenderQueue {
id: 1,
mpsc_sender: event_0_tx_1,
};
let event_listener_0_sender_id = event_listener_0.id();
let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0);
let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1);
let event_listener_0_sender_id = event_listener_0.channel_id();
event_man.subscribe_single(&event_0, event_listener_0_sender_id);
event_man.add_sender(event_listener_0);
let event_listener_1_sender_id = event_listener_1.id();
let event_listener_1_sender_id = event_listener_1.channel_id();
event_man.subscribe_single(&event_0, event_listener_1_sender_id);
event_man.add_sender(event_listener_1);
event_sender
@ -681,16 +727,12 @@ mod tests {
fn test_all_events_listener() {
let (event_sender, manager_queue) = channel();
let event_man_receiver = MpscEventReceiver::new(manager_queue);
let mut event_man: EventManager<SendError<EventU32WithAuxData>> =
EventManager::new(Box::new(event_man_receiver));
let mut event_man = EventManagerWithMpsc::new(event_man_receiver);
let event_0 = EventU32::new(Severity::INFO, 0, 5).unwrap();
let event_1 = EventU32::new(Severity::HIGH, 1, 0).unwrap();
let (event_0_tx_0, all_events_rx) = channel();
let all_events_listener = MpscEventSenderQueue {
id: 0,
mpsc_sender: event_0_tx_0,
};
event_man.subscribe_all(all_events_listener.id());
let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0);
event_man.subscribe_all(all_events_listener.channel_id());
event_man.add_sender(all_events_listener);
event_sender
.send((event_0, None))
@ -707,4 +749,36 @@ mod tests {
check_next_event(event_0, &all_events_rx);
check_next_event(event_1, &all_events_rx);
}
#[test]
fn test_bounded_event_sender_queue_full() {
let (event_sender, _event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
event_sender
.send_no_data(TEST_EVENT)
.expect("sending test event failed");
event_sender
.send_no_data(TEST_EVENT)
.expect("sending test event failed");
event_sender
.send_no_data(TEST_EVENT)
.expect("sending test event failed");
let error = event_sender.send_no_data(TEST_EVENT);
if let Err(e) = error {
assert!(matches!(e, GenericSendError::QueueFull(Some(3))));
} else {
panic!("unexpected error {error:?}");
}
}
#[test]
fn test_bounded_event_sender_rx_dropped() {
let (event_sender, event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
drop(event_receiver);
if let Err(e) = event_sender.send_no_data(TEST_EVENT) {
assert!(matches!(e, GenericSendError::RxDisconnected));
} else {
panic!("Expected error");
}
}
}
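
Condensed into one place, the refactored API exercised by the tests above looks roughly like this; the sketch is written as if it were another test in the same module, so all names are already in scope:

#[test]
fn single_subscriber_round_trip() {
    // Queue through which event creators hand events to the manager.
    let (event_sender, manager_queue) = channel();
    let mut event_man = EventManagerWithMpsc::new(MpscEventReceiver::new(manager_queue));
    // One subscriber interested in exactly one event.
    let (sub_tx, sub_rx) = channel();
    let listener = EventU32SenderMpsc::new(0, sub_tx);
    let event = EventU32::new(Severity::INFO, 0, 0).unwrap();
    event_man.subscribe_single(&event, listener.channel_id());
    event_man.add_sender(listener);
    // Publish the event and let the manager route it to the single subscriber.
    event_sender.send((event, None)).unwrap();
    let result = event_man.try_event_handling().expect("event routing failed");
    assert!(matches!(
        result,
        EventRoutingResult::Handled {
            num_recipients: 1,
            ..
        }
    ));
    assert_eq!(sub_rx.try_recv().unwrap().0, event);
}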


@ -107,7 +107,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
///
/// ## Example
///
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// test also serves as the example application for this module.
pub struct TcpTmtcInCobsServer<
TmError,


@ -88,7 +88,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
/// [spacepackets::PacketId]s as part of the server configuration for that purpose.
///
/// ## Example
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// also serves as the example application for this module.
pub struct TcpSpacepacketsServer<
TmError,


@ -26,8 +26,6 @@ extern crate std;
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod cfdp;
pub mod encoding;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod event_man;
pub mod events;
#[cfg(feature = "std")]


@ -43,22 +43,19 @@
//! This includes the [ParamsHeapless] enumeration for contained values which do not require heap
//! allocation, and the [Params] which enumerates [ParamsHeapless] and some additional types which
//! require [alloc] support but allow for more flexibility.
#[cfg(feature = "alloc")]
use crate::pool::StoreAddr;
#[cfg(feature = "alloc")]
use alloc::string::{String, ToString};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
use core::fmt::Debug;
use core::mem::size_of;
use paste::paste;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
pub use spacepackets::util::ToBeBytes;
use spacepackets::util::UnsignedEnum;
use spacepackets::ByteConversionError;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
pub use spacepackets::util::ToBeBytes;
use alloc::string::{String, ToString};
#[cfg(feature = "alloc")]
use alloc::vec::Vec;
/// Generic trait which is used for objects which can be converted into a raw network (big) endian
/// byte format.
@ -560,56 +557,64 @@ from_conversions_for_raw!(
(f64, Self::F64),
);
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
/// Generic enumeration for additional parameters, including parameters which rely on heap
/// allocations.
/// Generic enumeration for additional parameters, including parameters which rely on heap
/// allocations.
#[derive(Debug, Clone)]
#[non_exhaustive]
pub enum Params {
Heapless(ParamsHeapless),
Store(StoreAddr),
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
#[derive(Debug, Clone)]
pub enum Params {
Heapless(ParamsHeapless),
Store(StoreAddr),
Vec(Vec<u8>),
String(String),
}
Vec(Vec<u8>),
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
String(String),
}
impl From<StoreAddr> for Params {
fn from(x: StoreAddr) -> Self {
Self::Store(x)
}
impl From<StoreAddr> for Params {
fn from(x: StoreAddr) -> Self {
Self::Store(x)
}
}
impl From<ParamsHeapless> for Params {
fn from(x: ParamsHeapless) -> Self {
Self::Heapless(x)
}
impl From<ParamsHeapless> for Params {
fn from(x: ParamsHeapless) -> Self {
Self::Heapless(x)
}
}
impl From<Vec<u8>> for Params {
fn from(val: Vec<u8>) -> Self {
Self::Vec(val)
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
impl From<Vec<u8>> for Params {
fn from(val: Vec<u8>) -> Self {
Self::Vec(val)
}
}
/// Converts a byte slice into the [Params::Vec] variant
impl From<&[u8]> for Params {
fn from(val: &[u8]) -> Self {
Self::Vec(val.to_vec())
}
/// Converts a byte slice into the [Params::Vec] variant
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
impl From<&[u8]> for Params {
fn from(val: &[u8]) -> Self {
Self::Vec(val.to_vec())
}
}
impl From<String> for Params {
fn from(val: String) -> Self {
Self::String(val)
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
impl From<String> for Params {
fn from(val: String) -> Self {
Self::String(val)
}
}
/// Converts a string slice into the [Params::String] variant
impl From<&str> for Params {
fn from(val: &str) -> Self {
Self::String(val.to_string())
}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
/// Converts a string slice into the [Params::String] variant
impl From<&str> for Params {
fn from(val: &str) -> Self {
Self::String(val.to_string())
}
}
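
A short sketch of the From conversions above, written as a test next to the definitions with the alloc feature enabled, so only items from this module are used:

#[test]
fn params_from_conversions() {
    // String and byte-slice inputs end up in the alloc-backed variants.
    let from_str: Params = "hello".into();
    assert!(matches!(from_str, Params::String(_)));
    let from_bytes: Params = [1_u8, 2, 3].as_slice().into();
    assert!(matches!(from_bytes, Params::Vec(_)));
    // Heapless parameter sets are wrapped without any allocation.
    let heapless = Params::Heapless((2_u32, 3_u32).into());
    assert!(matches!(heapless, Params::Heapless(_)));
}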


@ -24,6 +24,42 @@ pub enum SwitchState {
Faulty = 3,
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SwitchStateBinary {
Off = 0,
On = 1,
}
impl TryFrom<SwitchState> for SwitchStateBinary {
type Error = ();
fn try_from(value: SwitchState) -> Result<Self, Self::Error> {
match value {
SwitchState::Off => Ok(SwitchStateBinary::Off),
SwitchState::On => Ok(SwitchStateBinary::On),
_ => Err(()),
}
}
}
impl<T: Into<u64>> From<T> for SwitchStateBinary {
fn from(value: T) -> Self {
if value.into() == 0 {
return SwitchStateBinary::Off;
}
SwitchStateBinary::On
}
}
impl From<SwitchStateBinary> for SwitchState {
fn from(value: SwitchStateBinary) -> Self {
match value {
SwitchStateBinary::Off => SwitchState::Off,
SwitchStateBinary::On => SwitchState::On,
}
}
}
pub type SwitchId = u16;
/// Generic trait for a device capable of turning on and off switches.


@ -44,7 +44,7 @@ pub mod alloc_mod {
/// - Checking the validity of the APID, service ID, subservice ID.
/// - Checking the validity of the user data.
///
/// A [VerificationReporterWithSender] instance is passed to the user to also allow handling
/// A [VerificationReportingProvider] instance is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements.
pub trait PusActionToRequestConverter {
type Error;
@ -62,9 +62,9 @@ pub mod alloc_mod {
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pus::{
verification::VerificationReportingProvider, EcssTcInMemConverter, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceBase,
PusServiceHelper,
get_current_cds_short_timestamp, verification::VerificationReportingProvider,
EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper,
};
use super::*;
@ -81,6 +81,8 @@ pub mod std_mod {
/// 3. Route the action request using the provided [PusActionRequestRouter].
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler].
pub struct PusService8ActionHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusActionToRequestConverter,
@ -88,13 +90,16 @@ pub mod std_mod {
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError = GenericRoutingError,
> {
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub request_converter: RequestConverter,
pub request_router: RequestRouter,
pub routing_error_handler: RoutingErrorHandler,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusActionToRequestConverter<Error = PusPacketHandlingError>,
@ -103,6 +108,8 @@ pub mod std_mod {
RoutingError: Clone,
>
PusService8ActionHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
RequestConverter,
@ -114,7 +121,12 @@ pub mod std_mod {
PusPacketHandlingError: From<RoutingError>,
{
pub fn new(
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
request_router: RequestRouter,
routing_error_handler: RoutingErrorHandler,
@ -139,10 +151,7 @@ pub mod std_mod {
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let mut partial_error = None;
let time_stamp =
PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
let (target_id, action_request) = self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
@ -189,7 +198,8 @@ mod tests {
verification::{
tests::TestVerificationReporter, FailParams, RequestId, VerificationReportingProvider,
},
EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError,
EcssTcInVecConverter, GenericRoutingError, MpscTcReceiver, PusPacketHandlerResult,
PusPacketHandlingError, TmAsVecSenderWithMpsc,
};
use super::*;
@ -259,6 +269,8 @@ mod tests {
struct Pus8HandlerWithVecTester {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusService8ActionHandler<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
TestVerificationReporter,
TestConverter<8>,


@ -2,6 +2,7 @@ use crate::pus::{source_buffer_large_enough, EcssTmtcError};
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::{EcssEnumeration, PusError};
use spacepackets::ByteConversionError;
use spacepackets::{SpHeader, MAX_APID};
use crate::pus::EcssTmSenderCore;
@ -9,145 +10,125 @@ use crate::pus::EcssTmSenderCore;
pub use alloc_mod::EventReporter;
pub use spacepackets::ecss::event::*;
pub struct EventReporterBase {
msg_count: u16,
pub struct EventReportCreator {
apid: u16,
pub dest_id: u16,
}
impl EventReporterBase {
impl EventReportCreator {
pub fn new(apid: u16) -> Option<Self> {
if apid > MAX_APID {
return None;
}
Some(Self {
msg_count: 0,
// msg_count: 0,
dest_id: 0,
apid,
})
}
pub fn event_info(
pub fn event_info<'time, 'src_data>(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
buf,
src_data_buf,
Subservice::TmInfoReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_low_severity(
pub fn event_low_severity<'time, 'src_data>(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
buf,
src_data_buf,
Subservice::TmLowSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_medium_severity(
pub fn event_medium_severity<'time, 'src_data>(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
buf: &'src_data mut [u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
buf,
Subservice::TmMediumSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
pub fn event_high_severity(
pub fn event_high_severity<'time, 'src_data>(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
src_data_buf: &'src_data mut [u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
buf,
src_data_buf,
Subservice::TmHighSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
)
}
fn generate_and_send_generic_tm(
fn generate_and_send_generic_tm<'time, 'src_data>(
&mut self,
buf: &mut [u8],
src_data_buf: &'src_data mut [u8],
subservice: Subservice,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
let tm = self.generate_generic_event_tm(buf, subservice, time_stamp, event_id, aux_data)?;
sender.send_tm(tm.into())?;
self.msg_count += 1;
Ok(())
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_generic_event_tm(src_data_buf, subservice, time_stamp, event_id, aux_data)
}
fn generate_generic_event_tm<'a>(
&'a self,
buf: &'a mut [u8],
fn generate_generic_event_tm<'time, 'src_data>(
&self,
src_data_buf: &'src_data mut [u8],
subservice: Subservice,
time_stamp: &'a [u8],
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<PusTmCreator, EcssTmtcError> {
aux_data: Option<&'src_data [u8]>,
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
let mut src_data_len = event_id.size();
if let Some(aux_data) = aux_data {
src_data_len += aux_data.len();
}
source_buffer_large_enough(buf.len(), src_data_len)?;
source_buffer_large_enough(src_data_buf.len(), src_data_len)?;
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap();
let sec_header = PusTmSecondaryHeader::new(
5,
subservice.into(),
self.msg_count,
self.dest_id,
Some(time_stamp),
);
let sec_header =
PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, Some(time_stamp));
let mut current_idx = 0;
event_id
.write_to_be_bytes(&mut buf[0..event_id.size()])
.map_err(PusError::ByteConversion)?;
event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?;
current_idx += event_id.size();
if let Some(aux_data) = aux_data {
buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
current_idx += aux_data.len();
}
Ok(PusTmCreator::new(
&mut sp_header,
sec_header,
&buf[0..current_idx],
&src_data_buf[0..current_idx],
true,
))
}
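
The refactored `EventReportCreator` no longer sends anything itself: it only builds a `PusTmCreator`, and the caller decides which `EcssTmSenderCore` to hand it to. A minimal sketch of that two-step flow, assuming `satrs` crate paths, that `EventU32` satisfies the `EcssEnumeration` bound required by `event_info`, and using a placeholder APID, buffer size and zeroed timestamp:

```rust
use satrs::events::{EventU32, Severity};
use satrs::pus::event::EventReportCreator;
use satrs::pus::EcssTmSenderCore;

// Build an info-severity event report and forward it through any TM sender (sketch only).
fn send_info_event(sender: &(impl EcssTmSenderCore + ?Sized)) {
    let mut report_creator = EventReportCreator::new(0x02).expect("invalid APID");
    let mut src_data_buf = [0u8; 64];
    let time_stamp = [0u8; 7]; // placeholder CDS short timestamp
    let event = EventU32::const_new(Severity::INFO, 0, 1);
    let tm = report_creator
        .event_info(&mut src_data_buf, &time_stamp, event, None)
        .expect("source data buffer too small");
    sender.send_tm(tm.into()).expect("sending event TM failed");
}
```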
@ -161,12 +142,12 @@ mod alloc_mod {
pub struct EventReporter {
source_data_buf: Vec<u8>,
pub reporter: EventReporterBase,
pub reporter: EventReportCreator,
}
impl EventReporter {
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> {
let reporter = EventReporterBase::new(apid)?;
let reporter = EventReportCreator::new(apid)?;
Some(Self {
source_data_buf: vec![0; max_event_id_and_aux_data_size],
reporter,
@ -179,13 +160,17 @@ mod alloc_mod {
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_info(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
let tm_creator = self
.reporter
.event_info(
self.source_data_buf.as_mut_slice(),
time_stamp,
event_id,
aux_data,
)
.map_err(PusError::ByteConversion)?;
sender.send_tm(tm_creator.into())?;
Ok(())
}
pub fn event_low_severity(
@ -195,13 +180,17 @@ mod alloc_mod {
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_low_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
let tm_creator = self
.reporter
.event_low_severity(
self.source_data_buf.as_mut_slice(),
time_stamp,
event_id,
aux_data,
)
.map_err(PusError::ByteConversion)?;
sender.send_tm(tm_creator.into())?;
Ok(())
}
pub fn event_medium_severity(
@ -211,13 +200,17 @@ mod alloc_mod {
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_medium_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
let tm_creator = self
.reporter
.event_medium_severity(
self.source_data_buf.as_mut_slice(),
time_stamp,
event_id,
aux_data,
)
.map_err(PusError::ByteConversion)?;
sender.send_tm(tm_creator.into())?;
Ok(())
}
pub fn event_high_severity(
@ -227,13 +220,17 @@ mod alloc_mod {
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.reporter.event_high_severity(
self.source_data_buf.as_mut_slice(),
sender,
time_stamp,
event_id,
aux_data,
)
let tm_creator = self
.reporter
.event_high_severity(
self.source_data_buf.as_mut_slice(),
time_stamp,
event_id,
aux_data,
)
.map_err(PusError::ByteConversion)?;
sender.send_tm(tm_creator.into())?;
Ok(())
}
}
}
@ -269,7 +266,7 @@ mod tests {
}
impl EcssChannel for TestSender {
fn id(&self) -> ChannelId {
fn channel_id(&self) -> ChannelId {
0
}
}


@ -2,8 +2,6 @@ use crate::events::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
@ -32,19 +30,19 @@ pub use heapless_mod::*;
/// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains
/// the disabled events.
pub trait PusEventMgmtBackendProvider<Provider: GenericEvent> {
pub trait PusEventMgmtBackendProvider<Event: GenericEvent> {
type Error;
fn event_enabled(&self, event: &Provider) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>;
fn event_enabled(&self, event: &Event) -> bool;
fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
}
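
As the doc comment above suggests, a more embedded-friendly backend could track disabled events in a pre-allocated list instead of a hash set. A hedged sketch against the trait exactly as defined here; the fixed capacity `N`, the `Copy + PartialEq` bounds, the unit error type and the `satrs` import paths are assumptions:

```rust
use satrs::events::GenericEvent;
use satrs::pus::event_man::PusEventMgmtBackendProvider;

/// Hypothetical fixed-capacity backend: no allocation, events are compared by value.
pub struct StaticPusEventMgmtBackend<Event: GenericEvent + PartialEq + Copy, const N: usize> {
    disabled: [Option<Event>; N],
}

impl<Event: GenericEvent + PartialEq + Copy, const N: usize> StaticPusEventMgmtBackend<Event, N> {
    pub fn new() -> Self {
        Self { disabled: [None; N] }
    }
}

impl<Event: GenericEvent + PartialEq + Copy, const N: usize> PusEventMgmtBackendProvider<Event>
    for StaticPusEventMgmtBackend<Event, N>
{
    // Unit error returned when the disabled list is full.
    type Error = ();

    fn event_enabled(&self, event: &Event) -> bool {
        !self.disabled.iter().flatten().any(|e| e == event)
    }

    fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
        for slot in self.disabled.iter_mut() {
            if slot.as_ref() == Some(event) {
                *slot = None;
                return Ok(true);
            }
        }
        Ok(false)
    }

    fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error> {
        if !self.event_enabled(event) {
            return Ok(false);
        }
        match self.disabled.iter_mut().find(|slot| slot.is_none()) {
            Some(free_slot) => {
                *free_slot = Some(*event);
                Ok(true)
            }
            None => Err(()),
        }
    }
}
```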
#[cfg(feature = "heapless")]
pub mod heapless_mod {
use super::*;
use crate::events::{GenericEvent, LargestEventRaw};
use std::marker::PhantomData;
use crate::events::LargestEventRaw;
use core::marker::PhantomData;
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
@ -108,6 +106,10 @@ impl From<EcssTmtcError> for EventManError {
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use core::marker::PhantomData;
use crate::events::EventU16;
use super::*;
/// Default backend provider which uses a hash set as the event reporting status container
@ -115,14 +117,11 @@ pub mod alloc_mod {
///
/// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusMgmtBackendProvider<Event: GenericEvent = EventU32> {
pub struct DefaultPusEventMgmtBackend<Event: GenericEvent = EventU32> {
disabled: HashSet<Event>,
}
/// Safety: All contained field are [Send] as well
unsafe impl<Event: GenericEvent + Send> Send for DefaultPusMgmtBackendProvider<Event> {}
impl<Event: GenericEvent> Default for DefaultPusMgmtBackendProvider<Event> {
impl<Event: GenericEvent> Default for DefaultPusEventMgmtBackend<Event> {
fn default() -> Self {
Self {
disabled: HashSet::default(),
@ -130,46 +129,50 @@ pub mod alloc_mod {
}
}
impl<Provider: GenericEvent + PartialEq + Eq + Hash + Copy + Clone>
PusEventMgmtBackendProvider<Provider> for DefaultPusMgmtBackendProvider<Provider>
impl<EV: GenericEvent + PartialEq + Eq + Hash + Copy + Clone> PusEventMgmtBackendProvider<EV>
for DefaultPusEventMgmtBackend<EV>
{
type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
fn event_enabled(&self, event: &EV) -> bool {
!self.disabled.contains(event)
}
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
fn enable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event))
}
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> {
fn disable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event))
}
}
pub struct PusEventDispatcher<BackendError, Provider: GenericEvent> {
pub struct PusEventDispatcher<
B: PusEventMgmtBackendProvider<EV, Error = E>,
EV: GenericEvent,
E,
> {
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
backend: B,
phantom: PhantomData<(E, EV)>,
}
/// Safety: All contained fields are send as well.
unsafe impl<E: Send, Event: GenericEvent + Send> Send for PusEventDispatcher<E, Event> {}
impl<BackendError, Provider: GenericEvent> PusEventDispatcher<BackendError, Provider> {
pub fn new(
reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>,
) -> Self {
Self { reporter, backend }
impl<B: PusEventMgmtBackendProvider<EV, Error = E>, EV: GenericEvent, E>
PusEventDispatcher<B, EV, E>
{
pub fn new(reporter: EventReporter, backend: B) -> Self {
Self {
reporter,
backend,
phantom: PhantomData,
}
}
}
impl<BackendError, Event: GenericEvent> PusEventDispatcher<BackendError, Event> {
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
pub fn enable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
self.backend.enable_event_reporting(event)
}
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
pub fn disable_tm_for_event(&mut self, event: &EV) -> Result<bool, E> {
self.backend.disable_event_reporting(event)
}
@ -177,7 +180,7 @@ pub mod alloc_mod {
&mut self,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event: Event,
event: EV,
aux_data: Option<&[u8]>,
) -> Result<bool, EventManError> {
if !self.backend.event_enabled(&event) {
@ -208,18 +211,30 @@ pub mod alloc_mod {
}
}
impl<BackendError> PusEventDispatcher<BackendError, EventU32> {
impl<EV: GenericEvent + Copy + PartialEq + Eq + Hash>
PusEventDispatcher<DefaultPusEventMgmtBackend<EV>, EV, ()>
{
pub fn new_with_default_backend(reporter: EventReporter) -> Self {
Self {
reporter,
backend: DefaultPusEventMgmtBackend::default(),
phantom: PhantomData,
}
}
}
impl<B: PusEventMgmtBackendProvider<EventU32, Error = E>, E> PusEventDispatcher<B, EventU32, E> {
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
) -> Result<bool, E> {
self.backend.enable_event_reporting(event.as_ref())
}
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self,
event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> {
) -> Result<bool, E> {
self.backend.disable_event_reporting(event.as_ref())
}
@ -233,30 +248,38 @@ pub mod alloc_mod {
self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
}
}
pub type DefaultPusEventU16Dispatcher<E> =
PusEventDispatcher<DefaultPusEventMgmtBackend<EventU16>, EventU16, E>;
pub type DefaultPusEventU32Dispatcher<E> =
PusEventDispatcher<DefaultPusEventMgmtBackend<EventU32>, EventU32, E>;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::events::SeverityInfo;
use crate::pus::MpscTmAsVecSender;
use std::sync::mpsc::{channel, TryRecvError};
use crate::{events::SeverityInfo, pus::TmAsVecSenderWithMpsc};
use std::sync::mpsc::{self, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
fn create_basic_man() -> PusEventDispatcher<(), EventU32> {
fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> {
let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed");
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
PusEventDispatcher::new(reporter, Box::new(backend))
PusEventDispatcher::new_with_default_backend(reporter)
}
fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> {
let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed");
let backend = DefaultPusEventMgmtBackend::default();
PusEventDispatcher::new(reporter, backend)
}
#[test]
fn test_basic() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test_sender", event_tx);
let mut event_man = create_basic_man_1();
let (event_tx, event_rx) = mpsc::channel();
let mut sender = TmAsVecSenderWithMpsc::new(0, "test_sender", event_tx);
let event_sent = event_man
.generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
.expect("Sending info event failed");
@ -268,9 +291,9 @@ mod tests {
#[test]
fn test_disable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let mut event_man = create_basic_man_2();
let (event_tx, event_rx) = mpsc::channel();
let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());
@ -291,9 +314,9 @@ mod tests {
#[test]
fn test_reenable_event() {
let mut event_man = create_basic_man();
let (event_tx, event_rx) = channel();
let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
let mut event_man = create_basic_man_1();
let (event_tx, event_rx) = mpsc::channel();
let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
assert!(res.is_ok());
assert!(res.unwrap());


@ -7,27 +7,40 @@ use spacepackets::ecss::PusPacket;
use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use super::{
get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore,
PusServiceHelper,
};
pub struct PusService5EventHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
event_request_tx: Sender<EventRequestWithToken>,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> PusService5EventHandler<TcInMemConverter, VerificationReporter>
> PusService5EventHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
service_handler: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
event_request_tx: Sender<EventRequestWithToken>,
) -> Self {
Self {
service_helper: service_handler,
service_helper,
event_request_tx,
}
}
@ -94,9 +107,7 @@ impl<
Ok(PusPacketHandlerResult::RequestHandled)
};
let mut partial_error = None;
let time_stamp = PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
match srv.unwrap() {
Subservice::TmInfoReport
| Subservice::TmLowSeverityReport
@ -138,7 +149,10 @@ mod tests {
use crate::pus::event_man::EventRequest;
use crate::pus::tests::SimplePusPacketHandler;
use crate::pus::verification::{RequestId, VerificationReporterWithSender};
use crate::pus::verification::{
RequestId, VerificationReporterWithSharedPoolMpscBoundedSender,
};
use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc};
use crate::{
events::EventU32,
pus::{
@ -155,8 +169,12 @@ mod tests {
struct Pus5HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler:
PusService5EventHandler<EcssTcInSharedStoreConverter, VerificationReporterWithSender>,
handler: PusService5EventHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
>,
}
impl Pus5HandlerWithStoreTester {


@ -46,7 +46,7 @@ pub mod alloc_mod {
/// - Checking the validity of the APID, service ID, subservice ID.
/// - Checking the validity of the user data.
///
/// A [VerificationReporterWithSender] instance is passed to the user to also allow handling
/// A [VerificationReportingProvider] is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements.
pub trait PusHkToRequestConverter {
type Error;
@ -64,9 +64,9 @@ pub mod alloc_mod {
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pus::{
verification::VerificationReportingProvider, EcssTcInMemConverter, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceBase,
PusServiceHelper,
get_current_cds_short_timestamp, verification::VerificationReportingProvider,
EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper,
};
use super::*;
@ -78,13 +78,15 @@ pub mod std_mod {
/// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter]
/// allows to configure the used telecommand memory backend.
/// 2. Convert the TC to a targeted action request using the provided
/// [PusActionToRequestConverter]. The generic error type is constrained to the
/// [PusHkToRequestConverter]. The generic error type is constrained to the
/// [PusPacketHandlerResult] for the concrete implementation which offers a packet handler.
/// 3. Route the action request using the provided [PusActionRequestRouter]. The generic error
/// 3. Route the action request using the provided [PusHkRequestRouter]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
pub struct PusService3HkHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusHkToRequestConverter,
@ -92,13 +94,16 @@ pub mod std_mod {
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError = GenericRoutingError,
> {
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub request_converter: RequestConverter,
pub request_router: RequestRouter,
pub routing_error_handler: RoutingErrorHandler,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusHkToRequestConverter<Error = PusPacketHandlingError>,
@ -107,6 +112,8 @@ pub mod std_mod {
RoutingError: Clone,
>
PusService3HkHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
RequestConverter,
@ -118,7 +125,12 @@ pub mod std_mod {
PusPacketHandlingError: From<RoutingError>,
{
pub fn new(
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
request_router: RequestRouter,
routing_error_handler: RoutingErrorHandler,
@ -142,10 +154,7 @@ pub mod std_mod {
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let mut partial_error = None;
let time_stamp =
PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
let (target_id, hk_request) = self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
@ -185,6 +194,7 @@ mod tests {
CcsdsPacket, SequenceFlags, SpHeader,
};
use crate::pus::{MpscTcReceiver, TmAsVecSenderWithMpsc};
use crate::{
hk::HkRequest,
pus::{
@ -268,6 +278,8 @@ mod tests {
struct Pus3HandlerWithVecTester {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusService3HkHandler<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
TestVerificationReporter,
TestConverter<3>,


@ -2,6 +2,8 @@
//!
//! This module contains structures to make working with the PUS C standard easier.
//! The satrs-example application contains various usage examples of these components.
use crate::pool::{StoreAddr, StoreError};
use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken};
use crate::queue::{GenericRecvError, GenericSendError};
use crate::ChannelId;
use core::fmt::{Display, Formatter};
@ -34,25 +36,23 @@ pub mod verification;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
use crate::pool::{StoreAddr, StoreError};
use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken};
#[cfg(feature = "std")]
pub use std_mod::*;
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum PusTmWrapper<'tm> {
pub enum PusTmWrapper<'time, 'src_data> {
InStore(StoreAddr),
Direct(PusTmCreator<'tm>),
Direct(PusTmCreator<'time, 'src_data>),
}
impl From<StoreAddr> for PusTmWrapper<'_> {
impl From<StoreAddr> for PusTmWrapper<'_, '_> {
fn from(value: StoreAddr) -> Self {
Self::InStore(value)
}
}
impl<'tm> From<PusTmCreator<'tm>> for PusTmWrapper<'tm> {
fn from(value: PusTmCreator<'tm>) -> Self {
impl<'time, 'src_data> From<PusTmCreator<'time, 'src_data>> for PusTmWrapper<'time, 'src_data> {
fn from(value: PusTmCreator<'time, 'src_data>) -> Self {
Self::Direct(value)
}
}
@ -63,6 +63,7 @@ pub enum EcssTmtcError {
Store(StoreError),
Pus(PusError),
CantSendAddr(StoreAddr),
CantSendDirectTm,
Send(GenericSendError),
Recv(GenericRecvError),
}
@ -82,6 +83,9 @@ impl Display for EcssTmtcError {
EcssTmtcError::CantSendAddr(addr) => {
write!(f, "can not send address {addr}")
}
EcssTmtcError::CantSendDirectTm => {
write!(f, "can not send TM directly")
}
EcssTmtcError::Send(send_e) => {
write!(f, "send error {send_e}")
}
@ -123,13 +127,14 @@ impl Error for EcssTmtcError {
EcssTmtcError::Store(e) => Some(e),
EcssTmtcError::Pus(e) => Some(e),
EcssTmtcError::Send(e) => Some(e),
EcssTmtcError::Recv(e) => Some(e),
_ => None,
}
}
}
pub trait EcssChannel: Send {
/// Each sender can have an ID associated with it
fn id(&self) -> ChannelId;
fn channel_id(&self) -> ChannelId;
fn name(&self) -> &'static str {
"unset"
}
@ -138,7 +143,7 @@ pub trait EcssChannel: Send {
/// Generic trait for a user supplied sender object.
///
/// This sender object is responsible for sending PUS telemetry to a TM sink.
pub trait EcssTmSenderCore: EcssChannel {
pub trait EcssTmSenderCore: Send {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError>;
}
@ -146,7 +151,7 @@ pub trait EcssTmSenderCore: EcssChannel {
///
/// This sender object is responsible for sending PUS telecommands to a TC recipient. Each
/// telecommand can optionally have a token which contains its verification state.
pub trait EcssTcSenderCore: EcssChannel {
pub trait EcssTcSenderCore {
fn send_tc(&self, tc: PusTcCreator, token: Option<TcStateToken>) -> Result<(), EcssTmtcError>;
}
@ -221,25 +226,25 @@ impl TryFrom<EcssTcAndToken> for AcceptedEcssTcAndToken {
#[derive(Debug, Clone)]
pub enum TryRecvTmtcError {
Error(EcssTmtcError),
Tmtc(EcssTmtcError),
Empty,
}
impl From<EcssTmtcError> for TryRecvTmtcError {
fn from(value: EcssTmtcError) -> Self {
Self::Error(value)
Self::Tmtc(value)
}
}
impl From<PusError> for TryRecvTmtcError {
fn from(value: PusError) -> Self {
Self::Error(value.into())
Self::Tmtc(value.into())
}
}
impl From<StoreError> for TryRecvTmtcError {
fn from(value: StoreError) -> Self {
Self::Error(value.into())
Self::Tmtc(value.into())
}
}
@ -366,19 +371,16 @@ pub mod std_mod {
use crate::pool::{PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
EcssChannel, EcssTcAndToken, EcssTcReceiver, EcssTcReceiverCore, EcssTmSender,
EcssTmSenderCore, EcssTmtcError, GenericRecvError, GenericSendError, PusTmWrapper,
TryRecvTmtcError,
EcssChannel, EcssTcAndToken, EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError,
GenericRecvError, GenericSendError, PusTmWrapper, TryRecvTmtcError,
};
use crate::tmtc::tm_helper::SharedTmPool;
use crate::{ChannelId, TargetId};
use alloc::boxed::Box;
use alloc::vec::Vec;
use crossbeam_channel as cb;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::PusError;
use spacepackets::time::cds::TimeProvider;
use spacepackets::ecss::{PusError, WritablePusPacket};
use spacepackets::time::cds::CdsTime;
use spacepackets::time::StdTimestampError;
use spacepackets::time::TimeWriter;
use std::string::String;
@ -386,6 +388,9 @@ pub mod std_mod {
use std::sync::mpsc::TryRecvError;
use thiserror::Error;
#[cfg(feature = "crossbeam")]
pub use cb_mod::*;
use super::verification::VerificationReportingProvider;
use super::{AcceptedEcssTcAndToken, TcInMemory};
@ -395,32 +400,65 @@ pub mod std_mod {
}
}
impl From<cb::SendError<StoreAddr>> for EcssTmtcError {
fn from(_: cb::SendError<StoreAddr>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
impl EcssTmSenderCore for mpsc::Sender<StoreAddr> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => self
.send(addr)
.map_err(|_| GenericSendError::RxDisconnected)?,
PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm),
};
Ok(())
}
}
impl From<cb::TrySendError<StoreAddr>> for EcssTmtcError {
fn from(value: cb::TrySendError<StoreAddr>) -> Self {
match value {
cb::TrySendError::Full(_) => Self::Send(GenericSendError::QueueFull(None)),
cb::TrySendError::Disconnected(_) => Self::Send(GenericSendError::RxDisconnected),
}
impl EcssTmSenderCore for mpsc::SyncSender<StoreAddr> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => self
.try_send(addr)
.map_err(|e| EcssTmtcError::Send(e.into()))?,
PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm),
};
Ok(())
}
}
impl EcssTmSenderCore for mpsc::Sender<Vec<u8>> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => self
.send(tm.to_vec()?)
.map_err(|e| EcssTmtcError::Send(e.into()))?,
};
Ok(())
}
}
impl EcssTmSenderCore for mpsc::SyncSender<Vec<u8>> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => self
.send(tm.to_vec()?)
.map_err(|e| EcssTmtcError::Send(e.into()))?,
};
Ok(())
}
}
#[derive(Clone)]
pub struct MpscTmInSharedPoolSender {
id: ChannelId,
pub struct TmInSharedPoolSenderWithId<Sender: EcssTmSenderCore> {
channel_id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmPool,
sender: mpsc::Sender<StoreAddr>,
sender: Sender,
}
impl EcssChannel for MpscTmInSharedPoolSender {
fn id(&self) -> ChannelId {
self.id
impl<Sender: EcssTmSenderCore> EcssChannel for TmInSharedPoolSenderWithId<Sender> {
fn channel_id(&self) -> ChannelId {
self.channel_id
}
fn name(&self) -> &'static str {
@ -428,36 +466,31 @@ pub mod std_mod {
}
}
impl MpscTmInSharedPoolSender {
impl<Sender: EcssTmSenderCore> TmInSharedPoolSenderWithId<Sender> {
pub fn send_direct_tm(&self, tm: PusTmCreator) -> Result<(), EcssTmtcError> {
let addr = self.shared_tm_store.add_pus_tm(&tm)?;
self.sender
.send(addr)
.map_err(|_| EcssTmtcError::Send(GenericSendError::RxDisconnected))
self.sender.send_tm(PusTmWrapper::InStore(addr))
}
}
impl EcssTmSenderCore for MpscTmInSharedPoolSender {
impl<Sender: EcssTmSenderCore> EcssTmSenderCore for TmInSharedPoolSenderWithId<Sender> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => {
self.sender.send(addr)?;
Ok(())
}
PusTmWrapper::Direct(tm) => self.send_direct_tm(tm),
if let PusTmWrapper::Direct(tm) = tm {
return self.send_direct_tm(tm);
}
self.sender.send_tm(tm)
}
}
impl MpscTmInSharedPoolSender {
impl<Sender: EcssTmSenderCore> TmInSharedPoolSenderWithId<Sender> {
pub fn new(
id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmPool,
sender: mpsc::Sender<StoreAddr>,
sender: Sender,
) -> Self {
Self {
id,
channel_id: id,
name,
shared_tm_store,
sender,
@ -465,6 +498,51 @@ pub mod std_mod {
}
}
pub type TmInSharedPoolSenderWithMpsc = TmInSharedPoolSenderWithId<mpsc::Sender<StoreAddr>>;
pub type TmInSharedPoolSenderWithBoundedMpsc =
TmInSharedPoolSenderWithId<mpsc::SyncSender<StoreAddr>>;
/// This class can be used if frequent heap allocations during run-time are not an issue.
/// PUS TM packets will be sent around as [Vec]s. Please note that the current implementation
/// of this class can not deal with store addresses, so it is assumed that it is always
/// going to be called with direct packets.
#[derive(Clone)]
pub struct TmAsVecSenderWithId<Sender: EcssTmSenderCore> {
id: ChannelId,
name: &'static str,
sender: Sender,
}
impl From<mpsc::SendError<Vec<u8>>> for EcssTmtcError {
fn from(_: mpsc::SendError<Vec<u8>>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
}
}
impl<Sender: EcssTmSenderCore> TmAsVecSenderWithId<Sender> {
pub fn new(id: u32, name: &'static str, sender: Sender) -> Self {
Self { id, sender, name }
}
}
impl<Sender: EcssTmSenderCore> EcssChannel for TmAsVecSenderWithId<Sender> {
fn channel_id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl<Sender: EcssTmSenderCore> EcssTmSenderCore for TmAsVecSenderWithId<Sender> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
self.sender.send_tm(tm)
}
}
pub type TmAsVecSenderWithMpsc = TmAsVecSenderWithId<mpsc::Sender<Vec<u8>>>;
pub type TmAsVecSenderWithBoundedMpsc = TmAsVecSenderWithId<mpsc::SyncSender<Vec<u8>>>;
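
A minimal end-to-end sketch for the `Vec`-backed path: the sender serializes a directly passed TM into a `Vec<u8>` and pushes it into the mpsc channel. Crate paths are assumed; the APID, channel ID and zeroed timestamp are placeholder values:

```rust
use std::sync::mpsc;

use satrs::pus::{EcssTmSenderCore, PusTmWrapper, TmAsVecSenderWithMpsc};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::SpHeader;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Hand-built TM[17,2] ping reply; timestamp left zeroed for the sketch.
    let mut sp_header = SpHeader::tm_unseg(0x02, 0, 0).unwrap();
    let time_stamp = [0u8; 7];
    let sec_header = PusTmSecondaryHeader::new(17, 2, 0, 0, Some(&time_stamp[..]));
    let tm = PusTmCreator::new(&mut sp_header, sec_header, &[], true);

    // The sender serializes direct TM into a Vec<u8> and sends it over the channel.
    let (tm_tx, tm_rx) = mpsc::channel::<Vec<u8>>();
    let sender = TmAsVecSenderWithMpsc::new(0, "tm-sender", tm_tx);
    sender.send_tm(PusTmWrapper::Direct(tm))?;

    let raw_tm = tm_rx.recv()?;
    assert!(!raw_tm.is_empty());
    Ok(())
}
```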
pub struct MpscTcReceiver {
id: ChannelId,
name: &'static str,
@ -472,7 +550,7 @@ pub mod std_mod {
}
impl EcssChannel for MpscTcReceiver {
fn id(&self) -> ChannelId {
fn channel_id(&self) -> ChannelId {
self.id
}
@ -486,7 +564,7 @@ pub mod std_mod {
self.receiver.try_recv().map_err(|e| match e {
TryRecvError::Empty => TryRecvTmtcError::Empty,
TryRecvError::Disconnected => {
TryRecvTmtcError::Error(EcssTmtcError::from(GenericRecvError::TxDisconnected))
TryRecvTmtcError::Tmtc(EcssTmtcError::from(GenericRecvError::TxDisconnected))
}
})
}
@ -502,133 +580,89 @@ pub mod std_mod {
}
}
/// This class can be used if frequent heap allocations during run-time are not an issue.
/// PUS TM packets will be sent around as [Vec]s. Please note that the current implementation
/// of this class can not deal with store addresses, so it is assumed that is is always
/// going to be called with direct packets.
#[derive(Clone)]
pub struct MpscTmAsVecSender {
id: ChannelId,
name: &'static str,
sender: mpsc::Sender<Vec<u8>>,
}
#[cfg(feature = "crossbeam")]
pub mod cb_mod {
use super::*;
use crossbeam_channel as cb;
impl From<mpsc::SendError<Vec<u8>>> for EcssTmtcError {
fn from(_: mpsc::SendError<Vec<u8>>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
}
}
pub type TmInSharedPoolSenderWithCrossbeam =
TmInSharedPoolSenderWithId<cb::Sender<StoreAddr>>;
impl MpscTmAsVecSender {
pub fn new(id: u32, name: &'static str, sender: mpsc::Sender<Vec<u8>>) -> Self {
Self { id, sender, name }
impl From<cb::SendError<StoreAddr>> for EcssTmtcError {
fn from(_: cb::SendError<StoreAddr>) -> Self {
Self::Send(GenericSendError::RxDisconnected)
}
}
}
impl EcssChannel for MpscTmAsVecSender {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl EcssTmSenderCore for MpscTmAsVecSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => {
let mut vec = Vec::new();
tm.append_to_vec(&mut vec).map_err(EcssTmtcError::Pus)?;
self.sender.send(vec)?;
Ok(())
impl From<cb::TrySendError<StoreAddr>> for EcssTmtcError {
fn from(value: cb::TrySendError<StoreAddr>) -> Self {
match value {
cb::TrySendError::Full(_) => Self::Send(GenericSendError::QueueFull(None)),
cb::TrySendError::Disconnected(_) => {
Self::Send(GenericSendError::RxDisconnected)
}
}
}
}
}
#[derive(Clone)]
pub struct CrossbeamTmInStoreSender {
id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmPool,
sender: crossbeam_channel::Sender<StoreAddr>,
}
impl CrossbeamTmInStoreSender {
pub fn new(
id: ChannelId,
name: &'static str,
shared_tm_store: SharedTmPool,
sender: crossbeam_channel::Sender<StoreAddr>,
) -> Self {
Self {
id,
name,
shared_tm_store,
sender,
impl EcssTmSenderCore for cb::Sender<StoreAddr> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => self
.try_send(addr)
.map_err(|e| EcssTmtcError::Send(e.into()))?,
PusTmWrapper::Direct(_) => return Err(EcssTmtcError::CantSendDirectTm),
};
Ok(())
}
}
}
impl EcssChannel for CrossbeamTmInStoreSender {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl EcssTmSenderCore for CrossbeamTmInStoreSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => self.sender.try_send(addr)?,
PusTmWrapper::Direct(tm) => {
let addr = self.shared_tm_store.add_pus_tm(&tm)?;
self.sender.try_send(addr)?;
}
impl EcssTmSenderCore for cb::Sender<Vec<u8>> {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
match tm {
PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
PusTmWrapper::Direct(tm) => self
.send(tm.to_vec()?)
.map_err(|e| EcssTmtcError::Send(e.into()))?,
};
Ok(())
}
Ok(())
}
}
pub struct CrossbeamTcReceiver {
id: ChannelId,
name: &'static str,
receiver: cb::Receiver<EcssTcAndToken>,
}
impl CrossbeamTcReceiver {
pub fn new(
pub struct CrossbeamTcReceiver {
id: ChannelId,
name: &'static str,
receiver: cb::Receiver<EcssTcAndToken>,
) -> Self {
Self { id, name, receiver }
}
}
impl EcssChannel for CrossbeamTcReceiver {
fn id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
impl CrossbeamTcReceiver {
pub fn new(
id: ChannelId,
name: &'static str,
receiver: cb::Receiver<EcssTcAndToken>,
) -> Self {
Self { id, name, receiver }
}
}
}
impl EcssTcReceiverCore for CrossbeamTcReceiver {
fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> {
self.receiver.try_recv().map_err(|e| match e {
cb::TryRecvError::Empty => TryRecvTmtcError::Empty,
cb::TryRecvError::Disconnected => {
TryRecvTmtcError::Error(EcssTmtcError::from(GenericRecvError::TxDisconnected))
}
})
impl EcssChannel for CrossbeamTcReceiver {
fn channel_id(&self) -> ChannelId {
self.id
}
fn name(&self) -> &'static str {
self.name
}
}
impl EcssTcReceiverCore for CrossbeamTcReceiver {
fn recv_tc(&self) -> Result<EcssTcAndToken, TryRecvTmtcError> {
self.receiver.try_recv().map_err(|e| match e {
cb::TryRecvError::Empty => TryRecvTmtcError::Empty,
cb::TryRecvError::Disconnected => TryRecvTmtcError::Tmtc(EcssTmtcError::from(
GenericRecvError::TxDisconnected,
)),
})
}
}
}
@ -811,36 +845,34 @@ pub mod std_mod {
}
}
pub struct PusServiceBase<VerificationReporter: VerificationReportingProvider> {
pub tc_receiver: Box<dyn EcssTcReceiver>,
pub tm_sender: Box<dyn EcssTmSender>,
pub struct PusServiceBase<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
VerificationReporter: VerificationReportingProvider,
> {
pub tc_receiver: TcReceiver,
pub tm_sender: TmSender,
pub tm_apid: u16,
/// The verification handler is wrapped in a [RefCell] to allow the interior mutability
/// pattern. This makes writing methods which are not mutable a lot easier.
pub verification_handler: VerificationReporter,
}
impl<VerificationReporter: VerificationReportingProvider> PusServiceBase<VerificationReporter> {
#[cfg(feature = "std")]
pub fn get_current_cds_short_timestamp(
partial_error: &mut Option<PartialPusHandlingError>,
) -> [u8; 7] {
let mut time_stamp: [u8; 7] = [0; 7];
let time_provider =
TimeProvider::from_now_with_u16_days().map_err(PartialPusHandlingError::Time);
if let Ok(time_provider) = time_provider {
// Can't fail, we have a buffer with the exact required size.
time_provider.write_to_bytes(&mut time_stamp).unwrap();
} else {
*partial_error = Some(time_provider.unwrap_err());
}
time_stamp
}
#[cfg(feature = "std")]
pub fn get_current_timestamp_ignore_error() -> [u8; 7] {
let mut dummy = None;
Self::get_current_cds_short_timestamp(&mut dummy)
#[cfg(feature = "std")]
pub fn get_current_cds_short_timestamp(
partial_error: &mut Option<PartialPusHandlingError>,
) -> [u8; 7] {
let mut time_stamp: [u8; 7] = [0; 7];
let time_provider = CdsTime::now_with_u16_days().map_err(PartialPusHandlingError::Time);
if let Ok(time_provider) = time_provider {
// Can't fail, we have a buffer with the exact required size.
time_provider.write_to_bytes(&mut time_stamp).unwrap();
} else {
*partial_error = Some(time_provider.unwrap_err());
}
time_stamp
}
#[cfg(feature = "std")]
pub fn get_current_timestamp_ignore_error() -> [u8; 7] {
let mut dummy = None;
get_current_cds_short_timestamp(&mut dummy)
}
/// This is a high-level PUS packet handler helper.
@ -853,21 +885,25 @@ pub mod std_mod {
/// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience
/// methods to make the generic parts of TC handling easier.
pub struct PusServiceHelper<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub common: PusServiceBase<VerificationReporter>,
pub common: PusServiceBase<TcReceiver, TmSender, VerificationReporter>,
pub tc_in_mem_converter: TcInMemConverter,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> PusServiceHelper<TcInMemConverter, VerificationReporter>
> PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
tc_receiver: Box<dyn EcssTcReceiver>,
tm_sender: Box<dyn EcssTmSender>,
tc_receiver: TcReceiver,
tm_sender: TmSender,
tm_apid: u16,
verification_handler: VerificationReporter,
tc_in_mem_converter: TcInMemConverter,
@ -883,10 +919,10 @@ pub mod std_mod {
}
}
/// This function can be used to poll the internal [EcssTcReceiver] object for the next
/// This function can be used to poll the internal [EcssTcReceiverCore] object for the next
/// telecommand packet. It will return `Ok(None)` if there are no packets available.
/// In any other case, it will perform the acceptance of the ECSS TC packet using the
/// internal [VerificationReporterWithSender] object. It will then return the telecommand
/// internal [VerificationReportingProvider] object. It will then return the telecommand
/// and the according accepted token.
pub fn retrieve_and_accept_next_packet(
&mut self,
@ -908,23 +944,52 @@ pub mod std_mod {
}))
}
Err(e) => match e {
TryRecvTmtcError::Error(e) => Err(PusPacketHandlingError::EcssTmtc(e)),
TryRecvTmtcError::Tmtc(e) => Err(PusPacketHandlingError::EcssTmtc(e)),
TryRecvTmtcError::Empty => Ok(None),
},
}
}
}
pub type PusServiceHelperDynWithMpsc<TcInMemConverter, VerificationReporter> = PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
TcInMemConverter,
VerificationReporter,
>;
pub type PusServiceHelperDynWithBoundedMpsc<TcInMemConverter, VerificationReporter> =
PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithBoundedMpsc,
TcInMemConverter,
VerificationReporter,
>;
pub type PusServiceHelperStaticWithMpsc<TcInMemConverter, VerificationReporter> =
PusServiceHelper<
MpscTcReceiver,
TmInSharedPoolSenderWithMpsc,
TcInMemConverter,
VerificationReporter,
>;
pub type PusServiceHelperStaticWithBoundedMpsc<TcInMemConverter, VerificationReporter> =
PusServiceHelper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
TcInMemConverter,
VerificationReporter,
>;
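
With receivers and senders now generic, a service helper is wired from concrete channel halves instead of boxed trait objects. A hedged construction sketch for the dynamic (`Vec`-based) variant; crate paths, channel IDs, the APID value and the `VerificationReporterWithVecMpscSender::new` signature are assumptions based on the test helpers further below:

```rust
use std::sync::mpsc;

use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithVecMpscSender};
use satrs::pus::{
    EcssTcAndToken, EcssTcInVecConverter, MpscTcReceiver, PusServiceHelperDynWithMpsc,
    TmAsVecSenderWithMpsc,
};

const APID: u16 = 0x101;

// Assemble a helper with an mpsc TC receiver and Vec-based TM senders (sketch only).
fn build_service_helper(
) -> PusServiceHelperDynWithMpsc<EcssTcInVecConverter, VerificationReporterWithVecMpscSender> {
    let (_tc_tx, tc_rx) = mpsc::channel::<EcssTcAndToken>();
    let (tm_tx, _tm_rx) = mpsc::channel::<Vec<u8>>();

    let verif_cfg = VerificationReporterCfg::new(APID, 1, 2, 8).unwrap();
    let verif_sender = TmAsVecSenderWithMpsc::new(0, "verif-sender", tm_tx.clone());
    let verif_reporter = VerificationReporterWithVecMpscSender::new(&verif_cfg, verif_sender);

    PusServiceHelperDynWithMpsc::new(
        MpscTcReceiver::new(1, "svc-tc", tc_rx),
        TmAsVecSenderWithMpsc::new(2, "svc-tm", tm_tx),
        APID,
        verif_reporter,
        EcssTcInVecConverter::default(),
    )
}
```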
}
pub(crate) fn source_buffer_large_enough(cap: usize, len: usize) -> Result<(), EcssTmtcError> {
pub(crate) fn source_buffer_large_enough(
cap: usize,
len: usize,
) -> Result<(), ByteConversionError> {
if len > cap {
return Err(
PusError::ByteConversion(ByteConversionError::ToSliceTooSmall {
found: cap,
expected: len,
})
.into(),
);
return Err(ByteConversionError::ToSliceTooSmall {
found: cap,
expected: len,
}
.into());
}
Ok(())
}
@ -935,7 +1000,6 @@ pub mod tests {
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, RwLock};
use alloc::boxed::Box;
use alloc::collections::VecDeque;
use alloc::vec::Vec;
use satrs_shared::res_code::ResultU16;
@ -951,6 +1015,9 @@ pub mod tests {
use crate::tmtc::tm_helper::SharedTmPool;
use crate::TargetId;
use super::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use super::verification::tests::{SharedVerificationMap, TestVerificationReporter};
use super::verification::{
TcStateAccepted, VerificationReporterCfg, VerificationReporterWithSender,
@ -958,8 +1025,9 @@ pub mod tests {
};
use super::{
EcssTcAndToken, EcssTcInSharedStoreConverter, EcssTcInVecConverter, GenericRoutingError,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult,
PusPacketHandlingError, PusRoutingErrorHandler, PusServiceHelper, TcInMemory,
MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler,
PusServiceHelper, TcInMemory, TmAsVecSenderWithId, TmAsVecSenderWithMpsc,
TmInSharedPoolSenderWithBoundedMpsc, TmInSharedPoolSenderWithId,
};
pub const TEST_APID: u16 = 0x101;
@ -1004,29 +1072,33 @@ pub mod tests {
tm_buf: [u8; 2048],
tc_pool: SharedStaticMemoryPool,
tm_pool: SharedTmPool,
tc_sender: mpsc::Sender<EcssTcAndToken>,
tc_sender: mpsc::SyncSender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<StoreAddr>,
verification_handler: VerificationReporterWithSender,
verification_handler: VerificationReporterWithSharedPoolMpscBoundedSender,
}
pub type PusServiceHelperStatic = PusServiceHelper<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
>;
impl PusServiceHandlerWithSharedStoreCommon {
/// This function generates the structure in addition to the PUS service handler
/// [PusServiceHandler] which might be required for a specific PUS service handler.
///
/// The PUS service handler is instantiated with a [EcssTcInStoreConverter].
pub fn new() -> (
Self,
PusServiceHelper<EcssTcInSharedStoreConverter, VerificationReporterWithSender>,
) {
pub fn new() -> (Self, PusServiceHelperStatic) {
let pool_cfg = StaticPoolConfig::new(alloc::vec![(16, 16), (8, 32), (4, 64)], false);
let tc_pool = StaticMemoryPool::new(pool_cfg.clone());
let tm_pool = StaticMemoryPool::new(pool_cfg);
let shared_tc_pool = SharedStaticMemoryPool::new(RwLock::new(tc_pool));
let shared_tm_pool = SharedTmPool::new(tm_pool);
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel(10);
let verif_sender = MpscTmInSharedPoolSender::new(
let verif_sender = TmInSharedPoolSenderWithBoundedMpsc::new(
0,
"verif_sender",
shared_tm_pool.clone(),
@ -1034,9 +1106,9 @@ pub mod tests {
);
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verification_handler =
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender));
VerificationReporterWithSharedPoolMpscBoundedSender::new(&verif_cfg, verif_sender);
let test_srv_tm_sender =
MpscTmInSharedPoolSender::new(0, "TEST_SENDER", shared_tm_pool.clone(), tm_tx);
TmInSharedPoolSenderWithId::new(0, "TEST_SENDER", shared_tm_pool.clone(), tm_tx);
let test_srv_tc_receiver = MpscTcReceiver::new(0, "TEST_RECEIVER", test_srv_tc_rx);
let in_store_converter =
EcssTcInSharedStoreConverter::new(shared_tc_pool.clone(), 2048);
@ -1051,8 +1123,8 @@ pub mod tests {
verification_handler: verification_handler.clone(),
},
PusServiceHelper::new(
Box::new(test_srv_tc_receiver),
Box::new(test_srv_tm_sender),
test_srv_tc_receiver,
test_srv_tm_sender,
TEST_APID,
verification_handler,
in_store_converter,
@ -1116,21 +1188,24 @@ pub mod tests {
tm_receiver: mpsc::Receiver<alloc::vec::Vec<u8>>,
pub verification_handler: VerificationReporter,
}
pub type PusServiceHelperDynamic = PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
>;
impl PusServiceHandlerWithVecCommon<VerificationReporterWithSender> {
pub fn new_with_standard_verif_reporter() -> (
Self,
PusServiceHelper<EcssTcInVecConverter, VerificationReporterWithSender>,
) {
impl PusServiceHandlerWithVecCommon<VerificationReporterWithVecMpscSender> {
pub fn new_with_standard_verif_reporter() -> (Self, PusServiceHelperDynamic) {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_sender = MpscTmAsVecSender::new(0, "verififcatio-sender", tm_tx.clone());
let verif_sender = TmAsVecSenderWithId::new(0, "verififcatio-sender", tm_tx.clone());
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verification_handler =
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender));
VerificationReporterWithSender::new(&verif_cfg, verif_sender);
let test_srv_tm_sender = MpscTmAsVecSender::new(0, "test-sender", tm_tx);
let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx);
let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx);
let in_store_converter = EcssTcInVecConverter::default();
(
@ -1141,8 +1216,8 @@ pub mod tests {
verification_handler: verification_handler.clone(),
},
PusServiceHelper::new(
Box::new(test_srv_tc_receiver),
Box::new(test_srv_tm_sender),
test_srv_tc_receiver,
test_srv_tm_sender,
TEST_APID,
verification_handler,
in_store_converter,
@ -1154,12 +1229,17 @@ pub mod tests {
impl PusServiceHandlerWithVecCommon<TestVerificationReporter> {
pub fn new_with_test_verif_sender() -> (
Self,
PusServiceHelper<EcssTcInVecConverter, TestVerificationReporter>,
PusServiceHelper<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
TestVerificationReporter,
>,
) {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let test_srv_tm_sender = MpscTmAsVecSender::new(0, "test-sender", tm_tx);
let test_srv_tm_sender = TmAsVecSenderWithId::new(0, "test-sender", tm_tx);
let test_srv_tc_receiver = MpscTcReceiver::new(0, "test-receiver", test_srv_tc_rx);
let in_store_converter = EcssTcInVecConverter::default();
let shared_verif_map = SharedVerificationMap::default();
@ -1172,8 +1252,8 @@ pub mod tests {
verification_handler: verification_handler.clone(),
},
PusServiceHelper::new(
Box::new(test_srv_tc_receiver),
Box::new(test_srv_tm_sender),
test_srv_tc_receiver,
test_srv_tm_sender,
TEST_APID,
verification_handler,
in_store_converter,

File diff suppressed because it is too large


@ -1,11 +1,20 @@
use super::scheduler::PusSchedulerProvider;
use super::verification::VerificationReportingProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use super::verification::{
VerificationReporterWithSharedPoolMpscBoundedSender,
VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender,
VerificationReporterWithVecMpscSender, VerificationReportingProvider,
};
use super::{
get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper,
TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
TmInSharedPoolSenderWithMpsc,
};
use crate::pool::PoolProvider;
use crate::pus::{PusPacketHandlerResult, PusPacketHandlingError};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::cds::CdsTime;
/// This is a helper class for [std] environments to handle generic PUS 11 (scheduling service)
/// packets. This handler is able to handle the most important PUS requests for a scheduling
@ -16,22 +25,39 @@ use spacepackets::time::cds::TimeProvider;
/// [Self::scheduler] and [Self::scheduler_mut] function and then use the scheduler API to release
/// telecommands when applicable.
pub struct PusService11SchedHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
PusScheduler: PusSchedulerProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
scheduler: PusScheduler,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusSchedulerProvider,
> PusService11SchedHandler<TcInMemConverter, VerificationReporter, Scheduler>
>
PusService11SchedHandler<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
Scheduler,
>
{
pub fn new(
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
scheduler: Scheduler,
) -> Self {
Self {
@ -70,9 +96,7 @@ impl<
));
}
let mut partial_error = None;
let time_stamp = PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
match standard_subservice.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
let start_token = self
@ -144,7 +168,7 @@ impl<
// let mut pool = self.sched_tc_pool.write().expect("locking pool failed");
self.scheduler
.insert_wrapped_tc::<TimeProvider>(&tc, sched_tc_pool)
.insert_wrapped_tc::<CdsTime>(&tc, sched_tc_pool)
.expect("insertion of activity into pool failed");
self.service_helper
@ -169,18 +193,55 @@ impl<
Ok(PusPacketHandlerResult::RequestHandled)
}
}
/// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and regular
/// mpsc queues.
pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusService11SchedHandler<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
PusScheduler,
>;
/// Helper type definition for a PUS 11 handler with a dynamic TMTC memory backend and bounded MPSC
/// queues.
pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusService11SchedHandler<
MpscTcReceiver,
TmAsVecSenderWithBoundedMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscBoundedSender,
PusScheduler,
>;
/// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and regular
/// mpsc queues.
pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusService11SchedHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscSender,
PusScheduler,
>;
/// Helper type definition for a PUS 11 handler with a shared store TMTC memory backend and bounded
/// mpsc queues.
pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusService11SchedHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
PusScheduler,
>;
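
These alias families line up with the `PusServiceHelper` aliases introduced earlier, so a scheduler handler can be assembled from a matching helper plus any `PusSchedulerProvider` implementation. A brief sketch; module paths are assumptions:

```rust
// Module paths below are assumed for this sketch.
use satrs::pus::scheduler::PusSchedulerProvider;
use satrs::pus::scheduler_srv::{
    PusService11SchedHandler, PusService11SchedHandlerStaticWithBoundedMpsc,
};
use satrs::pus::verification::VerificationReporterWithSharedPoolMpscBoundedSender;
use satrs::pus::{EcssTcInSharedStoreConverter, PusServiceHelperStaticWithBoundedMpsc};

// Pair a statically backed service helper with a user supplied scheduler.
fn make_sched_handler<Sched: PusSchedulerProvider>(
    helper: PusServiceHelperStaticWithBoundedMpsc<
        EcssTcInSharedStoreConverter,
        VerificationReporterWithSharedPoolMpscBoundedSender,
    >,
    scheduler: Sched,
) -> PusService11SchedHandlerStaticWithBoundedMpsc<Sched> {
    PusService11SchedHandler::new(helper, scheduler)
}
```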
#[cfg(test)]
mod tests {
use crate::pool::{StaticMemoryPool, StaticPoolConfig};
use crate::pus::tests::TEST_APID;
use crate::pus::verification::VerificationReporterWithSender;
use crate::pus::verification::VerificationReporterWithSharedPoolMpscBoundedSender;
use crate::pus::{
scheduler::{self, PusSchedulerProvider, TcInfo},
tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness},
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedStoreConverter,
};
use crate::pus::{MpscTcReceiver, TmInSharedPoolSenderWithBoundedMpsc};
use alloc::collections::VecDeque;
use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice;
@ -198,8 +259,10 @@ mod tests {
struct Pus11HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusService11SchedHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSender,
VerificationReporterWithSharedPoolMpscBoundedSender,
TestScheduler,
>,
sched_tc_pool: StaticMemoryPool,
@ -240,7 +303,7 @@ mod tests {
}
impl PusSchedulerProvider for TestScheduler {
type TimeProvider = cds::TimeProvider;
type TimeProvider = cds::CdsTime;
fn reset(
&mut self,
@ -266,7 +329,7 @@ mod tests {
fn insert_unwrapped_and_stored_tc(
&mut self,
_time_stamp: spacepackets::time::UnixTimestamp,
_time_stamp: spacepackets::time::UnixTime,
info: crate::pus::scheduler::TcInfo,
) -> Result<(), crate::pus::scheduler::ScheduleError> {
self.inserted_tcs.push_back(info);
@ -327,7 +390,7 @@ mod tests {
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(&mut reply_header, sec_header, &[], true);
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
let stamper = cds::TimeProvider::from_now_with_u16_days().expect("time provider failed");
let stamper = cds::CdsTime::now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
let mut written_len = stamper.write_to_bytes(&mut sched_app_data).unwrap();
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");


@ -5,24 +5,45 @@ use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket;
use spacepackets::SpHeader;
use super::verification::VerificationReportingProvider;
use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
use super::verification::{
VerificationReporterWithSharedPoolMpscBoundedSender,
VerificationReporterWithSharedPoolMpscSender, VerificationReporterWithVecMpscBoundedSender,
VerificationReporterWithVecMpscSender, VerificationReportingProvider,
};
use super::{
get_current_cds_short_timestamp, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, EcssTcReceiverCore, EcssTmSenderCore, MpscTcReceiver, PusServiceHelper,
TmAsVecSenderWithBoundedMpsc, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
TmInSharedPoolSenderWithMpsc,
};
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
/// This handler only processes ping requests and generates a ping reply for them accordingly.
pub struct PusService17TestHandler<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
> PusService17TestHandler<TcInMemConverter, VerificationReporter>
> PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>) -> Self {
pub fn new(
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
) -> Self {
Self { service_helper }
}
@ -41,10 +62,7 @@ impl<
}
if tc.subservice() == 1 {
let mut partial_error = None;
let time_stamp =
PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let time_stamp = get_current_cds_short_timestamp(&mut partial_error);
let result = self
.service_helper
.common
@ -98,17 +116,53 @@ impl<
}
}
/// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and regular
/// mpsc queues.
pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
>;
/// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and bounded MPSC
/// queues.
pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
TmAsVecSenderWithBoundedMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscBoundedSender,
>;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and regular
/// mpsc queues.
pub type PusService17TestHandlerStaticWithMpsc = PusService17TestHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscSender,
>;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded
/// mpsc queues.
pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
>;
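Illustrative note (not part of the diff): the fully spelled-out handler type used by the shared-store test harness further below is exactly the type that the new static bounded alias names, so the alias could be used interchangeably.

// Both spellings name the same handler type; the alias merely fixes the
// queue type and TC memory backend combination (sketch only).
type StoreBackedPus17Handler = PusService17TestHandler<
    MpscTcReceiver,
    TmInSharedPoolSenderWithBoundedMpsc,
    EcssTcInSharedStoreConverter,
    VerificationReporterWithSharedPoolMpscBoundedSender,
>;
// Equivalent to the alias defined above:
// type StoreBackedPus17Handler = PusService17TestHandlerStaticWithBoundedMpsc;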
#[cfg(test)]
mod tests {
use crate::pus::tests::{
PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon, PusTestHarness,
SimplePusPacketHandler, TEST_APID,
};
use crate::pus::verification::{RequestId, VerificationReporterWithSender};
use crate::pus::verification::std_mod::{
VerificationReporterWithSharedPoolMpscBoundedSender, VerificationReporterWithVecMpscSender,
};
use crate::pus::verification::RequestId;
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
EcssTcInSharedStoreConverter, EcssTcInVecConverter, PusPacketHandlerResult,
PusPacketHandlingError,
EcssTcInSharedStoreConverter, EcssTcInVecConverter, MpscTcReceiver, PusPacketHandlerResult,
PusPacketHandlingError, TmAsVecSenderWithMpsc, TmInSharedPoolSenderWithBoundedMpsc,
};
use delegate::delegate;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
@ -120,8 +174,12 @@ mod tests {
struct Pus17HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler:
PusService17TestHandler<EcssTcInSharedStoreConverter, VerificationReporterWithSender>,
handler: PusService17TestHandler<
MpscTcReceiver,
TmInSharedPoolSenderWithBoundedMpsc,
EcssTcInSharedStoreConverter,
VerificationReporterWithSharedPoolMpscBoundedSender,
>,
}
impl Pus17HandlerWithStoreTester {
@ -158,8 +216,13 @@ mod tests {
}
struct Pus17HandlerWithVecTester {
common: PusServiceHandlerWithVecCommon<VerificationReporterWithSender>,
handler: PusService17TestHandler<EcssTcInVecConverter, VerificationReporterWithSender>,
common: PusServiceHandlerWithVecCommon<VerificationReporterWithVecMpscSender>,
handler: PusService17TestHandler<
MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter,
VerificationReporterWithVecMpscSender,
>,
}
impl Pus17HandlerWithVecTester {


@ -1,6 +1,8 @@
use core::fmt::{Display, Formatter};
#[cfg(feature = "std")]
use std::error::Error;
#[cfg(feature = "std")]
use std::sync::mpsc;
/// Generic error type for sending something via a message queue.
#[derive(Debug, Copy, Clone)]
@ -47,3 +49,37 @@ impl Display for GenericRecvError {
#[cfg(feature = "std")]
impl Error for GenericRecvError {}
#[cfg(feature = "std")]
impl<T> From<mpsc::SendError<T>> for GenericSendError {
fn from(_: mpsc::SendError<T>) -> Self {
GenericSendError::RxDisconnected
}
}
#[cfg(feature = "std")]
impl<T> From<mpsc::TrySendError<T>> for GenericSendError {
fn from(err: mpsc::TrySendError<T>) -> Self {
match err {
mpsc::TrySendError::Full(_) => GenericSendError::QueueFull(None),
mpsc::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
}
}
}
#[cfg(feature = "crossbeam")]
impl<T> From<crossbeam_channel::SendError<T>> for GenericSendError {
fn from(_: crossbeam_channel::SendError<T>) -> Self {
GenericSendError::RxDisconnected
}
}
#[cfg(feature = "crossbeam")]
impl<T> From<crossbeam_channel::TrySendError<T>> for GenericSendError {
fn from(err: crossbeam_channel::TrySendError<T>) -> Self {
match err {
crossbeam_channel::TrySendError::Full(_) => GenericSendError::QueueFull(None),
crossbeam_channel::TrySendError::Disconnected(_) => GenericSendError::RxDisconnected,
}
}
}
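A minimal usage sketch for these conversions (assuming GenericSendError is in scope, for example via satrs::queue): the ? operator maps standard channel errors onto the generic error type defined above.

use std::sync::mpsc;

// Forward a packet through a bounded std channel; try_send errors convert
// automatically through the From impls above: a full queue becomes
// GenericSendError::QueueFull(None), a closed receiver becomes
// GenericSendError::RxDisconnected.
fn forward_packet(
    sender: &mpsc::SyncSender<Vec<u8>>,
    packet: Vec<u8>,
) -> Result<(), GenericSendError> {
    sender.try_send(packet)?;
    Ok(())
}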

View File

@ -22,7 +22,7 @@
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::{CcsdsPacket, SpHeader};
//! use spacepackets::ecss::WritablePusPacket;
//! use spacepackets::ecss::tc::{PusTc, PusTcCreator};
//! use spacepackets::ecss::tc::PusTcCreator;
//!
//! #[derive (Default)]
//! struct ConcreteApidHandler {
@ -52,7 +52,7 @@
//! }
//!
//! let apid_handler = ConcreteApidHandler::default();
//! let mut ccsds_distributor = CcsdsDistributor::new(Box::new(apid_handler));
//! let mut ccsds_distributor = CcsdsDistributor::new(apid_handler);
//!
//! // Create and pass PUS telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
@ -72,23 +72,17 @@
//! let tc_slice = &test_buf[0..size];
//! ccsds_distributor.pass_tc(&tc_slice).expect("Passing TC slice failed");
//!
//! // User helper function to retrieve concrete class
//! let concrete_handler_ref: &ConcreteApidHandler = ccsds_distributor
//! .apid_handler_ref()
//! .expect("Casting back to concrete type failed");
//! assert_eq!(concrete_handler_ref.known_call_count, 1);
//! assert_eq!(concrete_handler_ref.unknown_call_count, 1);
//! // Retrieve the APID handler.
//! let handler_ref = ccsds_distributor.packet_handler();
//! assert_eq!(handler_ref.known_call_count, 1);
//! assert_eq!(handler_ref.unknown_call_count, 1);
//!
//! // It's also possible to retrieve a mutable reference
//! let mutable_ref: &mut ConcreteApidHandler = ccsds_distributor
//! .apid_handler_mut()
//! .expect("Casting back to concrete type failed");
//! mutable_ref.mutable_foo();
//! // Mutable access to the handler.
//! let mutable_handler_ref = ccsds_distributor.packet_handler_mut();
//! mutable_handler_ref.mutable_foo();
//! ```
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use alloc::boxed::Box;
use core::fmt::{Display, Formatter};
use downcast_rs::Downcast;
use spacepackets::{ByteConversionError, CcsdsPacket, SpHeader};
#[cfg(feature = "std")]
use std::error::Error;
@ -99,11 +93,7 @@ use std::error::Error;
/// instance of this handler to the [CcsdsDistributor]. The distributor will use the trait
/// interface to dispatch received packets to the user based on the Application Process Identifier
/// (APID) field of the CCSDS packet.
///
/// This trait automatically implements the [downcast_rs::Downcast] to allow a more convenient API
/// to cast trait objects back to their concrete type after the handler was passed to the
/// distributor.
pub trait CcsdsPacketHandler: Downcast {
pub trait CcsdsPacketHandler {
type Error;
fn valid_apids(&self) -> &'static [u16];
@ -116,23 +106,12 @@ pub trait CcsdsPacketHandler: Downcast {
) -> Result<(), Self::Error>;
}
downcast_rs::impl_downcast!(CcsdsPacketHandler assoc Error);
pub trait SendableCcsdsPacketHandler: CcsdsPacketHandler + Send {}
impl<T: CcsdsPacketHandler + Send> SendableCcsdsPacketHandler for T {}
downcast_rs::impl_downcast!(SendableCcsdsPacketHandler assoc Error);
/// The CCSDS distributor dispatches received CCSDS packets to a user provided packet handler.
///
/// The passed APID handler is required to be [Send]able to allow more ergonomic usage with
/// threads.
pub struct CcsdsDistributor<E> {
pub struct CcsdsDistributor<PacketHandler: CcsdsPacketHandler<Error = E>, E> {
/// User provided APID handler stored as a generic trait object.
/// It can be cast back to the original concrete type using the [Self::apid_handler_ref] or
/// the [Self::apid_handler_mut] method.
pub apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>,
/// It can be cast back to the original concrete type using [Self::packet_handler] or
/// the [Self::packet_handler_mut] method.
packet_handler: PacketHandler,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
@ -160,7 +139,9 @@ impl<E: Error> Error for CcsdsError<E> {
}
}
impl<E: 'static> ReceivesCcsdsTc for CcsdsDistributor<E> {
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> ReceivesCcsdsTc
for CcsdsDistributor<PacketHandler, E>
{
type Error = CcsdsError<E>;
fn pass_ccsds(&mut self, header: &SpHeader, tc_raw: &[u8]) -> Result<(), Self::Error> {
@ -168,7 +149,9 @@ impl<E: 'static> ReceivesCcsdsTc for CcsdsDistributor<E> {
}
}
impl<E: 'static> ReceivesTcCore for CcsdsDistributor<E> {
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> ReceivesTcCore
for CcsdsDistributor<PacketHandler, E>
{
type Error = CcsdsError<E>;
fn pass_tc(&mut self, tc_raw: &[u8]) -> Result<(), Self::Error> {
@ -186,36 +169,31 @@ impl<E: 'static> ReceivesTcCore for CcsdsDistributor<E> {
}
}
impl<E: 'static> CcsdsDistributor<E> {
pub fn new(apid_handler: Box<dyn SendableCcsdsPacketHandler<Error = E>>) -> Self {
CcsdsDistributor { apid_handler }
impl<PacketHandler: CcsdsPacketHandler<Error = E>, E: 'static> CcsdsDistributor<PacketHandler, E> {
pub fn new(packet_handler: PacketHandler) -> Self {
CcsdsDistributor { packet_handler }
}
/// This function can be used to retrieve a reference to the concrete instance of the APID
/// handler after it was passed to the distributor. See the
/// [module documentation][crate::tmtc::ccsds_distrib] for an fsrc-example.
pub fn apid_handler_ref<T: SendableCcsdsPacketHandler<Error = E>>(&self) -> Option<&T> {
self.apid_handler.downcast_ref::<T>()
pub fn packet_handler(&self) -> &PacketHandler {
&self.packet_handler
}
/// This function can be used to retrieve a mutable reference to the concrete instance of the
/// APID handler after it was passed to the distributor.
pub fn apid_handler_mut<T: SendableCcsdsPacketHandler<Error = E>>(&mut self) -> Option<&mut T> {
self.apid_handler.downcast_mut::<T>()
pub fn packet_handler_mut(&mut self) -> &mut PacketHandler {
&mut self.packet_handler
}
fn dispatch_ccsds(&mut self, sp_header: &SpHeader, tc_raw: &[u8]) -> Result<(), CcsdsError<E>> {
let apid = sp_header.apid();
let valid_apids = self.apid_handler.valid_apids();
let valid_apids = self.packet_handler.valid_apids();
for &valid_apid in valid_apids {
if valid_apid == apid {
return self
.apid_handler
.packet_handler
.handle_known_apid(sp_header, tc_raw)
.map_err(|e| CcsdsError::CustomError(e));
}
}
self.apid_handler
self.packet_handler
.handle_unknown_apid(sp_header, tc_raw)
.map_err(|e| CcsdsError::CustomError(e))
}
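For orientation, a minimal sketch of the refactored, generic (non-boxed) distributor API. The handler method signatures are assumed from the test implementations further below and are not quoted verbatim from this diff.

use satrs::tmtc::{CcsdsDistributor, CcsdsPacketHandler};
use spacepackets::SpHeader;

// Hypothetical handler which only counts dispatched packets.
#[derive(Default)]
struct CountingApidHandler {
    known: u32,
    unknown: u32,
}

impl CcsdsPacketHandler for CountingApidHandler {
    type Error = ();

    fn valid_apids(&self) -> &'static [u16] {
        &[0x002]
    }

    fn handle_known_apid(&mut self, _sp_header: &SpHeader, _tc_raw: &[u8]) -> Result<(), ()> {
        self.known += 1;
        Ok(())
    }

    fn handle_unknown_apid(&mut self, _sp_header: &SpHeader, _tc_raw: &[u8]) -> Result<(), ()> {
        self.unknown += 1;
        Ok(())
    }
}

// The distributor now owns the handler directly instead of a boxed trait
// object, so the concrete handler can be handed back out by plain reference
// via packet_handler() / packet_handler_mut().
fn build_distributor() -> CcsdsDistributor<CountingApidHandler, ()> {
    CcsdsDistributor::new(CountingApidHandler::default())
}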
@ -244,6 +222,13 @@ pub(crate) mod tests {
&buf[0..size]
}
pub fn generate_ping_tc_as_vec() -> Vec<u8> {
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
PusTcCreator::new_simple(&mut sph, 17, 1, None, true)
.to_vec()
.unwrap()
}
type SharedPacketQueue = Arc<Mutex<VecDeque<(u16, Vec<u8>)>>>;
pub struct BasicApidHandlerSharedQueue {
pub known_packet_queue: SharedPacketQueue,
@ -305,7 +290,8 @@ pub(crate) mod tests {
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
Ok(self.known_packet_queue.push_back((sp_header.apid(), vec)))
self.known_packet_queue.push_back((sp_header.apid(), vec));
Ok(())
}
fn handle_unknown_apid(
@ -315,7 +301,8 @@ pub(crate) mod tests {
) -> Result<(), Self::Error> {
let mut vec = Vec::new();
vec.extend_from_slice(tc_raw);
Ok(self.unknown_packet_queue.push_back((sp_header.apid(), vec)))
self.unknown_packet_queue.push_back((sp_header.apid(), vec));
Ok(())
}
}
@ -327,7 +314,7 @@ pub(crate) mod tests {
known_packet_queue: known_packet_queue.clone(),
unknown_packet_queue: unknown_packet_queue.clone(),
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
is_send(&ccsds_distrib);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
@ -342,14 +329,9 @@ pub(crate) mod tests {
}
#[test]
fn test_distribs_unknown_apid() {
let known_packet_queue = Arc::new(Mutex::default());
let unknown_packet_queue = Arc::new(Mutex::default());
let apid_handler = BasicApidHandlerSharedQueue {
known_packet_queue: known_packet_queue.clone(),
unknown_packet_queue: unknown_packet_queue.clone(),
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
fn test_unknown_apid_handling() {
let apid_handler = BasicApidHandlerOwnedQueue::default();
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut sph = SpHeader::tc_unseg(0x004, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let mut test_buf: [u8; 32] = [0; 32];
@ -357,11 +339,52 @@ pub(crate) mod tests {
.write_to_bytes(test_buf.as_mut_slice())
.expect("Error writing TC to buffer");
ccsds_distrib.pass_tc(&test_buf).expect("Passing TC failed");
let recvd = unknown_packet_queue.lock().unwrap().pop_front();
assert!(known_packet_queue.lock().unwrap().is_empty());
assert!(ccsds_distrib.packet_handler().known_packet_queue.is_empty());
let apid_handler = ccsds_distrib.packet_handler_mut();
let recvd = apid_handler.unknown_packet_queue.pop_front();
assert!(recvd.is_some());
let (apid, packet) = recvd.unwrap();
assert_eq!(apid, 0x004);
assert_eq!(packet.as_slice(), test_buf);
}
#[test]
fn test_ccsds_distribution() {
let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default());
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let tc_vec = pus_tc.to_vec().unwrap();
ccsds_distrib
.pass_ccsds(&sph, &tc_vec)
.expect("passing CCSDS TC failed");
let recvd = ccsds_distrib
.packet_handler_mut()
.known_packet_queue
.pop_front();
assert!(recvd.is_some());
let recvd = recvd.unwrap();
assert_eq!(recvd.0, 0x002);
assert_eq!(recvd.1, tc_vec);
}
#[test]
fn test_distribution_short_packet_fails() {
let mut ccsds_distrib = CcsdsDistributor::new(BasicApidHandlerOwnedQueue::default());
let mut sph = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
let tc_vec = pus_tc.to_vec().unwrap();
let result = ccsds_distrib.pass_tc(&tc_vec[0..6]);
assert!(result.is_err());
let error = result.unwrap_err();
if let CcsdsError::ByteConversionError(ByteConversionError::FromSliceTooSmall {
found,
expected,
}) = error
{
assert_eq!(found, 6);
assert_eq!(expected, 7);
} else {
panic!("Unexpected error variant");
}
}
}


@ -18,7 +18,7 @@ pub mod tm_helper;
#[cfg(feature = "alloc")]
pub use ccsds_distrib::{CcsdsDistributor, CcsdsError, CcsdsPacketHandler};
#[cfg(feature = "alloc")]
pub use pus_distrib::{PusDistributor, PusServiceProvider};
pub use pus_distrib::{PusDistributor, PusServiceDistributor};
/// Generic trait for object which can receive any telecommands in form of a raw bytestream, with
/// no assumptions about the received protocol.


@ -2,7 +2,7 @@
//!
//! The routing components consist of two core components:
//! 1. [PusDistributor] component which dispatches received packets to a user-provided handler.
//! 2. [PusServiceProvider] trait which should be implemented by the user-provided PUS packet
//! 2. [PusServiceDistributor] trait which should be implemented by the user-provided PUS packet
//! handler.
//!
//! The [PusDistributor] implements the [ReceivesEcssPusTc], [ReceivesCcsdsTc] and the
@ -13,25 +13,26 @@
//! the raw bytestream. If this process fails, a [PusDistribError::PusError] is returned to the
//! user.
//! 2. If it was possible to extract both components, the packet will be passed to the
//! [PusServiceProvider::handle_pus_tc_packet] method provided by the user.
//! [PusServiceDistributor::distribute_packet] method provided by the user.
//!
//! # Example
//!
//! ```rust
//! use spacepackets::ecss::WritablePusPacket;
//! use satrs::tmtc::pus_distrib::{PusDistributor, PusServiceProvider};
//! use satrs::tmtc::pus_distrib::{PusDistributor, PusServiceDistributor};
//! use satrs::tmtc::{ReceivesTc, ReceivesTcCore};
//! use spacepackets::SpHeader;
//! use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
//!
//! struct ConcretePusHandler {
//! handler_call_count: u32
//! }
//!
//! // This is a very simple possible service provider. It increments an internal call count field,
//! // which is used to verify the handler was called
//! impl PusServiceProvider for ConcretePusHandler {
//! impl PusServiceDistributor for ConcretePusHandler {
//! type Error = ();
//! fn handle_pus_tc_packet(&mut self, service: u8, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
//! fn distribute_packet(&mut self, service: u8, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
//! assert_eq!(service, 17);
//! assert_eq!(pus_tc.len_packed(), 13);
//! self.handler_call_count += 1;
@ -42,7 +43,7 @@
//! let service_handler = ConcretePusHandler {
//! handler_call_count: 0
//! };
//! let mut pus_distributor = PusDistributor::new(Box::new(service_handler));
//! let mut pus_distributor = PusDistributor::new(service_handler);
//!
//! // Create and pass PUS ping telecommand with a valid APID
//! let mut space_packet_header = SpHeader::tc_unseg(0x002, 0x34, 0).unwrap();
@ -57,50 +58,42 @@
//!
//! // User helper function to retrieve concrete class. We check the call count here to verify
//! // that the PUS ping telecommand was routed successfully.
//! let concrete_handler_ref: &ConcretePusHandler = pus_distributor
//! .service_provider_ref()
//! .expect("Casting back to concrete type failed");
//! assert_eq!(concrete_handler_ref.handler_call_count, 1);
//! let concrete_handler = pus_distributor.service_distributor();
//! assert_eq!(concrete_handler.handler_call_count, 1);
//! ```
use crate::pus::ReceivesEcssPusTc;
use crate::tmtc::{ReceivesCcsdsTc, ReceivesTcCore};
use alloc::boxed::Box;
use core::fmt::{Display, Formatter};
use downcast_rs::Downcast;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::{PusError, PusPacket};
use spacepackets::SpHeader;
#[cfg(feature = "std")]
use std::error::Error;
pub trait PusServiceProvider: Downcast {
/// Trait for a generic distributor object which can distribute PUS packets based on packet
/// properties like the PUS service, space packet header or any other content of the PUS packet.
pub trait PusServiceDistributor {
type Error;
fn handle_pus_tc_packet(
fn distribute_packet(
&mut self,
service: u8,
header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error>;
}
downcast_rs::impl_downcast!(PusServiceProvider assoc Error);
pub trait SendablePusServiceProvider: PusServiceProvider + Send {}
impl<T: Send + PusServiceProvider> SendablePusServiceProvider for T {}
downcast_rs::impl_downcast!(SendablePusServiceProvider assoc Error);
/// Generic distributor object which dispatches received packets to a user provided handler.
///
/// This distributor expects the passed trait object to be [Send]able to allow more ergonomic
/// usage with threads.
pub struct PusDistributor<E> {
pub service_provider: Box<dyn SendablePusServiceProvider<Error = E>>,
pub struct PusDistributor<ServiceDistributor: PusServiceDistributor<Error = E>, E> {
service_distributor: ServiceDistributor,
}
impl<E> PusDistributor<E> {
pub fn new(service_provider: Box<dyn SendablePusServiceProvider<Error = E>>) -> Self {
PusDistributor { service_provider }
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E>
PusDistributor<ServiceDistributor, E>
{
pub fn new(service_provider: ServiceDistributor) -> Self {
PusDistributor {
service_distributor: service_provider,
}
}
}
@ -113,8 +106,8 @@ pub enum PusDistribError<E> {
impl<E: Display> Display for PusDistribError<E> {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
PusDistribError::CustomError(e) => write!(f, "{e}"),
PusDistribError::PusError(e) => write!(f, "{e}"),
PusDistribError::CustomError(e) => write!(f, "pus distribution error: {e}"),
PusDistribError::PusError(e) => write!(f, "pus distribution error: {e}"),
}
}
}
@ -129,7 +122,9 @@ impl<E: Error> Error for PusDistribError<E> {
}
}
impl<E: 'static> ReceivesTcCore for PusDistributor<E> {
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesTcCore
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_tc(&mut self, tm_raw: &[u8]) -> Result<(), Self::Error> {
// Convert to ccsds and call pass_ccsds
@ -139,7 +134,9 @@ impl<E: 'static> ReceivesTcCore for PusDistributor<E> {
}
}
impl<E: 'static> ReceivesCcsdsTc for PusDistributor<E> {
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesCcsdsTc
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_ccsds(&mut self, header: &SpHeader, tm_raw: &[u8]) -> Result<(), Self::Error> {
let (tc, _) = PusTcReader::new(tm_raw).map_err(|e| PusDistribError::PusError(e))?;
@ -147,34 +144,39 @@ impl<E: 'static> ReceivesCcsdsTc for PusDistributor<E> {
}
}
impl<E: 'static> ReceivesEcssPusTc for PusDistributor<E> {
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static> ReceivesEcssPusTc
for PusDistributor<ServiceDistributor, E>
{
type Error = PusDistribError<E>;
fn pass_pus_tc(&mut self, header: &SpHeader, pus_tc: &PusTcReader) -> Result<(), Self::Error> {
self.service_provider
.handle_pus_tc_packet(pus_tc.service(), header, pus_tc)
self.service_distributor
.distribute_packet(pus_tc.service(), header, pus_tc)
.map_err(|e| PusDistribError::CustomError(e))
}
}
impl<E: 'static> PusDistributor<E> {
pub fn service_provider_ref<T: SendablePusServiceProvider<Error = E>>(&self) -> Option<&T> {
self.service_provider.downcast_ref::<T>()
impl<ServiceDistributor: PusServiceDistributor<Error = E>, E: 'static>
PusDistributor<ServiceDistributor, E>
{
pub fn service_distributor(&self) -> &ServiceDistributor {
&self.service_distributor
}
pub fn service_provider_mut<T: SendablePusServiceProvider<Error = E>>(
&mut self,
) -> Option<&mut T> {
self.service_provider.downcast_mut::<T>()
pub fn service_distributor_mut(&mut self) -> &mut ServiceDistributor {
&mut self.service_distributor
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::queue::GenericSendError;
use crate::tmtc::ccsds_distrib::tests::{
generate_ping_tc, BasicApidHandlerOwnedQueue, BasicApidHandlerSharedQueue,
generate_ping_tc, generate_ping_tc_as_vec, BasicApidHandlerOwnedQueue,
BasicApidHandlerSharedQueue,
};
use crate::tmtc::ccsds_distrib::{CcsdsDistributor, CcsdsPacketHandler};
use alloc::format;
use alloc::vec::Vec;
use spacepackets::ecss::PusError;
use spacepackets::CcsdsPacket;
@ -185,54 +187,65 @@ mod tests {
fn is_send<T: Send>(_: &T) {}
struct PusHandlerSharedQueue {
pub pus_queue: Arc<Mutex<VecDeque<(u8, u16, Vec<u8>)>>>,
pub struct PacketInfo {
pub service: u8,
pub apid: u16,
pub packet: Vec<u8>,
}
struct PusHandlerSharedQueue(Arc<Mutex<VecDeque<PacketInfo>>>);
#[derive(Default)]
struct PusHandlerOwnedQueue {
pub pus_queue: VecDeque<(u8, u16, Vec<u8>)>,
}
struct PusHandlerOwnedQueue(VecDeque<PacketInfo>);
impl PusServiceProvider for PusHandlerSharedQueue {
impl PusServiceDistributor for PusHandlerSharedQueue {
type Error = PusError;
fn handle_pus_tc_packet(
fn distribute_packet(
&mut self,
service: u8,
sp_header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut vec: Vec<u8> = Vec::new();
vec.extend_from_slice(pus_tc.raw_data());
Ok(self
.pus_queue
let mut packet: Vec<u8> = Vec::new();
packet.extend_from_slice(pus_tc.raw_data());
self.0
.lock()
.expect("Mutex lock failed")
.push_back((service, sp_header.apid(), vec)))
.push_back(PacketInfo {
service,
apid: sp_header.apid(),
packet,
});
Ok(())
}
}
impl PusServiceProvider for PusHandlerOwnedQueue {
impl PusServiceDistributor for PusHandlerOwnedQueue {
type Error = PusError;
fn handle_pus_tc_packet(
fn distribute_packet(
&mut self,
service: u8,
sp_header: &SpHeader,
pus_tc: &PusTcReader,
) -> Result<(), Self::Error> {
let mut vec: Vec<u8> = Vec::new();
vec.extend_from_slice(pus_tc.raw_data());
Ok(self.pus_queue.push_back((service, sp_header.apid(), vec)))
let mut packet: Vec<u8> = Vec::new();
packet.extend_from_slice(pus_tc.raw_data());
self.0.push_back(PacketInfo {
service,
apid: sp_header.apid(),
packet,
});
Ok(())
}
}
struct ApidHandlerShared {
pub pus_distrib: PusDistributor<PusError>,
pub pus_distrib: PusDistributor<PusHandlerSharedQueue, PusError>,
pub handler_base: BasicApidHandlerSharedQueue,
}
struct ApidHandlerOwned {
pub pus_distrib: PusDistributor<PusError>,
pub pus_distrib: PusDistributor<PusHandlerOwnedQueue, PusError>,
handler_base: BasicApidHandlerOwnedQueue,
}
@ -285,28 +298,36 @@ mod tests {
}
#[test]
#[cfg(feature = "std")]
fn test_pus_distribution() {
fn test_pus_distribution_as_raw_packet() {
let mut pus_distrib = PusDistributor::new(PusHandlerOwnedQueue::default());
let tc = generate_ping_tc_as_vec();
let result = pus_distrib.pass_tc(&tc);
assert!(result.is_ok());
assert_eq!(pus_distrib.service_distributor_mut().0.len(), 1);
let packet_info = pus_distrib.service_distributor_mut().0.pop_front().unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc);
}
#[test]
fn test_pus_distribution_combined_handler() {
let known_packet_queue = Arc::new(Mutex::default());
let unknown_packet_queue = Arc::new(Mutex::default());
let pus_queue = Arc::new(Mutex::default());
let pus_handler = PusHandlerSharedQueue {
pus_queue: pus_queue.clone(),
};
let pus_handler = PusHandlerSharedQueue(pus_queue.clone());
let handler_base = BasicApidHandlerSharedQueue {
known_packet_queue: known_packet_queue.clone(),
unknown_packet_queue: unknown_packet_queue.clone(),
};
let pus_distrib = PusDistributor {
service_provider: Box::new(pus_handler),
};
let pus_distrib = PusDistributor::new(pus_handler);
is_send(&pus_distrib);
let apid_handler = ApidHandlerShared {
pus_distrib,
handler_base,
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
@ -322,25 +343,23 @@ mod tests {
assert_eq!(packet.as_slice(), tc_slice);
let recvd_pus = pus_queue.lock().unwrap().pop_front();
assert!(recvd_pus.is_some());
let (service, apid, tc_raw) = recvd_pus.unwrap();
assert_eq!(service, 17);
assert_eq!(apid, 0x002);
assert_eq!(tc_raw, tc_slice);
let packet_info = recvd_pus.unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc_slice);
}
#[test]
fn test_as_any_cast() {
fn test_accessing_combined_distributor() {
let pus_handler = PusHandlerOwnedQueue::default();
let handler_base = BasicApidHandlerOwnedQueue::default();
let pus_distrib = PusDistributor {
service_provider: Box::new(pus_handler),
};
let pus_distrib = PusDistributor::new(pus_handler);
let apid_handler = ApidHandlerOwned {
pus_distrib,
handler_base,
};
let mut ccsds_distrib = CcsdsDistributor::new(Box::new(apid_handler));
let mut ccsds_distrib = CcsdsDistributor::new(apid_handler);
let mut test_buf: [u8; 32] = [0; 32];
let tc_slice = generate_ping_tc(test_buf.as_mut_slice());
@ -349,21 +368,38 @@ mod tests {
.pass_tc(tc_slice)
.expect("Passing TC slice failed");
let apid_handler_casted_back: &mut ApidHandlerOwned = ccsds_distrib
.apid_handler_mut()
.expect("Cast to concrete type ApidHandler failed");
let apid_handler_casted_back = ccsds_distrib.packet_handler_mut();
assert!(!apid_handler_casted_back
.handler_base
.known_packet_queue
.is_empty());
let handler_casted_back: &mut PusHandlerOwnedQueue = apid_handler_casted_back
let handler_owned_queue = apid_handler_casted_back
.pus_distrib
.service_provider_mut()
.expect("Cast to concrete type PusHandlerOwnedQueue failed");
assert!(!handler_casted_back.pus_queue.is_empty());
let (service, apid, packet_raw) = handler_casted_back.pus_queue.pop_front().unwrap();
assert_eq!(service, 17);
assert_eq!(apid, 0x002);
assert_eq!(packet_raw.as_slice(), tc_slice);
.service_distributor_mut();
assert!(!handler_owned_queue.0.is_empty());
let packet_info = handler_owned_queue.0.pop_front().unwrap();
assert_eq!(packet_info.service, 17);
assert_eq!(packet_info.apid, 0x002);
assert_eq!(packet_info.packet, tc_slice);
}
#[test]
fn test_pus_distrib_error_custom_error() {
let error = PusDistribError::CustomError(GenericSendError::RxDisconnected);
let error_string = format!("{}", error);
assert_eq!(
error_string,
"pus distribution error: rx side has disconnected"
);
}
#[test]
fn test_pus_distrib_error_pus_error() {
let error = PusDistribError::<GenericSendError>::PusError(PusError::CrcCalculationMissing);
let error_string = format!("{}", error);
assert_eq!(
error_string,
"pus distribution error: crc16 was not calculated"
);
}
}


@ -1,5 +1,5 @@
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::cds::CdsTime;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
@ -66,7 +66,7 @@ impl PusTmWithCdsShortHelper {
source_data: &'a [u8],
seq_count: u16,
) -> PusTmCreator {
let time_stamp = TimeProvider::from_now_with_u16_days().unwrap();
let time_stamp = CdsTime::now_with_u16_days().unwrap();
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
}
@ -76,7 +76,7 @@ impl PusTmWithCdsShortHelper {
service: u8,
subservice: u8,
source_data: &'a [u8],
stamper: &TimeProvider,
stamper: &CdsTime,
seq_count: u16,
) -> PusTmCreator {
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
@ -98,14 +98,14 @@ impl PusTmWithCdsShortHelper {
#[cfg(test)]
mod tests {
use spacepackets::{ecss::PusPacket, time::cds::TimeProvider, CcsdsPacket};
use spacepackets::{ecss::PusPacket, time::cds::CdsTime, CcsdsPacket};
use super::PusTmWithCdsShortHelper;
#[test]
fn test_helper_with_stamper() {
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
let stamper = TimeProvider::new_with_u16_days(0, 0);
let stamper = CdsTime::new_with_u16_days(0, 0);
let tm = pus_tm_helper.create_pus_tm_with_stamper(17, 1, &[1, 2, 3, 4], &stamper, 25);
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 1);


@ -2,7 +2,7 @@
use core::mem::size_of;
use serde::{Deserialize, Serialize};
use spacepackets::ecss::{PfcReal, PfcUnsigned, Ptc};
use spacepackets::time::cds::TimeProvider;
use spacepackets::time::cds::CdsTime;
use spacepackets::time::{CcsdsTimeProvider, TimeWriter};
enum NumOfParamsInfo {
@ -36,7 +36,7 @@ struct TestMgmHkWithIndividualValidity {
#[derive(Serialize, Deserialize)]
struct TestMgmHkWithGroupValidity {
last_valid_stamp: TimeProvider,
last_valid_stamp: CdsTime,
valid: bool,
temp: f32,
mgm_vals: [u16; 3],
@ -150,7 +150,7 @@ pub fn main() {
// The easiest and probably best approach, trading off big advantages for TM downlink capacity:
// Use a JSON format
let mgm_hk_group_validity = TestMgmHkWithGroupValidity {
last_valid_stamp: TimeProvider::from_now_with_u16_days().unwrap(),
last_valid_stamp: CdsTime::now_with_u16_days().unwrap(),
valid: false,
temp: 20.0,
mgm_vals: [0x1f1f, 0x2f2f, 0x3f3f],
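A short sketch of the JSON idea mentioned above, assuming the serde_json crate is available as a dependency (not shown in this diff) and relying on the Serialize derive of the HK struct.

// Serialize the group-validity HK structure to a JSON string.
fn hk_to_json(hk: &TestMgmHkWithGroupValidity) -> serde_json::Result<String> {
    serde_json::to_string(hk)
}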


@ -1,14 +1,14 @@
use satrs::event_man::{
EventManagerWithMpscQueue, MpscEventU32Receiver, MpscEventU32SendProvider, SendEventProvider,
EventManagerWithMpsc, EventSendProvider, EventU32SenderMpsc, MpscEventU32Receiver,
};
use satrs::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
use satrs::params::U32Pair;
use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};
use satrs::pus::event_man::{DefaultPusMgmtBackendProvider, EventReporter, PusEventDispatcher};
use satrs::pus::MpscTmAsVecSender;
use satrs::pus::event_man::{DefaultPusEventMgmtBackend, EventReporter, PusEventDispatcher};
use satrs::pus::TmAsVecSenderWithMpsc;
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::{PusError, PusPacket};
use std::sync::mpsc::{channel, SendError, TryRecvError};
use std::sync::mpsc::{self, SendError, TryRecvError};
use std::thread;
const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
@ -24,21 +24,21 @@ pub enum CustomTmSenderError {
#[test]
fn test_threaded_usage() {
let (event_sender, event_man_receiver) = channel();
let (event_sender, event_man_receiver) = mpsc::channel();
let event_receiver = MpscEventU32Receiver::new(event_man_receiver);
let mut event_man = EventManagerWithMpscQueue::new(Box::new(event_receiver));
let mut event_man = EventManagerWithMpsc::new(event_receiver);
let (pus_event_man_tx, pus_event_man_rx) = channel();
let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
event_man.subscribe_all(pus_event_man_send_provider.id());
let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel();
let pus_event_man_send_provider = EventU32SenderMpsc::new(1, pus_event_man_tx);
event_man.subscribe_all(pus_event_man_send_provider.channel_id());
event_man.add_sender(pus_event_man_send_provider);
let (event_tx, event_rx) = channel();
let (event_tx, event_rx) = mpsc::channel();
let reporter = EventReporter::new(0x02, 128).expect("Creating event reporter failed");
let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
let mut pus_event_man = PusEventDispatcher::new(reporter, Box::new(backend));
let mut pus_event_man =
PusEventDispatcher::new(reporter, DefaultPusEventMgmtBackend::default());
// PUS + Generic event manager thread
let jh0 = thread::spawn(move || {
let mut sender = MpscTmAsVecSender::new(0, "event_sender", event_tx);
let mut sender = TmAsVecSenderWithMpsc::new(0, "event_sender", event_tx);
let mut event_cnt = 0;
let mut params_array: [u8; 128] = [0; 128];
loop {
@ -71,6 +71,7 @@ fn test_threaded_usage() {
Params::Vec(vec) => gen_event(Some(vec.as_slice())),
Params::String(str) => gen_event(Some(str.as_bytes())),
Params::Store(_) => gen_event(None),
_ => panic!("unsupported parameter type"),
}
} else {
gen_event(None)
@ -120,10 +121,7 @@ fn test_threaded_usage() {
}
}
event_sender
.send((
LOW_SEV_EVENT.into(),
Some(Params::Heapless((2_u32, 3_u32).into())),
))
.send((LOW_SEV_EVENT, Some(Params::Heapless((2_u32, 3_u32).into()))))
.expect("Sending low severity event failed");
loop {
match event_rx.try_recv() {


@ -6,7 +6,7 @@ pub mod crossbeam_test {
FailParams, RequestId, VerificationReporterCfg, VerificationReporterWithSender,
VerificationReportingProvider,
};
use satrs::pus::CrossbeamTmInStoreSender;
use satrs::pus::TmInSharedPoolSenderWithCrossbeam;
use satrs::tmtc::tm_helper::SharedTmPool;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
use spacepackets::ecss::tm::PusTmReader;
@ -40,10 +40,13 @@ pub mod crossbeam_test {
let shared_tc_pool_0 = Arc::new(RwLock::new(StaticMemoryPool::new(pool_cfg)));
let shared_tc_pool_1 = shared_tc_pool_0.clone();
let (tx, rx) = crossbeam_channel::bounded(10);
let sender =
CrossbeamTmInStoreSender::new(0, "verif_sender", shared_tm_pool.clone(), tx.clone());
let mut reporter_with_sender_0 =
VerificationReporterWithSender::new(&cfg, Box::new(sender));
let sender = TmInSharedPoolSenderWithCrossbeam::new(
0,
"verif_sender",
shared_tm_pool.clone(),
tx.clone(),
);
let mut reporter_with_sender_0 = VerificationReporterWithSender::new(&cfg, sender);
let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
// For test purposes, we retrieve the request ID from the TCs and pass them to the receiver
// thread.