38 Commits

Author SHA1 Message Date
ce7eb8650f changelog
2024-04-04 15:20:07 +02:00
df2733a176 Major refactoring and update of PUS module 2024-04-04 15:18:53 +02:00
344fe6a4c0 Merge pull request 'Simplify low level PUS API' (#145) from simplify-low-level-pus-api into main
Reviewed-on: #145
2024-03-29 23:41:14 +01:00
a5941751d7 Simplify low-level PUS API for verification and events
2024-03-29 16:22:40 +01:00
977e29894b Merge pull request 'STM32 defmt + RTT support' (#144) from stm32-defmt-support into main
Reviewed-on: #144
2024-03-29 12:34:02 +01:00
dd1417a368 Upgrade example to use defmt/RTT/probe-rs
2024-03-28 23:47:07 +01:00
3195cf5111 update config.toml template file
2024-03-28 23:06:16 +01:00
8280c70682 Merge pull request 'Framework to Library' (#143) from framework-to-library into main
Reviewed-on: #143
2024-03-27 14:33:02 +01:00
19c5aa9b83 update rust book as well
2024-03-27 14:17:59 +01:00
713b4e097b update the README 2024-03-27 14:14:45 +01:00
746b31ec5d Merge pull request 'satrs-example RTIC v2' (#142) from satrs-example-rtic-v2 into main
Reviewed-on: #142
2024-03-25 16:09:06 +01:00
2318cd4293 Update satrs-example for the STM32F3
- Update RTIC to v2
- Update Python client version
2024-03-25 14:26:07 +01:00
a6b57d3eb9 Merge pull request 'Update STM32F3 example' (#141) from update-stm32f3-example into main
Reviewed-on: #141
2024-03-22 15:19:58 +01:00
bddd3132d4 added some more instructions for Python
2024-03-22 13:18:20 +01:00
6a6ffba754 why have two files
2024-03-22 13:09:27 +01:00
d27a41e4de Start updating the STM32F3 Discovery example
2024-03-22 13:08:01 +01:00
128df9a813 Merge pull request 'First version of asynchronix based mini simulator' (#139) from init-minisim into main
Reviewed-on: #139
2024-03-11 10:41:24 +01:00
7387be3bc3 new request/reponse API
2024-03-11 10:26:48 +01:00
d3fb504545 clean up manifest file
2024-03-09 15:14:15 +01:00
ae8e39f626 First version of asynchronix based mini simulator
- Basic simulator with 3 devices
- Can be driven via a UDP interface
- Design allows to drive the simulation via different interface in the future
  by using Request/Reply messaging.
2024-03-09 15:11:11 +01:00
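The request/reply split described in this commit can be illustrated with a small standalone sketch. All names, message types and the port below are assumptions for illustration only and not the actual satrs-minisim code; the point is that the simulator core only consumes request messages and produces replies, while the UDP front-end merely translates datagrams into those messages.

```rust
use std::net::UdpSocket;
use std::sync::mpsc;
use std::thread;

// Hypothetical message types; the real satrs-minisim requests/replies differ.
enum SimRequest {
    Ping,
}
enum SimReply {
    Pong,
}

fn main() -> std::io::Result<()> {
    // The simulator core only sees Request/Reply messages, so other front-ends
    // (TCP, in-process test drivers, ...) could be added later without touching it.
    let (req_tx, req_rx) = mpsc::channel::<(SimRequest, mpsc::Sender<SimReply>)>();
    thread::spawn(move || {
        for (request, reply_tx) in req_rx {
            let reply = match request {
                SimRequest::Ping => SimReply::Pong,
            };
            let _ = reply_tx.send(reply);
        }
    });

    // UDP front-end: one datagram in, one datagram out.
    let socket = UdpSocket::bind("127.0.0.1:7303")?;
    let mut buf = [0u8; 4096];
    loop {
        let (_len, src) = socket.recv_from(&mut buf)?;
        // Decoding the received datagram into a concrete request is elided here.
        let (reply_tx, reply_rx) = mpsc::channel();
        req_tx.send((SimRequest::Ping, reply_tx)).unwrap();
        match reply_rx.recv().unwrap() {
            SimReply::Pong => socket.send_to(b"pong", src)?,
        };
    }
}
```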
ab3d907d4e Merge pull request 'Refactor TMTC distribution modules' (#138) from ccsds-distrib-refactoring into main
Reviewed-on: #138
2024-03-04 16:53:23 +01:00
3de5954898 Refactor TMTC distribution modules
2024-03-04 16:26:34 +01:00
5600aa576c Merge pull request 'use generics for the PUS stack' (#134) from pus-stack-use-generics into main
Reviewed-on: #134
2024-02-26 15:46:47 +01:00
88793cfa87 add some helper types
2024-02-26 15:34:20 +01:00
223b637eb8 use generics for the PUS stack
2024-02-26 15:18:15 +01:00
cf9b115e1e Merge pull request 'Refactored Verification Reporter Module' (#132) from refactor-verification-mod into main
Reviewed-on: #132
2024-02-26 11:58:57 +01:00
eea9b11b39 refactored verification reporter
- Use generics instead of trait objects where applicable.
2024-02-26 11:41:42 +01:00
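The bullet above summarizes the refactoring: hooks that were previously held as boxed trait objects are now taken as generic parameters. The following self-contained sketch only illustrates that general trade-off between dynamic and static dispatch; the trait and struct names are made up and are not the actual satrs verification reporter API.

```rust
// Illustrative only: hypothetical names, not the satrs verification types.
trait TmSender {
    fn send_tm(&self, tm: &[u8]);
}

struct StdoutSender;
impl TmSender for StdoutSender {
    fn send_tm(&self, tm: &[u8]) {
        println!("TM: {:x?}", tm);
    }
}

// Trait-object variant: dynamic dispatch, the concrete sender type is erased.
struct ReporterDyn {
    sender: Box<dyn TmSender>,
}

// Generic variant: static dispatch, the sender type becomes part of the reporter type.
struct ReporterGeneric<S: TmSender> {
    sender: S,
}

impl<S: TmSender> ReporterGeneric<S> {
    fn report_success(&self) {
        // Payload bytes are placeholders.
        self.sender.send_tm(&[0x08, 0x01]);
    }
}

fn main() {
    let reporter = ReporterGeneric { sender: StdoutSender };
    reporter.report_success();
    let _boxed = ReporterDyn { sender: Box::new(StdoutSender) };
}
```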
f21ab0017e Merge pull request 'fixed for scheduler' (#133) from scheduler-fixes into main
Reviewed-on: #133
2024-02-26 11:15:50 +01:00
a7ca00317f cargo fmt
2024-02-26 11:00:48 +01:00
75fda42f4f fixed for scheduler
2024-02-26 10:53:33 +01:00
faf0f6f6c6 Merge pull request 'refactored event manager' (#131) from refactor-event-man into main
Reviewed-on: #131
2024-02-23 14:31:48 +01:00
a690c7720d Refactored event manager
2024-02-23 14:19:30 +01:00
b48b5b8caa Merge pull request 'bump example patch version' (#129) from prepare-example-release into main
Reviewed-on: #129
2024-02-21 13:51:49 +01:00
238c3a8d43 bump example patch version
2024-02-21 11:34:35 +01:00
de8c0bc13e Merge pull request 'Use released package versions again' (#128) from bump-example into main
Reviewed-on: #128
2024-02-21 11:34:07 +01:00
012eb82f42 bump example
2024-02-21 11:22:16 +01:00
d26f6cbe27 Merge pull request 'sat-rs v0.2.0-rc.0' (#127) from prepare-satrs-release-candidate into main
Reviewed-on: #127
2024-02-21 11:13:05 +01:00
82d3215761 changelog
2024-02-21 11:08:22 +01:00
119 changed files with 52077 additions and 6433 deletions

.gitignore
View File

@ -1,5 +1,9 @@
/target target/
output.log
/Cargo.lock /Cargo.lock
output.log
/.idea/* /.idea/*
!/.idea/runConfigurations !/.idea/runConfigurations

View File

@@ -4,6 +4,7 @@ members = [
     "satrs",
     "satrs-mib",
     "satrs-example",
+    "satrs-minisim",
     "satrs-shared",
 ]

View File

@@ -1,4 +1,4 @@
-<p align="center"> <img src="misc/satrs-logo.png" width="40%"> </p>
+<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
 [![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
 [![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)
@@ -8,17 +8,27 @@
 sat-rs
 =========
-This is the repository of the sat-rs framework. Its primary goal is to provide re-usable components
+This is the repository of the sat-rs library. Its primary goal is to provide re-usable components
 to write on-board software for remote systems like rovers or satellites. It is specifically written
 for the special requirements for these systems. You can find an overview of the project and the
 link to the [more high-level sat-rs book](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
 at the [IRS software projects website](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
+This is early-stage software. Important features are missing. New releases
+with breaking changes are released regularly, with all changes documented inside respective
+changelog files. You should only use this library if your are willing to work in this
+environment.
 A lot of the architecture and general design considerations are based on the
 [FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
 through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
 and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
-This framework is in the early stages of development. Important features are missing. New releases
-with breaking changes are released regularly, with all changes documented inside respective
-changelog files. You should only use this framework if your are willing to work in this
-environment.
 # Overview
 This project currently contains following crates:
@@ -35,7 +45,7 @@ This project currently contains following crates:
 * [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
 Components to build a mission information base from the on-board software directly.
 * [`satrs-example-stm32f3-disco`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example-stm32f3-disco):
-Example of a simple example on-board software using sat-rs components on a bare-metal system
+Example of a simple example using low-level sat-rs components on a bare-metal system
 with constrained resources.
 Each project has its own `CHANGELOG.md`.

View File

@@ -18,15 +18,19 @@ def generate_cov_report(open_report: bool, format: str, package: str):
 out_path = "./target/debug/coverage"
 if format == "lcov":
 out_path = "./target/debug/lcov.info"
-os.system(
+grcov_cmd = (
 f"grcov . -s . --binary-path ./target/debug/ -t {format} --branch --ignore-not-existing "
 f"-o {out_path}"
 )
+print(f"Running: {grcov_cmd}")
+os.system(grcov_cmd)
 if format == "lcov":
-os.system(
+lcov_cmd = (
 "genhtml -o ./target/debug/coverage/ --show-details --highlight --ignore-errors source "
 "--legend ./target/debug/lcov.info"
 )
+print(f"Running: {lcov_cmd}")
+os.system(lcov_cmd)
 if open_report:
 coverage_report_path = os.path.abspath("./target/debug/coverage/index.html")
 webbrowser.open_new_tab(coverage_report_path)
@@ -43,7 +47,7 @@ def main():
 parser.add_argument(
 "-p",
 "--package",
-choices=["satrs"],
+choices=["satrs", "satrs-minisim", "satrs-example"],
 default="satrs",
 help="Choose project to generate coverage for",
 )

misc/satrs-logo-v2.png (new binary file, 49 KiB; not shown)

View File

@@ -15,7 +15,7 @@ action commanding could look like.
 2. Target ID and Action String based. The target ID is the same as in the first proposal, but
 the unique action is identified by a string.
-The framework provides an `ActionRequest` abstraction to model both of these cases.
+The library provides an `ActionRequest` abstraction to model both of these cases.
 ## Commanding with ECSS PUS 8
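The two addressing schemes listed in the hunk above could be modelled roughly along the following lines. All type and field names here are assumptions for this sketch, not the actual satrs `ActionRequest` definition.

```rust
// Hypothetical types for illustration; the TargetId alias and field names are assumptions.
type TargetId = u32;

#[derive(Debug)]
enum ActionId {
    /// Proposal 1: unique numeric action ID per target.
    Numeric(u32),
    /// Proposal 2: unique action string per target.
    Name(String),
}

#[derive(Debug)]
struct ActionRequest {
    target: TargetId,
    action: ActionId,
    /// Optional application data forwarded to the action handler.
    args: Vec<u8>,
}

fn main() {
    let by_id = ActionRequest {
        target: 0x0102,
        action: ActionId::Numeric(4),
        args: vec![],
    };
    let by_name = ActionRequest {
        target: 0x0102,
        action: ActionId::Name("switch_heater".into()),
        args: vec![1],
    };
    println!("{:?}\n{:?}", by_id, by_name);
}
```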

View File

@@ -20,7 +20,7 @@ components.
 1. [UDP TMTC Server](https://docs.rs/satrs/latest/satrs/hal/host/udp_server/index.html).
 UDP is already packet based which makes it an excellent fit for exchanging space packets.
 2. [TCP TMTC Server Components](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/index.html).
-TCP is a stream based protocol, so the framework provides building blocks to parse telemetry
+TCP is a stream based protocol, so the library provides building blocks to parse telemetry
 from an arbitrary bytestream. Two concrete implementations are provided:
 - [TCP spacepackets server](https://docs.rs/satrs/latest/satrs/hal/std/tcp_server/struct.TcpSpacepacketsServer.html)
 to parse tightly packed CCSDS Spacepackets.
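As a small illustration of why UDP needs no extra framing layer while TCP does, here is a sketch using only the standard library. The port and variable names are arbitrary; this is not the satrs UDP/TCP server API, which wraps this kind of loop with TC forwarding and TM handling.

```rust
use std::net::UdpSocket;

fn main() -> std::io::Result<()> {
    // Each received datagram already corresponds to exactly one telecommand packet,
    // so no additional delimiting or parsing layer is required.
    let socket = UdpSocket::bind("0.0.0.0:7301")?;
    let mut buf = [0u8; 4096];
    loop {
        let (len, src) = socket.recv_from(&mut buf)?;
        let tc_packet = &buf[..len];
        println!("received {} byte TC from {}", tc_packet.len(), src);
        // With TCP, the same bytes would arrive as an unstructured stream and would first
        // have to be delimited, e.g. via CCSDS space packet headers or COBS frames.
    }
}
```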

View File

@@ -1,13 +1,14 @@
-# Framework Design
+# Library Design
 Satellites and space systems in general are complex systems with a wide range of requirements for
-both the hardware and the software. Consequently, the general design of the framework is centered
+both the hardware and the software. Consequently, the general design of the library is centered
 around many light-weight components which try to impose as few restrictions as possible on how to
-solve certain problems.
+solve certain problems. This is also the reason why sat-rs is explicitely called a library
+instead of a framework.
 There are still a lot of common patterns and architectures across these systems where guidance
 of how to solve a problem and a common structure would still be extremely useful to avoid pitfalls
-which were already solved and to avoid boilerplate code. This framework tries to provide this
+which were already solved and to avoid boilerplate code. This library tries to provide this
 structure and guidance the following way:
 1. Providing this book which explains the architecture and design patterns in respect to common
@@ -18,7 +19,7 @@ structure and guidance the following way:
 3. Providing a good test suite. This includes both unittests and integration tests. The integration
 tests can also serve as smaller usage examples than the large `satrs-example` application.
-This framework has special support for standards used in the space industry. This especially
+This library has special support for standards used in the space industry. This especially
 includes standards provided by Consultative Committee for Space Data Systems (CCSDS) and European
 Cooperation for Space Standardization (ECSS). It does not enforce using any of those standards,
 but it is always recommended to use some sort of standard for interoperability.
@@ -30,10 +31,10 @@ Flying Laptop Project by the University of Stuttgart with Airbus Defence and Spa
 It has flight heritage through the 2 mssions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)
 and [EIVE](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/EIVE/).
 Therefore, a lot of the design concepts were ported more or less unchanged to the `sat-rs`
-framework.
+library.
 FLP is a medium-size small satellite with a higher budget and longer development time than EIVE,
 which allowed to build a highly reliable system while EIVE is a smaller 6U+ cubesat which had a
-shorter development cycle and was built using cheaper COTS components. This framework also tries
+shorter development cycle and was built using cheaper COTS components. This library also tries
 to accumulate the knowledge of developing the OBSW and operating the satellite for both these
 different systems and provide a solution for a wider range of small satellite systems.

View File

@@ -1,6 +1,6 @@
 # sat-rs Example Application
-The `sat-rs` framework includes a monolithic example application which can be found inside
+The `sat-rs` library includes a monolithic example application which can be found inside
 the [`satrs-example`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
 subdirectory of the repository. The primary purpose of this example application is to show how
 the various components of the sat-rs framework could be used as part of a larger on-board

View File

@@ -1,7 +1,7 @@
 The sat-rs book
 ======
-This book is the primary information resource for the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
+This book is the primary information resource for the [sat-rs library](https://egit.irs.uni-stuttgart.de/rust/sat-rs)
 in addition to the regular API documentation. It contains the following resources:
 1. Architecture informations and consideration which would exceeds the scope of the regular API.
@@ -12,10 +12,15 @@ in addition to the regular API documentation. It contains the following resource
 # Introduction
-The primary goal of the sat-rs framework is to provide re-usable components
+The primary goal of the sat-rs library is to provide re-usable components
 to write on-board software for remote systems like rovers or satellites. It is specifically written
 for the special requirements for these systems.
+It should be noted that sat-rs is early-stage software. Important features are missing. New releases
+with breaking changes are released regularly, with all changes documented inside respective
+changelog files. You should only use this library if your are willing to work in this
+environment.
 A lot of the architecture and general design considerations are based on the
 [FSFW](https://egit.irs.uni-stuttgart.de/fsfw/fsfw) C++ framework which has flight heritage
 through the 2 missions [FLP](https://www.irs.uni-stuttgart.de/en/research/satellitetechnology-and-instruments/smallsatelliteprogram/flying-laptop/)

View File

@ -0,0 +1,37 @@
[target.'cfg(all(target_arch = "arm", target_os = "none"))']
# uncomment ONE of these three option to make `cargo run` start a GDB session
# which option to pick depends on your system
# You can also replace openocd.gdb by jlink.gdb when using a J-Link.
# runner = "arm-none-eabi-gdb -q -x openocd.gdb"
# runner = "gdb-multiarch -q -x openocd.gdb"
# runner = "gdb -q -x openocd.gdb"
runner = "probe-rs run --chip STM32F303VCTx"
rustflags = [
"-C", "linker=flip-link",
# LLD (shipped with the Rust toolchain) is used as the default linker
"-C", "link-arg=-Tlink.x",
"-C", "link-arg=-Tdefmt.x",
# This is needed if your flash or ram addresses are not aligned to 0x10000 in memory.x
# See https://github.com/rust-embedded/cortex-m-quickstart/pull/95
"-C", "link-arg=--nmagic",
# if you run into problems with LLD switch to the GNU linker by commenting out
# this line
# "-C", "linker=arm-none-eabi-ld",
# if you need to link to pre-compiled C libraries provided by a C toolchain
# use GCC as the linker by commenting out both lines above and then
# uncommenting the three lines below
# "-C", "linker=arm-none-eabi-gcc",
# "-C", "link-arg=-Wl,-Tlink.x",
# "-C", "link-arg=-nostartfiles",
]
[build]
# comment out the following line if you intend to run unit tests on host machine
target = "thumbv7em-none-eabihf" # Cortex-M4F and Cortex-M7F (with FPU)
[env]
DEFMT_LOG = "info"
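The new runner configuration above wires the example up for `probe-rs`, `defmt` over RTT and `flip-link`. A minimal firmware skeleton that matches this setup could look as follows; this is a sketch assuming the `defmt-brtt` and `panic-probe` dependencies from the example's manifest and is not the actual satrs-example-stm32f3-disco code.

```rust
#![no_std]
#![no_main]

use cortex_m_rt::entry;
// Linking these crates in provides the defmt transport (RTT) and a panic handler
// which also reports panics via defmt.
use defmt_brtt as _;
use panic_probe as _;

#[entry]
fn main() -> ! {
    // Printed on the host via `probe-rs run`, filtered by the DEFMT_LOG level set above.
    defmt::info!("satrs STM32F3 example skeleton up");
    loop {
        // Sleep until the next interrupt; the real example runs RTIC tasks instead.
        cortex_m::asm::wfi();
    }
}
```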

View File

@@ -1,3 +1,4 @@
 /target
 /itm.txt
 /.cargo/config*
+/.vscode

View File

@ -1,66 +0,0 @@
{
/*
* Requires the Rust Language Server (RLS) and Cortex-Debug extensions
* https://marketplace.visualstudio.com/items?itemName=rust-lang.rust
* https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug
*/
"version": "0.2.0",
"configurations": [
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Debug",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
},
{
/* Launches debug session for currently open example */
"type": "cortex-debug",
"request": "launch",
"name": "Release",
"servertype": "openocd",
"cwd": "${workspaceRoot}",
"preLaunchTask": "cargo build",
"runToEntryPoint": "true",
"executable": "./target/thumbv7em-none-eabihf/release/satrs-example-stm32f3-disco",
"preLaunchCommands": ["break rust_begin_unwind"],
"device": "STM32F303VCT6",
"configFiles": [
"${workspaceRoot}/.vscode/openocd-helpers.tcl",
"interface/stlink.cfg",
"target/stm32f3x.cfg"
],
"svdFile": "${env:HOME}/.svd/STM32F303.svd",
"swoConfig": {
"enabled": true,
"cpuFrequency": 8000000,
"swoFrequency": 2000000,
"source": "probe",
"decoders": [
{ "type": "console", "label": "ITM", "port": 0 }
]
}
}
]
}

View File

@ -1,3 +0,0 @@
{
"cortex-debug.gdbPath.linux": "gdb-multiarch"
}

View File

@ -13,9 +13,9 @@ dependencies = [
[[package]] [[package]]
name = "atomic-polyfill" name = "atomic-polyfill"
version = "0.1.11" version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ff7eb3f316534d83a8a2c3d1674ace8a5a71198eba31e2e2b597833f699b28" checksum = "8cf2bce30dfe09ef0bfaef228b9d414faaf7e563035494d7fe092dba54b300f4"
dependencies = [ dependencies = [
"critical-section", "critical-section",
] ]
@ -55,20 +55,21 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]] [[package]]
name = "bxcan" name = "bxcan"
version = "0.6.2" version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b13b4b2ea9ab2ba924063ebb86ad895cb79f4a79bf90f27949eb20c335b30f9" checksum = "40ac3d0c0a542d0ab5521211f873f62706a7136df415676f676d347e5a41dd80"
dependencies = [ dependencies = [
"bitflags", "bitflags",
"nb 1.0.0", "embedded-hal 0.2.7",
"nb 1.1.0",
"vcell", "vcell",
] ]
[[package]] [[package]]
name = "byteorder" name = "byteorder"
version = "1.4.3" version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"
[[package]] [[package]]
name = "cast" name = "cast"
@ -87,31 +88,42 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]] [[package]]
name = "chrono" name = "chrono"
version = "0.4.23" version = "0.4.35"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "16b0a3d9ed01224b22057780a37bb8c5dbfe1be8ba48678e7bf57ec4b385411f" checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a"
dependencies = [ dependencies = [
"num-integer",
"num-traits", "num-traits",
] ]
[[package]] [[package]]
name = "cortex-m" name = "cobs"
version = "0.7.6" version = "0.2.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "70858629a458fdfd39f9675c4dc309411f2a3f83bede76988d81bf1a0ecee9e0" checksum = "67ba02a97a2bd10f4b59b25c7973101c79642302776489e030cd13cdab09ed15"
[[package]]
name = "cobs"
version = "0.2.3"
source = "git+https://github.com/robamu/cobs.rs.git?branch=all_features#c70a7f30fd00a7cbdb7666dec12b437977385d40"
[[package]]
name = "cortex-m"
version = "0.7.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8ec610d8f49840a5b376c69663b6369e71f4b34484b9b2eb29fb918d92516cb9"
dependencies = [ dependencies = [
"bare-metal 0.2.5", "bare-metal 0.2.5",
"bitfield", "bitfield",
"embedded-hal", "critical-section",
"embedded-hal 0.2.7",
"volatile-register", "volatile-register",
] ]
[[package]] [[package]]
name = "cortex-m-rt" name = "cortex-m-rt"
version = "0.7.2" version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d6d3328b8b5534f0c90acd66b68950f2763b37e0173cac4d8b4937c4a80761f9" checksum = "ee84e813d593101b1723e13ec38b6ab6abbdbaaa4546553f5395ed274079ddb1"
dependencies = [ dependencies = [
"cortex-m-rt-macros", "cortex-m-rt-macros",
] ]
@ -124,48 +136,44 @@ checksum = "f0f6f3e36f203cfedbc78b357fb28730aa2c6dc1ab060ee5c2405e843988d3c7"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn 1.0.109",
] ]
[[package]] [[package]]
name = "cortex-m-rtic" name = "cortex-m-semihosting"
version = "1.1.3" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c6b82f1c39acd6c3a35c2013b6110c20f5bc534522791fabadeed49ccada2dce" checksum = "c23234600452033cc77e4b761e740e02d2c4168e11dbf36ab14a0f58973592b0"
dependencies = [ dependencies = [
"bare-metal 1.0.0",
"cortex-m", "cortex-m",
"cortex-m-rtic-macros",
"heapless",
"rtic-core",
"rtic-monotonic",
"version_check",
] ]
[[package]] [[package]]
name = "cortex-m-rtic-macros" name = "crc"
version = "1.1.5" version = "3.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e8e9645ef54bec1cf70ac33e9bf9566e6507ab5b41ae6baf3735662194e8607" checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe"
dependencies = [ dependencies = [
"proc-macro-error", "crc-catalog",
"proc-macro2",
"quote",
"rtic-syntax",
"syn",
] ]
[[package]]
name = "crc-catalog"
version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
[[package]] [[package]]
name = "critical-section" name = "critical-section"
version = "1.1.1" version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6548a0ad5d2549e111e1f6a11a6c2e2d00ce6a3dafe22948d67c2b443f775e52" checksum = "7059fff8937831a9ae6f0fe4d658ffabf58f2ca96aa9dec1c889f936f705f216"
[[package]] [[package]]
name = "darling" name = "darling"
version = "0.14.2" version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0dd3cd20dc6b5a876612a6e5accfe7f3dd883db6d07acfbf14c128f61550dfa" checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391"
dependencies = [ dependencies = [
"darling_core", "darling_core",
"darling_macro", "darling_macro",
@ -173,26 +181,102 @@ dependencies = [
[[package]] [[package]]
name = "darling_core" name = "darling_core"
version = "0.14.2" version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a784d2ccaf7c98501746bf0be29b2022ba41fd62a2e622af997a03e9f972859f" checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f"
dependencies = [ dependencies = [
"fnv", "fnv",
"ident_case", "ident_case",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn 2.0.53",
] ]
[[package]] [[package]]
name = "darling_macro" name = "darling_macro"
version = "0.14.2" version = "0.20.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7618812407e9402654622dd402b0a89dff9ba93badd6540781526117b92aab7e" checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f"
dependencies = [ dependencies = [
"darling_core", "darling_core",
"quote", "quote",
"syn", "syn 2.0.53",
]
[[package]]
name = "defmt"
version = "0.3.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3939552907426de152b3c2c6f51ed53f98f448babd26f28694c95f5906194595"
dependencies = [
"bitflags",
"defmt-macros",
]
[[package]]
name = "defmt-brtt"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2f0ac3635d0c89d12b8101fcb44a7625f5f030a1c0491124b74467eb5a58a78"
dependencies = [
"critical-section",
"defmt",
]
[[package]]
name = "defmt-macros"
version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "18bdc7a7b92ac413e19e95240e75d3a73a8d8e78aa24a594c22cbb4d44b4bbda"
dependencies = [
"defmt-parser",
"proc-macro-error",
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "defmt-parser"
version = "0.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff4a5fefe330e8d7f31b16a318f9ce81000d8e35e69b93eae154d16d2278f70f"
dependencies = [
"thiserror",
]
[[package]]
name = "defmt-test"
version = "0.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "290966e8c38f94b11884877242de876280d0eab934900e9642d58868e77c5df1"
dependencies = [
"cortex-m-rt",
"cortex-m-semihosting",
"defmt",
"defmt-test-macros",
]
[[package]]
name = "defmt-test-macros"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "984bc6eca246389726ac2826acc2488ca0fe5fcd6b8d9b48797021951d76a125"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "delegate"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ee5df75c70b95bd3aacc8e2fd098797692fb1d54121019c4de481e42f04c8a1"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
] ]
[[package]] [[package]]
@ -214,6 +298,12 @@ dependencies = [
"void", "void",
] ]
[[package]]
name = "embedded-hal"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "361a90feb7004eca4019fb28352a9465666b24f840f5c3cddf0ff13920590b89"
[[package]] [[package]]
name = "embedded-time" name = "embedded-time"
version = "0.12.1" version = "0.12.1"
@ -225,25 +315,31 @@ dependencies = [
[[package]] [[package]]
name = "enumset" name = "enumset"
version = "1.0.12" version = "1.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19be8061a06ab6f3a6cf21106c873578bf01bd42ad15e0311a9c76161cb1c753" checksum = "226c0da7462c13fb57e5cc9e0dc8f0635e7d27f276a3a7fd30054647f669007d"
dependencies = [ dependencies = [
"enumset_derive", "enumset_derive",
] ]
[[package]] [[package]]
name = "enumset_derive" name = "enumset_derive"
version = "0.6.1" version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03e7b551eba279bf0fa88b83a46330168c1560a52a94f5126f892f0b364ab3e0" checksum = "e08b6c6ab82d70f08844964ba10c7babb716de2ecaeab9be5717918a5177d3af"
dependencies = [ dependencies = [
"darling", "darling",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn 2.0.53",
] ]
[[package]]
name = "equivalent"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5"
[[package]] [[package]]
name = "fnv" name = "fnv"
version = "1.0.7" version = "1.0.7"
@ -252,18 +348,42 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]] [[package]]
name = "fugit" name = "fugit"
version = "0.3.6" version = "0.3.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ab17bb279def6720d058cb6c052249938e7f99260ab534879281a95367a87e5" checksum = "17186ad64927d5ac8f02c1e77ccefa08ccd9eaa314d5a4772278aa204a22f7e7"
dependencies = [ dependencies = [
"gcd", "gcd",
] ]
[[package]] [[package]]
name = "gcd" name = "futures-core"
version = "2.2.0" version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4b1b088ad0a967aa29540456b82fc8903f854775d33f71e9709c4efb3dfbfd2" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d"
[[package]]
name = "futures-task"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004"
[[package]]
name = "futures-util"
version = "0.3.30"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48"
dependencies = [
"futures-core",
"futures-task",
"pin-project-lite",
"pin-utils",
]
[[package]]
name = "gcd"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d758ba1b47b00caf47f24925c0074ecb20d6dfcffe7f6d53395c0465674841a"
[[package]] [[package]]
name = "generic-array" name = "generic-array"
@ -276,9 +396,9 @@ dependencies = [
[[package]] [[package]]
name = "generic-array" name = "generic-array"
version = "0.14.6" version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [ dependencies = [
"typenum", "typenum",
"version_check", "version_check",
@ -286,29 +406,26 @@ dependencies = [
[[package]] [[package]]
name = "hash32" name = "hash32"
version = "0.2.1" version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c35f58762feb77d74ebe43bdbc3210f09be9fe6742234d573bacc26ed92b67" checksum = "47d60b12902ba28e2730cd37e95b8c9223af2808df9e902d4df49588d1470606"
dependencies = [ dependencies = [
"byteorder", "byteorder",
] ]
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.12.3" version = "0.14.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604"
[[package]] [[package]]
name = "heapless" name = "heapless"
version = "0.7.16" version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db04bc24a18b9ea980628ecf00e6c0264f3c1426dac36c00cb49b6fbad8b0743" checksum = "0bfb9eb618601c89945a70e254898da93b13be0388091d42117462b265bb3fad"
dependencies = [ dependencies = [
"atomic-polyfill",
"hash32", "hash32",
"rustc_version 0.4.0",
"spin",
"stable_deref_trait", "stable_deref_trait",
] ]
@ -320,41 +437,14 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39"
[[package]] [[package]]
name = "indexmap" name = "indexmap"
version = "1.9.2" version = "2.2.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1885e79c1fc4b10f0e172c475f458b7f7b93061064d98c3293e98c5ba0c8b399" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26"
dependencies = [ dependencies = [
"autocfg", "equivalent",
"hashbrown", "hashbrown",
] ]
[[package]]
name = "itm_logger"
version = "0.1.3-pre.0"
dependencies = [
"cortex-m",
"log",
]
[[package]]
name = "lock_api"
version = "0.4.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df"
dependencies = [
"autocfg",
"scopeguard",
]
[[package]]
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]] [[package]]
name = "lsm303dlhc" name = "lsm303dlhc"
version = "0.2.0" version = "0.2.0"
@ -362,7 +452,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5d1a5c290951321d1b0d4a40edd828537de9889134a0e67c5146542ae57706" checksum = "9e5d1a5c290951321d1b0d4a40edd828537de9889134a0e67c5146542ae57706"
dependencies = [ dependencies = [
"cast", "cast",
"embedded-hal", "embedded-hal 0.2.7",
"generic-array 0.11.2", "generic-array 0.11.2",
] ]
@ -372,7 +462,7 @@ version = "1.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bc4010833aea396656c2f91ee704d51a6f1329ec2ab56ffd00bfd56f7481ea94" checksum = "bc4010833aea396656c2f91ee704d51a6f1329ec2ab56ffd00bfd56f7481ea94"
dependencies = [ dependencies = [
"generic-array 0.14.6", "generic-array 0.14.7",
] ]
[[package]] [[package]]
@ -381,14 +471,14 @@ version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f" checksum = "801d31da0513b6ec5214e9bf433a77966320625a37860f910be265be6e18d06f"
dependencies = [ dependencies = [
"nb 1.0.0", "nb 1.1.0",
] ]
[[package]] [[package]]
name = "nb" name = "nb"
version = "1.0.0" version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "546c37ac5d9e56f55e73b677106873d9d9f5190605e41a856503623648488cae" checksum = "8d5439c4ad607c3c23abf66de8c8bf57ba8adcd1f129e699851a6e43935d339d"
[[package]] [[package]]
name = "num" name = "num"
@ -414,19 +504,18 @@ dependencies = [
[[package]] [[package]]
name = "num-integer" name = "num-integer"
version = "0.1.45" version = "0.1.46"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f"
dependencies = [ dependencies = [
"autocfg",
"num-traits", "num-traits",
] ]
[[package]] [[package]]
name = "num-iter" name = "num-iter"
version = "0.1.43" version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" checksum = "d869c01cc0c455284163fd0092f1f93835385ccab5a98a0dcc497b2f8bf055a9"
dependencies = [ dependencies = [
"autocfg", "autocfg",
"num-integer", "num-integer",
@ -446,27 +535,60 @@ dependencies = [
[[package]] [[package]]
name = "num-traits" name = "num-traits"
version = "0.2.15" version = "0.2.18"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a"
dependencies = [ dependencies = [
"autocfg", "autocfg",
] ]
[[package]] [[package]]
name = "panic-itm" name = "num_enum"
version = "0.4.2" version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d577d97d1b31268087b6dddf2470e6794ef5eee87d9dca7fcd0481695391a4c" checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845"
dependencies = [
"num_enum_derive",
]
[[package]]
name = "num_enum_derive"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]
[[package]]
name = "panic-probe"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aa6fa5645ef5a760cd340eaa92af9c1ce131c8c09e7f8926d8a24b59d26652b9"
dependencies = [ dependencies = [
"cortex-m", "cortex-m",
"defmt",
] ]
[[package]] [[package]]
name = "paste" name = "paste"
version = "1.0.11" version = "1.0.14"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d01a5bd0424d00070b0098dd17ebca6f961a959dead1dbcbbbc1d1cd8d3deeba" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
[[package]]
name = "pin-utils"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
[[package]] [[package]]
name = "proc-macro-error" name = "proc-macro-error"
@ -477,7 +599,7 @@ dependencies = [
"proc-macro-error-attr", "proc-macro-error-attr",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn 1.0.109",
"version_check", "version_check",
] ]
@ -494,31 +616,54 @@ dependencies = [
[[package]] [[package]]
name = "proc-macro2" name = "proc-macro2"
version = "1.0.49" version = "1.0.79"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57a8eca9f9c4ffde41714334dee777596264c7825420f521abc92b5b5deb63a5" checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e"
dependencies = [ dependencies = [
"unicode-ident", "unicode-ident",
] ]
[[package]] [[package]]
name = "quote" name = "quote"
version = "1.0.23" version = "1.0.35"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8856d8364d252a14d474036ea1358d63c9e6965c8e5c1885c18f73d70bff9c7b" checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
] ]
[[package]] [[package]]
name = "rtcc" name = "rtcc"
version = "0.3.0" version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3623619ce77c09a7d87cf7c61c5c887b9c7dee8805f66af6c4aa5824be4d9930" checksum = "f4fbd0d5bed2b76e27a7ef872568b34072c1af94c277cd52c17a89d54673b3fe"
dependencies = [ dependencies = [
"chrono", "chrono",
] ]
[[package]]
name = "rtic"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c443db16326376bdd64377da268f6616d5f804aba8ce799bac7d1f7f244e9d51"
dependencies = [
"atomic-polyfill",
"bare-metal 1.0.0",
"cortex-m",
"critical-section",
"rtic-core",
"rtic-macros",
]
[[package]]
name = "rtic-common"
version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0786b50b81ef9d2a944a000f60405bb28bf30cd45da2d182f3fe636b2321f35c"
dependencies = [
"critical-section",
]
[[package]] [[package]]
name = "rtic-core" name = "rtic-core"
version = "1.0.0" version = "1.0.0"
@ -526,21 +671,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d9369355b04d06a3780ec0f51ea2d225624db777acbc60abd8ca4832da5c1a42" checksum = "d9369355b04d06a3780ec0f51ea2d225624db777acbc60abd8ca4832da5c1a42"
[[package]] [[package]]
name = "rtic-monotonic" name = "rtic-macros"
version = "1.0.0" version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fb8b0b822d1a366470b9cea83a1d4e788392db763539dc4ba022bcc787fece82" checksum = "54053598ea24b1b74937724e366558412a1777eb2680b91ef646db540982789a"
[[package]]
name = "rtic-syntax"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3ad3ae243dd8d0a1b064615f664d4fa7e63929939074c564cbe5efdc4c503065"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"proc-macro-error",
"proc-macro2", "proc-macro2",
"quote", "quote",
"syn", "syn 2.0.53",
]
[[package]]
name = "rtic-monotonics"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "058c2397dbd5bb4c5650a0e368c3920953e458805ff5097a0511b8147b3619d7"
dependencies = [
"atomic-polyfill",
"cfg-if",
"cortex-m",
"embedded-hal 1.0.0",
"fugit",
"rtic-time",
]
[[package]]
name = "rtic-time"
version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b232e7aebc045cfea81cdd164bc2727a10aca9a4568d406d0a5661cdfd0f19"
dependencies = [
"critical-section",
"futures-util",
"rtic-common",
] ]
[[package]] [[package]]
@ -558,31 +723,56 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366"
dependencies = [ dependencies = [
"semver 1.0.16", "semver 1.0.22",
] ]
[[package]] [[package]]
name = "sat-rs-example-stm32f-disco" name = "satrs"
version = "0.2.0-rc.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8cb19cba46a45047ff0879ebfbf9d6ae1c5b2e0e38b2e08760b10a441d4dae6"
dependencies = [
"cobs 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
"crc",
"delegate",
"num-traits",
"num_enum",
"paste",
"satrs-shared",
"smallvec",
"spacepackets",
]
[[package]]
name = "satrs-example-stm32f3-disco"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"cobs 0.2.3 (git+https://github.com/robamu/cobs.rs.git?branch=all_features)",
"cortex-m", "cortex-m",
"cortex-m-rt", "cortex-m-rt",
"cortex-m-rtic", "cortex-m-semihosting",
"embedded-hal", "defmt",
"defmt-brtt",
"defmt-test",
"embedded-hal 0.2.7",
"enumset", "enumset",
"heapless", "heapless",
"itm_logger", "panic-probe",
"panic-itm", "rtic",
"rtic-monotonics",
"satrs",
"stm32f3-discovery", "stm32f3-discovery",
"stm32f3xx-hal", "stm32f3xx-hal",
"systick-monotonic",
] ]
[[package]] [[package]]
name = "scopeguard" name = "satrs-shared"
version = "1.1.0" version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" checksum = "75a402ba556a7f5eef707035b45e64a3259b09674311e98697f3dd0508a1bf51"
dependencies = [
"spacepackets",
]
[[package]] [[package]]
name = "semver" name = "semver"
@ -595,9 +785,9 @@ dependencies = [
[[package]] [[package]]
name = "semver" name = "semver"
version = "1.0.16" version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a" checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca"
[[package]] [[package]]
name = "semver-parser" name = "semver-parser"
@ -607,17 +797,28 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3"
[[package]] [[package]]
name = "slice-group-by" name = "slice-group-by"
version = "0.3.0" version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03b634d87b960ab1a38c4fe143b508576f075e7c978bfad18217645ebfdfa2ec" checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7"
[[package]] [[package]]
name = "spin" name = "smallvec"
version = "0.9.4" version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f6002a767bff9e83f8eeecf883ecb8011875a21ae8da43bffb817a57e78cc09" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67"
[[package]]
name = "spacepackets"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28246ae2451af240c3e3ff3c51363c7b6ad565ca6aa9bad23b8c725687c485e1"
dependencies = [ dependencies = [
"lock_api", "chrono",
"crc",
"delegate",
"num-traits",
"num_enum",
"zerocopy",
] ]
[[package]] [[package]]
@ -639,9 +840,9 @@ dependencies = [
[[package]] [[package]]
name = "stm32f3" name = "stm32f3"
version = "0.14.0" version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "265cda62ac13307414de4aca58dbbbd8038ddba85cffbb335823aa216f2e3200" checksum = "b28b37228ef3fa47956af38c6abd756e912f244c1657f14e66d42fc8d74ea96f"
dependencies = [ dependencies = [
"bare-metal 1.0.0", "bare-metal 1.0.0",
"cortex-m", "cortex-m",
@ -651,7 +852,8 @@ dependencies = [
[[package]] [[package]]
name = "stm32f3-discovery" name = "stm32f3-discovery"
version = "0.8.0-pre.0" version = "0.8.0-alpha.0"
source = "git+https://github.com/robamu/stm32f3-discovery?branch=complete-dma-update-hal#5ccacae07ceff02d7d3649df67a6a0ba2a144752"
dependencies = [ dependencies = [
"accelerometer", "accelerometer",
"cortex-m", "cortex-m",
@ -663,20 +865,20 @@ dependencies = [
[[package]] [[package]]
name = "stm32f3xx-hal" name = "stm32f3xx-hal"
version = "0.9.1" version = "0.11.0-alpha.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "git+https://github.com/robamu/stm32f3xx-hal?branch=complete-dma-update#04fc76b7912649c84b57bd0ab803ea3ccf2aadae"
checksum = "4e422c5c044e8f3a068b1e14b83c071449e27c9d4bc0e24f972b552d79f2be03"
dependencies = [ dependencies = [
"bare-metal 1.0.0",
"bxcan", "bxcan",
"cfg-if", "cfg-if",
"cortex-m", "cortex-m",
"cortex-m-rt", "cortex-m-rt",
"critical-section",
"embedded-dma", "embedded-dma",
"embedded-hal", "embedded-hal 0.2.7",
"embedded-time", "embedded-time",
"enumset", "enumset",
"nb 1.0.0", "nb 1.1.0",
"num-traits",
"paste", "paste",
"rtcc", "rtcc",
"slice-group-by", "slice-group-by",
@ -691,14 +893,14 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90a4adc8cbd1726249b161898e48e0f3f1ce74d34dc784cbbc98fba4ed283fbf" checksum = "90a4adc8cbd1726249b161898e48e0f3f1ce74d34dc784cbbc98fba4ed283fbf"
dependencies = [ dependencies = [
"embedded-hal", "embedded-hal 0.2.7",
] ]
[[package]] [[package]]
name = "syn" name = "syn"
version = "1.0.107" version = "1.0.109"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f4064b5b16e03ae50984a5a8ed5d4f8803e6bc1fd170a3cda91a1be4b18e3f5" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@ -706,27 +908,47 @@ dependencies = [
] ]
[[package]] [[package]]
name = "systick-monotonic" name = "syn"
version = "1.0.1" version = "2.0.53"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "67fb822d5c615a0ae3a4795ee5b1d06381c7faf488d861c0a4fa8e6a88d5ff84" checksum = "7383cd0e49fff4b6b90ca5670bfd3e9d6a733b3f90c686605aa7eec8c4996032"
dependencies = [ dependencies = [
"cortex-m", "proc-macro2",
"fugit", "quote",
"rtic-monotonic", "unicode-ident",
]
[[package]]
name = "thiserror"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297"
dependencies = [
"thiserror-impl",
]
[[package]]
name = "thiserror-impl"
version = "1.0.58"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
] ]
[[package]] [[package]]
name = "typenum" name = "typenum"
version = "1.16.0" version = "1.17.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825"
[[package]] [[package]]
name = "unicode-ident" name = "unicode-ident"
version = "1.0.6" version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "84a22b9f218b40614adcb3f4ff08b703773ad44fa9423e4e0d346d5db86e4ebc" checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]] [[package]]
name = "usb-device" name = "usb-device"
@@ -754,9 +976,30 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d"
[[package]] [[package]]
name = "volatile-register" name = "volatile-register"
version = "0.2.1" version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ee8f19f9d74293faf70901bc20ad067dc1ad390d2cbf1e3f75f721ffee908b6" checksum = "de437e2a6208b014ab52972a27e59b33fa2920d3e00fe05026167a1c509d19cc"
dependencies = [ dependencies = [
"vcell", "vcell",
] ]
[[package]]
name = "zerocopy"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.53",
]


@@ -2,58 +2,81 @@
name = "satrs-example-stm32f3-disco" name = "satrs-example-stm32f3-disco"
version = "0.1.0" version = "0.1.0"
edition = "2021" edition = "2021"
default-run = "satrs-example-stm32f3-disco"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
cortex-m = "0.7" cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7" cortex-m-rt = "0.7"
embedded-hal = "0.2.6" defmt = "0.3"
cortex-m-rtic = "1.0" defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
enumset = "1.0" panic-probe = { version = "0.3", features = ["print-defmt"] }
heapless = "0.7" embedded-hal = "0.2.7"
systick-monotonic = "1.0" cortex-m-semihosting = "0.5.0"
enumset = "1"
heapless = "0.8"
[dependencies.rtic]
version = "2"
features = ["thumbv7-backend"]
[dependencies.rtic-monotonics]
version = "1"
features = ["cortex-m-systick"]
[dependencies.cobs] [dependencies.cobs]
git = "https://github.com/robamu/cobs.rs.git" git = "https://github.com/robamu/cobs.rs.git"
branch = "all_features" branch = "all_features"
default-features = false default-features = false
[dependencies.panic-itm]
version = "0.4"
[dependencies.itm_logger]
git = "https://github.com/robamu/itm_logger.rs.git"
branch = "all_features"
version = "0.1.3-alpha.0"
[dependencies.stm32f3xx-hal] [dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal" git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.10.0-alpha.0" version = "0.11.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"] features = ["stm32f303xc", "rt", "enumset"]
branch = "all_features" branch = "complete-dma-update"
# Can be used in workspace to develop and update HAL # Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal" # path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery] [dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery" git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0" version = "0.8.0-alpha.0"
branch = "all_features" branch = "complete-dma-update-hal"
# Can be used in workspace to develop and update BSP # Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery" # path = "../stm32f3-discovery"
[dependencies.satrs-core] [dependencies.satrs]
git = "https://egit.irs.uni-stuttgart.de/rust/satrs-core.git" version = "0.2.0-rc.0"
version = "0.1.0-alpha.0"
default-features = false default-features = false
# this lets you use `cargo fix`! [dev-dependencies]
# [[bin]] defmt-test = "0.3"
# name = "stm32f3-blinky"
# test = false
# bench = false
# cargo test
[profile.test]
codegen-units = 1
debug = 2
debug-assertions = true # <-
incremental = false
opt-level = "s" # <-
overflow-checks = true # <-
# cargo build/run --release
[profile.release] [profile.release]
codegen-units = 1 # better optimizations codegen-units = 1
debug = true # symbols are nice and they don't increase the size on Flash debug = 2
lto = true # better optimizations debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-
# cargo test --release
[profile.bench]
codegen-units = 1
debug = 2
debug-assertions = false # <-
incremental = false
lto = 'fat'
opt-level = "s" # <-
overflow-checks = false # <-


@@ -2,15 +2,25 @@ sat-rs example for the STM32F3-Discovery board
======= =======
This example application shows how the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad) This example application shows how the [sat-rs framework](https://egit.irs.uni-stuttgart.de/rust/satrs-launchpad)
can be used on an embedded target. It also shows how a relatively simple OBSW could be built when no can be used on an embedded target.
standard runtime is available. It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency It also shows how a relatively simple OBSW could be built when no standard runtime is available.
framework. It uses [RTIC](https://rtic.rs/1/book/en/) as the concurrency framework and the
[defmt](https://defmt.ferrous-systems.com/) framework for logging.
The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also The STM32F3-Discovery device was picked because it is a cheap Cortex-M4 based device which is also
used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the used by the [Rust Embedded Book](https://docs.rust-embedded.org/book/intro/hardware.html) and the
[Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction [Rust Discovery](https://docs.rust-embedded.org/discovery/f3discovery/) book as an introduction
to embedded Rust. to embedded Rust.
## Pre-Requisites
Make sure the following tools are installed:
1. [`probe-rs`](https://probe.rs/): Application used to flash and debug the MCU. An install sketch is shown after this list.
2. Optional and recommended: [VS Code](https://code.visualstudio.com/) with
[probe-rs plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger)
for debugging.
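
For the first item, `probe-rs` is distributed as a Rust crate, so one common route is to install
it through cargo. The exact crate and feature names have changed between releases, so treat the
following as a sketch and check the [probe-rs website](https://probe.rs/) for the currently
recommended installation method:

```sh
# Assumption: recent probe-rs releases ship the CLI in the probe-rs-tools crate.
# Older releases used `cargo install probe-rs --features cli` instead.
cargo install probe-rs-tools --locked
```

Afterwards, `probe-rs --version` should report the installed version.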
## Preparing Rust and the repository ## Preparing Rust and the repository
Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain. Building an application requires the `thumbv7em-none-eabihf` cross-compiler toolchain.
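
If the target is not installed yet, it can be added with `rustup`. A minimal sketch, assuming
`rustup` is used to manage the Rust toolchain:

```sh
rustup target add thumbv7em-none-eabihf
```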
@@ -40,15 +50,23 @@ you can simply build the application with
cargo build cargo build
``` ```
## Flashing and Debugging from the command line ## Flashing from the command line
TODO You can flash the application from the command line using `probe-rs`:
```sh
probe-rs run --chip STM32F303VCTx
```
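
If you are unsure about the exact chip name, the `probe-rs` CLI can list the targets it knows
about. This assumes the `chip list` subcommand is available in the installed version:

```sh
# List all supported chips and filter for the STM32F303 family.
probe-rs chip list | grep -i stm32f303
```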
## Debugging with VS Code ## Debugging with VS Code
The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug The STM32F3-Discovery comes with an on-board ST-Link so all that is required to flash and debug
the board is a Mini-USB cable. The code in this repository was debugged using `openocd` the board is a Mini-USB cable. The code in this repository was debugged using [`probe-rs`](https://probe.rs/docs/tools/debuggerA)
and the VS Code [`Cortex-Debug` plugin](https://marketplace.visualstudio.com/items?itemName=marus25.cortex-debug). and the VS Code [`probe-rs` plugin](https://marketplace.visualstudio.com/items?itemName=probe-rs.probe-rs-debugger).
Make sure to install this plugin first.
Sample configuration files are provided inside the `vscode` folder.
Use `cp vscode .vscode -r` to use them for your project.
Some sample configuration files for VS Code were provided as well. You can simply use `Run` and `Debug` Some sample configuration files for VS Code were provided as well. You can simply use `Run` and `Debug`
to automatically rebuild and flash your application. to automatically rebuild and flash your application.
@@ -56,20 +74,32 @@ to automatically rebuild and flash your application.
The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening The `tasks.json` and `launch.json` files are generic and you can use them immediately by opening
the folder in VS code or adding it to a workspace. the folder in VS code or adding it to a workspace.
If you would like to use a custom GDB application, you can specify the gdb binary in the following
configuration variables in your `settings.json`:
- `"cortex-debug.gdbPath"`
- `"cortex-debug.gdbPath.linux"`
- `"cortex-debug.gdbPath.windows"`
- `"cortex-debug.gdbPath.osx"`
## Commanding with Python ## Commanding with Python
When the SW is running on the Discovery board, you can command the MCU via a serial interface, When the SW is running on the Discovery board, you can command the MCU via a serial interface,
using COBS encoded CCSDS packets. using COBS encoded PUS packets.
TODO: It is recommended to use a virtual environment to do this. To set up one in the command line,
- How and where to connect serial interface on the MCU you can use `python3 -m venv venv` on Unix systems or `py -m venv venv` on Windows systems.
- How to set up Python venv (or at least strongly recommend it) and install deps After doing this, you can check the [venv tutorial](https://docs.python.org/3/tutorial/venv.html)
- How to copy `def_tmtc_conf.json` to `tmtc_conf.json` and adapt it for custom serial port on how to activate the environment and then use the following command to install the required
dependency:
```sh
pip install -r requirements.txt
```
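
Putting the virtual environment steps together, a minimal sketch for a Unix shell (on Windows,
the activation script lives under `venv\Scripts` instead):

```sh
python3 -m venv venv
source venv/bin/activate
pip install -r requirements.txt
```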
The packets are exchanged using a dedicated serial interface. You can use any generic USB-to-UART
converter device with the TX pin connected to the PA3 pin and the RX pin connected to the PA2 pin.
A default configuration file for the python application is provided and can be used by running
```sh
cp def_tmtc_conf.json tmtc_conf.json
```
After that, you can for example send a ping to the MCU using the following command
```sh
./main.py -p /ping
```

File diff suppressed because it is too large


@@ -1,18 +0,0 @@
use std::env;
use std::fs::File;
use std::io::Write;
use std::path::PathBuf;
fn main() {
// Put the linker script somewhere the linker can find it
let out = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
File::create(out.join("memory.x"))
.unwrap()
.write_all(include_bytes!("memory.x"))
.unwrap();
println!("cargo:rustc-link-search={}", out.display());
// Only re-run the build script when memory.x is changed,
// instead of when any part of the source code changes.
println!("cargo:rerun-if-changed=memory.x");
}


@@ -24,7 +24,9 @@ break main
# # send captured ITM to the file itm.fifo # # send captured ITM to the file itm.fifo
# # (the microcontroller SWO pin must be connected to the programmer SWO pin) # # (the microcontroller SWO pin must be connected to the programmer SWO pin)
# # 8000000 must match the core clock frequency # # 8000000 must match the core clock frequency
monitor tpiu config internal itm.txt uart off 8000000 # # 2000000 is the frequency of the SWO pin. This was added for newer
# openocd versions like v0.12.0.
# monitor tpiu config internal itm.txt uart off 8000000 2000000
# # OR: make the microcontroller SWO pin output compatible with UART (8N1) # # OR: make the microcontroller SWO pin output compatible with UART (8N1)
# # 8000000 must match the core clock frequency # # 8000000 must match the core clock frequency
@@ -32,7 +34,7 @@ monitor tpiu config internal itm.txt uart off 8000000
# monitor tpiu config external uart off 8000000 2000000 # monitor tpiu config external uart off 8000000 2000000
# # enable ITM port 0 # # enable ITM port 0
monitor itm port 0 on # monitor itm port 0 on
load load


@@ -1,4 +1,5 @@
/venv /venv
/.tmtc-history.txt
/log /log
/.idea/* /.idea/*
!/.idea/runConfigurations !/.idea/runConfigurations


@@ -1,4 +1,4 @@
{ {
"com_if": "serial_cobs", "com_if": "serial_cobs",
"serial_baudrate": 115200 "serial_baudrate": 115200
} }

satrs-example-stm32f3-disco/pyclient/main.py Normal file → Executable file

@@ -1,39 +1,40 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Example client for the sat-rs example application""" """Example client for the sat-rs example application"""
import enum
import struct import struct
import logging
import sys import sys
import time import time
from typing import Optional, cast from typing import Any, Optional, cast
from prompt_toolkit.history import FileHistory, History
from spacepackets.ecss.tm import CdsShortTimestamp
import tmtccmd import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusVerificator from spacepackets.ecss import PusTelemetry, PusTelecommand, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import CcsdsTmtcBackend, TcHandlerBase, ProcedureParamsWrapper from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest from tmtccmd.core.base import BackendRequest
from tmtccmd.core.ccsds_backend import QueueWrapper
from tmtccmd.logging import add_colorlog_console_logger
from tmtccmd.pus import VerificationWrapper from tmtccmd.pus import VerificationWrapper
from tmtccmd.tm import CcsdsTmHandler, SpecificApidHandlerBase from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com_if import ComInterface from tmtccmd.com import ComInterface
from tmtccmd.config import ( from tmtccmd.config import (
CmdTreeNode,
default_json_path, default_json_path,
SetupParams, SetupParams,
TmTcCfgHookBase, HookBase,
TmtcDefinitionWrapper,
CoreServiceList,
OpCodeEntry,
params_to_procedure_conversion, params_to_procedure_conversion,
) )
from tmtccmd.config.com_if import SerialCfgWrapper from tmtccmd.config.com import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging import get_console_logger
from tmtccmd.logging.pus import ( from tmtccmd.logging.pus import (
RegularTmtcLogWrapper, RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper, RawTmtcTimedLogWrapper,
TimedLogWhen, TimedLogWhen,
) )
from tmtccmd.tc import ( from tmtccmd.tmtc import (
TcQueueEntryType, TcQueueEntryType,
ProcedureWrapper, ProcedureWrapper,
TcProcedureType, TcProcedureType,
@@ -41,27 +42,26 @@ from tmtccmd.tc import (
SendCbParams, SendCbParams,
DefaultPusQueueHelper, DefaultPusQueueHelper,
) )
from tmtccmd.tm.pus_5_event import Service5Tm from tmtccmd.pus.s5_fsfw_event import Service5Tm
from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider from tmtccmd.util import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT from tmtccmd.util.obj_id import ObjectIdDictT
from tmtccmd.util.tmtc_printer import FsfwTmTcPrinter _LOGGER = logging.getLogger()
LOGGER = get_console_logger()
EXAMPLE_PUS_APID = 0x02 EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(TmTcCfgHookBase): class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str): def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path=json_cfg_path) super().__init__(json_cfg_path=json_cfg_path)
def assign_communication_interface(self, com_if_key: str) -> Optional[ComInterface]: def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com_if import ( from tmtccmd.config.com import (
create_com_interface_default, create_com_interface_default,
create_com_interface_cfg_default, create_com_interface_cfg_default,
) )
assert self.cfg_path is not None
cfg = create_com_interface_cfg_default( cfg = create_com_interface_cfg_default(
com_if_key=com_if_key, com_if_key=com_if_key,
json_cfg_path=self.cfg_path, json_cfg_path=self.cfg_path,
@@ -76,35 +76,14 @@ class SatRsConfigHook(TmTcCfgHookBase):
cfg.serial_cfg.serial_timeout = 0.5 cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg) return create_com_interface_default(cfg)
def get_tmtc_definitions(self) -> TmtcDefinitionWrapper: def get_command_definitions(self) -> CmdTreeNode:
from tmtccmd.config.globals import get_default_tmtc_defs """This function should return the root node of the command definition tree."""
return create_cmd_definition_tree()
defs = get_default_tmtc_defs() def get_cmd_history(self) -> Optional[History]:
srv_5 = OpCodeEntry() """Optionally return a history class for the past command paths which will be used
srv_5.add("0", "Event Test") when prompting a command path from the user in CLI mode."""
defs.add_service( return FileHistory(".tmtc-history.txt")
name=CoreServiceList.SERVICE_5.value,
info="PUS Service 5 Event",
op_code_entry=srv_5,
)
srv_17 = OpCodeEntry()
srv_17.add("0", "Ping Test")
defs.add_service(
name=CoreServiceList.SERVICE_17_ALT,
info="PUS Service 17 Test",
op_code_entry=srv_17,
)
srv_3 = OpCodeEntry()
defs.add_service(
name=CoreServiceList.SERVICE_3,
info="PUS Service 3 Housekeeping",
op_code_entry=srv_3,
)
return defs
def perform_mode_operation(self, tmtc_backend: CcsdsTmtcBackend, mode: int):
LOGGER.info("Mode operation hook was called")
pass
def get_object_ids(self) -> ObjectIdDictT: def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids from tmtccmd.config.objects import get_core_object_ids
@@ -112,74 +91,75 @@ class SatRsConfigHook(TmTcCfgHookBase):
return get_core_object_ids() return get_core_object_ids()
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
return root_node
class PusHandler(SpecificApidHandlerBase): class PusHandler(SpecificApidHandlerBase):
def __init__( def __init__(
self, self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper, verif_wrapper: VerificationWrapper,
printer: FsfwTmTcPrinter,
raw_logger: RawTmtcTimedLogWrapper, raw_logger: RawTmtcTimedLogWrapper,
): ):
super().__init__(EXAMPLE_PUS_APID, None) super().__init__(EXAMPLE_PUS_APID, None)
self.printer = printer self.file_logger = file_logger
self.raw_logger = raw_logger self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: any): def handle_tm(self, packet: bytes, _user_args: Any):
time_reader = CdsShortTimestamp.empty()
try: try:
tm_packet = PusTelemetry.unpack(packet) pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
except ValueError as e: except ValueError as e:
LOGGER.warning("Could not generate PUS TM object from raw data") _LOGGER.warning("Could not generate PUS TM object from raw data")
LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}") _LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e raise e
service = tm_packet.service service = pus_tm.service
dedicated_handler = False tm_packet = None
if service == 1: if service == 1:
tm_packet = Service1Tm.unpack(data=packet, params=UnpackParams(1, 2)) tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(time_reader, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet) res = self.verif_wrapper.add_tm(tm_packet)
if res is None: if res is None:
LOGGER.info( _LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] " f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}" f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
) )
LOGGER.warning( _LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}" f"No matching telecommand found for {tm_packet.tc_req_id}"
) )
else: else:
self.verif_wrapper.log_to_console(tm_packet, res) self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res) self.verif_wrapper.log_to_file(tm_packet, res)
dedicated_handler = True
if service == 3: if service == 3:
LOGGER.info("No handling for HK packets implemented") _LOGGER.info("No handling for HK packets implemented")
LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]") _LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet) pus_tm = PusTelemetry.unpack(packet, CdsShortTimestamp.empty())
if pus_tm.subservice == 25: if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8: if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet") raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:] json_str = pus_tm.source_data[8:]
dedicated_handler = True _LOGGER.info("received JSON string: " + json_str.decode("utf-8"))
if service == 5: if service == 5:
tm_packet = Service5Tm.unpack(packet) tm_packet = Service5Tm.unpack(packet, time_reader)
if service == 17: if service == 17:
tm_packet = Service17Tm.unpack(packet) tm_packet = Service17Tm.unpack(packet, time_reader)
dedicated_handler = True
if tm_packet.subservice == 2: if tm_packet.subservice == 2:
self.printer.file_logger.info("Received Ping Reply TM[17,2]") _LOGGER.info("Received Ping Reply TM[17,2]")
LOGGER.info("Received Ping Reply TM[17,2]")
else: else:
self.printer.file_logger.info( _LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}" f"Received Test Packet with unknown subservice {tm_packet.subservice}"
) )
if tm_packet is None: if tm_packet is None:
LOGGER.info( _LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory" f"The service {service} is not implemented in Telemetry Factory"
) )
tm_packet = PusTelemetry.unpack(packet) tm_packet = PusTelemetry.unpack(packet, time_reader)
self.raw_logger.log_tm(tm_packet) self.raw_logger.log_tm(pus_tm)
if not dedicated_handler and tm_packet is not None:
self.printer.handle_long_tm_print(packet_if=tm_packet, info_if=tm_packet)
def make_addressable_id(target_id: int, unique_id: int) -> bytes: def make_addressable_id(target_id: int, unique_id: int) -> bytes:
@@ -198,8 +178,11 @@ class TcHandler(TcHandlerBase):
self.seq_count_provider = seq_count_provider self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper( self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=None, queue_wrapper=QueueWrapper.empty(),
tc_sched_timestamp_len=7,
seq_cnt_provider=seq_count_provider, seq_cnt_provider=seq_count_provider,
pus_verificator=verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
) )
def send_cb(self, send_params: SendCbParams): def send_cb(self, send_params: SendCbParams):
@@ -212,61 +195,55 @@ ) )
) )
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc) self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack() raw_tc = pus_tc_wrapper.pus_tc.pack()
LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}") _LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc) send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG: elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry() log_entry = entry_helper.to_log_entry()
LOGGER.info(log_entry.log_str) _LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, helper: ProcedureWrapper): def queue_finished_cb(self, info: ProcedureWrapper):
if helper.proc_type == TcProcedureType.DEFAULT: if info.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure() def_proc = info.to_def_procedure()
LOGGER.info( _LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
f"Queue handling finished for service {def_proc.service} and "
f"op code {def_proc.op_code}"
)
def feed_cb(self, helper: ProcedureWrapper, wrapper: FeedWrapper): def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper q.queue_wrapper = wrapper.queue_wrapper
if helper.proc_type == TcProcedureType.DEFAULT: if info.proc_type == TcProcedureType.DEFAULT:
def_proc = helper.to_def_procedure() def_proc = info.to_def_procedure()
service = def_proc.service cmd_path = def_proc.cmd_path
op_code = def_proc.op_code if cmd_path == "/ping":
if (
service == CoreServiceList.SERVICE_17
or service == CoreServiceList.SERVICE_17_ALT
):
q.add_log_cmd("Sending PUS ping telecommand") q.add_log_cmd("Sending PUS ping telecommand")
return q.add_pus_tc(PusTelecommand(service=17, subservice=1)) q.add_pus_tc(PusTelecommand(service=17, subservice=1))
def main(): def main():
add_colorlog_console_logger(_LOGGER)
tmtccmd.init_printout(False) tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path()) hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper() parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser() parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser() parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args() parser_wrapper.add_def_proc_args()
post_args_wrapper = parser_wrapper.parse(hook_obj)
params = SetupParams() params = SetupParams()
post_args_wrapper = parser_wrapper.parse(hook_obj, params)
proc_wrapper = ProcedureParamsWrapper() proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui: if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(params, proc_wrapper) post_args_wrapper.set_params_without_prompts(proc_wrapper)
else: else:
post_args_wrapper.set_params_with_prompts(params, proc_wrapper) post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper( setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
) )
# Create console logger helper and file loggers # Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper() tmtc_logger = RegularTmtcLogWrapper()
printer = FsfwTmTcPrinter(tmtc_logger.logger) file_logger = tmtc_logger.logger
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1) raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator() verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, LOGGER, printer.file_logger) verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler # Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(verification_wrapper, printer, raw_logger) tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None) ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler) ccsds_handler.add_apid_handler(tm_handler)
@@ -288,7 +265,7 @@ def main():
if state.request == BackendRequest.TERMINATION_NO_ERROR: if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0) sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE: elif state.request == BackendRequest.DELAY_IDLE:
LOGGER.info("TMTC Client in IDLE mode") _LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0) time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER: elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8) time.sleep(0.8)


@@ -1,2 +1,2 @@
tmtccmd == 4.0.0a0 tmtccmd == 8.0.0rc.0
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd # -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd


@@ -1,17 +1,15 @@
#![no_std] #![no_std]
#![no_main] #![no_main]
use satrs_example_stm32f3_disco as _;
extern crate panic_itm; use stm32f3_discovery::leds::Leds;
use cortex_m_rt::entry;
use stm32f3_discovery::stm32f3xx_hal::delay::Delay; use stm32f3_discovery::stm32f3xx_hal::delay::Delay;
use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*}; use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*};
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch}; use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch};
#[entry] #[cortex_m_rt::entry]
fn main()-> ! { fn main() -> ! {
defmt::println!("STM32F3 Discovery Blinky");
let dp = pac::Peripherals::take().unwrap(); let dp = pac::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain(); let mut rcc = dp.RCC.constrain();
let cp = cortex_m::Peripherals::take().unwrap(); let cp = cortex_m::Peripherals::take().unwrap();
@@ -30,49 +28,49 @@ fn main()-> ! {
gpioe.pe14, gpioe.pe14,
gpioe.pe15, gpioe.pe15,
&mut gpioe.moder, &mut gpioe.moder,
&mut gpioe.otyper &mut gpioe.otyper,
); );
let delay_ms = 200u16; let delay_ms = 200u16;
loop { loop {
leds.ld3.toggle().ok(); leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld3.toggle().ok(); leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
//explicit on/off //explicit on/off
leds.ld4.on().ok(); leds.ld4_nw.on().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld4.off().ok(); leds.ld4_nw.off().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld5.on().ok(); leds.ld5_ne.on().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld5.off().ok(); leds.ld5_ne.off().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld6.on().ok(); leds.ld6_w.on().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld6.off().ok(); leds.ld6_w.off().ok();
delay.delay_ms(delay_ms);
leds.ld7.on().ok();
delay.delay_ms(delay_ms);
leds.ld7.off().ok();
delay.delay_ms(delay_ms);
leds.ld8.on().ok();
delay.delay_ms(delay_ms);
leds.ld8.off().ok();
delay.delay_ms(delay_ms);
leds.ld9.on().ok();
delay.delay_ms(delay_ms);
leds.ld9.off().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld10.on().ok(); leds.ld7_e.on().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
leds.ld10.off().ok(); leds.ld7_e.off().ok();
delay.delay_ms(delay_ms);
leds.ld8_sw.on().ok();
delay.delay_ms(delay_ms);
leds.ld8_sw.off().ok();
delay.delay_ms(delay_ms);
leds.ld9_se.on().ok();
delay.delay_ms(delay_ms);
leds.ld9_se.off().ok();
delay.delay_ms(delay_ms);
leds.ld10_s.on().ok();
delay.delay_ms(delay_ms);
leds.ld10_s.off().ok();
delay.delay_ms(delay_ms); delay.delay_ms(delay_ms);
} }
} }


@@ -0,0 +1,51 @@
#![no_main]
#![no_std]
use cortex_m_semihosting::debug;
use defmt_brtt as _; // global logger
use stm32f3xx_hal as _; // memory layout
use panic_probe as _;
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(debug::EXIT_SUCCESS);
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(debug::EXIT_FAILURE);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. the module can be in any file but there can only be at most
// one `#[tests]` module in this library crate
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}


@@ -1,38 +1,36 @@
#![no_std] #![no_std]
#![no_main] #![no_main]
extern crate panic_itm; // global logger + panicking-behavior + memory layout
use satrs_example_stm32f3_disco as _;
use rtic::app; use rtic::app;
use heapless::{ use heapless::{mpmc::Q8, Vec};
mpmc::Q16,
pool,
pool::singleton::{Box, Pool},
};
#[allow(unused_imports)] #[allow(unused_imports)]
use itm_logger::{debug, info, logger_init, warn}; use rtic_monotonics::systick::fugit::TimerInstantU32;
use satrs_core::spacepackets::{ecss::PusPacket, tm::PusTm}; use rtic_monotonics::systick::ExtU32;
use satrs_core::{ use satrs::seq_count::SequenceCountProviderCore;
pus::{EcssTmErrorWithSend, EcssTmSenderCore}, use satrs::{
seq_count::SequenceCountProviderCore, pool::StoreError,
pus::{EcssChannel, EcssTmSenderCore, EcssTmtcError, PusTmWrapper},
spacepackets::{ecss::PusPacket, ecss::WritablePusPacket},
}; };
use stm32f3xx_hal::dma::dma1; use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3}; use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2; use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent}; use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
use systick_monotonic::{fugit::Duration, Systick};
const UART_BAUD: u32 = 115200; const UART_BAUD: u32 = 115200;
const BLINK_FREQ_MS: u64 = 1000; const BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u64 = 20; const TX_HANDLER_FREQ_MS: u32 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u16 = 5; const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
const MAX_TC_LEN: usize = 200; const MAX_TC_LEN: usize = 128;
const MAX_TM_LEN: usize = 200; const MAX_TM_LEN: usize = 128;
pub const PUS_APID: u16 = 0x02; pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>; type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>; type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type MsDuration = Duration<u64, 1, 1000>; type InstantFugit = TimerInstantU32<1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>; type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>; type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
@@ -51,10 +49,12 @@ static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// transfer buffer. // transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN]; static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
static TX_REQUESTS: Q16<(Box<poolmod::TM>, usize)> = Q16::new(); type TmPacket = Vec<u8, MAX_TM_LEN>;
type TcPacket = Vec<u8, MAX_TC_LEN>;
const TC_POOL_SLOTS: usize = 12; static TM_REQUESTS: Q8<TmPacket> = Q8::new();
const TM_POOL_SLOTS: usize = 12;
use core::cell::RefCell;
use core::sync::atomic::{AtomicU16, Ordering}; use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef { pub struct SeqCountProviderAtomicRef {
@@ -88,70 +88,59 @@ impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef = static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed); SeqCountProviderAtomicRef::new(Ordering::Relaxed);
// Otherwise, warnings because of heapless pool macro.
#[allow(non_camel_case_types)]
mod poolmod {
use super::*;
// Must hold full TC length including COBS overhead.
pool!(TC: [u8; TC_BUF_LEN]);
// Only encoded at the end, so no need to account for COBS overhead.
pool!(TM: [u8; MAX_TM_LEN]);
}
pub struct TxIdle { pub struct TxIdle {
tx: TxType, tx: TxType,
dma_channel: dma1::C7, dma_channel: dma1::C7,
} }
#[derive(Debug)]
pub enum TmStoreError {
StoreFull,
StoreSlotsTooSmall,
}
impl From<TmStoreError> for EcssTmErrorWithSend<TmStoreError> {
fn from(value: TmStoreError) -> Self {
Self::SendError(value)
}
}
pub struct TmSender { pub struct TmSender {
mem_block: Option<Box<poolmod::TM>>, vec: Option<RefCell<Vec<u8, MAX_TM_LEN>>>,
ctx: &'static str,
} }
impl TmSender { impl TmSender {
pub fn new(mem_block: Box<poolmod::TM>, ctx: &'static str) -> Self { pub fn new(tm_packet: TmPacket) -> Self {
Self { Self {
mem_block: Some(mem_block), vec: Some(RefCell::new(tm_packet)),
ctx,
} }
} }
} }
impl EcssChannel for TmSender {
fn id(&self) -> satrs::ChannelId {
0
}
}
impl EcssTmSenderCore for TmSender { impl EcssTmSenderCore for TmSender {
type Error = TmStoreError; fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> {
let vec = self.vec.as_ref();
fn send_tm( if vec.is_none() {
&mut self,
tm: PusTm,
) -> Result<(), satrs_core::pus::EcssTmErrorWithSend<Self::Error>> {
let mem_block = self.mem_block.take();
if mem_block.is_none() {
panic!("send_tm should only be called once"); panic!("send_tm should only be called once");
} }
let mut mem_block = mem_block.unwrap(); let vec_ref = vec.unwrap();
if tm.len_packed() > MAX_TM_LEN { let mut vec = vec_ref.borrow_mut();
return Err(EcssTmErrorWithSend::SendError( match tm {
TmStoreError::StoreSlotsTooSmall, PusTmWrapper::InStore(addr) => return Err(EcssTmtcError::CantSendAddr(addr)),
)); PusTmWrapper::Direct(tm) => {
if tm.len_written() > MAX_TM_LEN {
return Err(EcssTmtcError::Store(StoreError::DataTooLarge(
tm.len_written(),
)));
}
vec.resize(tm.len_written(), 0).expect("vec resize failed");
tm.write_to_bytes(vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm.service(),
tm.subservice(),
tm.len_written()
);
drop(vec);
TM_REQUESTS
.enqueue(vec_ref.take())
.map_err(|_| EcssTmtcError::Store(StoreError::StoreFull(0)))?;
}
} }
tm.write_to_bytes(mem_block.as_mut_slice())
.map_err(|e| EcssTmErrorWithSend::EcssTmError(e.into()))?;
info!(target: self.ctx, "Sending TM[{},{}] with size {}", tm.service(), tm.subservice(), tm.len_packed());
TX_REQUESTS
.enqueue((mem_block, tm.len_packed()))
.map_err(|_| TmStoreError::StoreFull)?;
Ok(()) Ok(())
} }
} }
@@ -163,33 +152,36 @@ pub enum UartTxState {
Transmitting(Option<TxDmaTransferType>), Transmitting(Option<TxDmaTransferType>),
} }
#[app(device = stm32f3xx_hal::pac, peripherals = true, dispatchers = [TIM20_BRK, TIM20_UP, TIM20_TRG_COM])] pub struct UartTxShared {
last_completed: Option<InstantFugit>,
state: UartTxState,
}
#[app(device = stm32f3xx_hal::pac, peripherals = true)]
mod app { mod app {
use super::*; use super::*;
use core::slice::Iter; use core::slice::Iter;
use cortex_m::iprintln; use rtic_monotonics::systick::Systick;
use satrs_core::pus::verification::FailParams; use rtic_monotonics::Monotonic;
use satrs_core::pus::verification::VerificationReporterCore; use satrs::pus::verification::FailParams;
use satrs_core::spacepackets::{ use satrs::pus::verification::VerificationReporterCore;
ecss::EcssEnumU16, use satrs::spacepackets::{
tc::PusTc, ecss::tc::PusTcReader, ecss::tm::PusTmCreator, ecss::tm::PusTmSecondaryHeader,
time::cds::P_FIELD_BASE, ecss::EcssEnumU16, time::cds::P_FIELD_BASE, CcsdsPacket, SpHeader,
tm::{PusTm, PusTmSecondaryHeader},
CcsdsPacket, SpHeader,
}; };
#[allow(unused_imports)] #[allow(unused_imports)]
use stm32f3_discovery::leds::Direction; use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds; use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*; use stm32f3xx_hal::prelude::*;
use stm32f3xx_hal::Toggle;
use stm32f3_discovery::switch_hal::OutputSwitch; use stm32f3_discovery::switch_hal::OutputSwitch;
use stm32f3xx_hal::Switch;
#[allow(dead_code)] #[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>; type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
#[shared] #[shared]
struct Shared { struct Shared {
tx_transfer: UartTxState, tx_shared: UartTxShared,
rx_transfer: Option<RxDmaTransferType>, rx_transfer: Option<RxDmaTransferType>,
} }
@@ -201,18 +193,14 @@ mod app {
curr_dir: Iter<'static, Direction>, curr_dir: Iter<'static, Direction>,
} }
#[monotonic(binds = SysTick, default = true)] #[init]
type MonoTimer = Systick<1000>; fn init(cx: init::Context) -> (Shared, Local) {
#[init(local = [
tc_pool_mem: [u8; TC_BUF_LEN * TC_POOL_SLOTS] = [0; TC_BUF_LEN * TC_POOL_SLOTS],
tm_pool_mem: [u8; MAX_TM_LEN * TM_POOL_SLOTS] = [0; MAX_TM_LEN * TM_POOL_SLOTS]
])]
fn init(mut cx: init::Context) -> (Shared, Local, init::Monotonics) {
let mut rcc = cx.device.RCC.constrain(); let mut rcc = cx.device.RCC.constrain();
let mono = Systick::new(cx.core.SYST, 8_000_000); // Initialize the systick interrupt & obtain the token to prove that we did
logger_init(); let systick_mono_token = rtic_monotonics::create_systick_token!();
Systick::start(cx.core.SYST, 8_000_000, systick_mono_token);
let mut flash = cx.device.FLASH.constrain(); let mut flash = cx.device.FLASH.constrain();
let clocks = rcc let clocks = rcc
.cfgr .cfgr
@@ -220,15 +208,12 @@ mod app {
.sysclk(8.MHz()) .sysclk(8.MHz())
.pclk1(8.MHz()) .pclk1(8.MHz())
.freeze(&mut flash.acr); .freeze(&mut flash.acr);
// setup ITM output
iprintln!( // Set up monotonic timer.
&mut cx.core.ITM.stim[0], //let mono_timer = MonoTimer::new(cx.core.DWT, clocks, &mut cx.core.DCB);
"Starting sat-rs demo application for the STM32F3-Discovery"
); defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb); let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
// Assign memory to the pools.
poolmod::TC::grow(cx.local.tc_pool_mem);
poolmod::TM::grow(cx.local.tm_pool_mem);
let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap(); let verif_reporter = VerificationReporterCore::new(PUS_APID).unwrap();
@@ -264,106 +249,138 @@ mod app {
clocks, clocks,
&mut rcc.apb1, &mut rcc.apb1,
); );
usart2.configure_rx_interrupt(RxEvent::Idle, Toggle::On); usart2.configure_rx_interrupt(RxEvent::Idle, Switch::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately. // This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Toggle::On); usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Switch::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb); let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (tx_serial, mut rx_serial) = usart2.split(); let (mut tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, clear it. It will only be reset // This interrupt is immediately triggered, clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event) // by the hardware when data is received on RX (RXNE event)
rx_serial.clear_event(RxEvent::Idle); rx_serial.clear_event(RxEvent::Idle);
// For some reason, this is also immediately triggered..
tx_serial.clear_event(TxEvent::TransmissionComplete);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6); let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
info!(target: "init", "Spawning tasks"); defmt::info!("Spawning tasks");
blink::spawn().unwrap(); blink::spawn().unwrap();
serial_tx_handler::spawn().unwrap(); serial_tx_handler::spawn().unwrap();
( (
Shared { Shared {
tx_transfer: UartTxState::Idle(Some(TxIdle { tx_shared: UartTxShared {
tx: tx_serial, last_completed: None,
dma_channel: dma1.ch7, state: UartTxState::Idle(Some(TxIdle {
})), tx: tx_serial,
dma_channel: dma1.ch7,
})),
},
rx_transfer: Some(rx_transfer), rx_transfer: Some(rx_transfer),
}, },
Local { Local {
//timer: mono_timer,
leds, leds,
last_dir: Direction::North, last_dir: Direction::North,
curr_dir: Direction::iter(), curr_dir: Direction::iter(),
verif_reporter, verif_reporter,
}, },
init::Monotonics(mono),
) )
} }
#[task(local = [leds, curr_dir, last_dir])] #[task(local = [leds, curr_dir, last_dir])]
fn blink(cx: blink::Context) { async fn blink(cx: blink::Context) {
let toggle_leds = |dir: &Direction| { let blink::LocalResources {
let leds = cx.local.leds; leds,
let last_led = leds.for_direction(*cx.local.last_dir); curr_dir,
last_dir,
..
} = cx.local;
let mut toggle_leds = |dir: &Direction| {
let last_led = leds.for_direction(*last_dir);
last_led.off().ok(); last_led.off().ok();
let led = leds.for_direction(*dir); let led = leds.for_direction(*dir);
led.on().ok(); led.on().ok();
*cx.local.last_dir = *dir; *last_dir = *dir;
}; };
loop {
match cx.local.curr_dir.next() { match curr_dir.next() {
Some(dir) => { Some(dir) => {
toggle_leds(dir); toggle_leds(dir);
} }
None => { None => {
*cx.local.curr_dir = Direction::iter(); *curr_dir = Direction::iter();
toggle_leds(cx.local.curr_dir.next().unwrap()); toggle_leds(curr_dir.next().unwrap());
}
} }
Systick::delay(BLINK_FREQ_MS.millis()).await;
} }
blink::spawn_after(MsDuration::from_ticks(BLINK_FREQ_MS)).unwrap();
} }
#[task( #[task(
shared = [tx_transfer], shared = [tx_shared],
local = []
)] )]
fn serial_tx_handler(mut cx: serial_tx_handler::Context) { async fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
if let Some((buf, len)) = TX_REQUESTS.dequeue() { loop {
cx.shared.tx_transfer.lock(|tx_state| match tx_state { let is_idle = cx.shared.tx_shared.lock(|tx_shared| {
UartTxState::Idle(tx) => { if let UartTxState::Idle(_) = tx_shared.state {
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]); return true;
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
let mut_tx_dma_buf = unsafe { &mut DMA_TX_BUF };
// 0 sentinel value as start marker
mut_tx_dma_buf[0] = 0;
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
let encoded_len = cobs::encode(&buf[0..len], &mut mut_tx_dma_buf[1..]);
// 0 end marker
mut_tx_dma_buf[encoded_len + 1] = 0;
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
let transfer = tx_idle
.tx
.write_all(&mut_tx_dma_buf[0..encoded_len + 2], tx_idle.dma_channel);
*tx_state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => {
// This is a SW configuration error. Only the ISR which
// detects transfer completion should be able to spawn a new
// task, and that ISR should set the state to IDLE.
panic!("invalid internal tx state detected")
}
})
} else {
cx.shared.tx_transfer.lock(|tx_state| {
if let UartTxState::Idle(_) = tx_state {
serial_tx_handler::spawn_after(MsDuration::from_ticks(TX_HANDLER_FREQ_MS))
.unwrap();
} }
false
}); });
if is_idle {
let last_completed = cx.shared.tx_shared.lock(|shared| shared.last_completed);
if let Some(last_completed) = last_completed {
let elapsed_ms = (Systick::now() - last_completed).to_millis();
if elapsed_ms < MIN_DELAY_BETWEEN_TX_PACKETS_MS {
Systick::delay((MIN_DELAY_BETWEEN_TX_PACKETS_MS - elapsed_ms).millis())
.await;
}
}
} else {
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
if let Some(vec) = TM_REQUESTS.dequeue() {
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(tx) => {
let encoded_len;
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
unsafe {
// 0 sentinel value as start marker
DMA_TX_BUF[0] = 0;
encoded_len =
cobs::encode(&vec[0..vec.len()], &mut DMA_TX_BUF[1..]);
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
// 0 end marker
DMA_TX_BUF[encoded_len + 1] = 0;
}
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
// SAFETY: The DMA is the exclusive writer to the DMA buffer now.
let transfer = tx_idle.tx.write_all(
unsafe { &DMA_TX_BUF[0..encoded_len + 2] },
tx_idle.dma_channel,
);
tx_shared.state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => (),
});
// Check for completion after 1 ms
Systick::delay(1.millis()).await;
continue;
}
// Nothing to do, and we are idle.
Systick::delay(TX_HANDLER_FREQ_MS.millis()).await;
} }
} }
@@ -375,14 +392,13 @@ mod app {
verif_reporter verif_reporter
], ],
)] )]
fn serial_rx_handler( async fn serial_rx_handler(
cx: serial_rx_handler::Context, cx: serial_rx_handler::Context,
received_packet: Box<poolmod::TC>, received_packet: Vec<u8, MAX_TC_LEN>,
rx_len: usize,
) { ) {
let tgt: &'static str = "serial_rx_handler"; defmt::info!("running rx handler");
cx.local.stamp_buf[0] = P_FIELD_BASE; cx.local.stamp_buf[0] = P_FIELD_BASE;
info!(target: tgt, "Received packet with {} bytes", rx_len); defmt::info!("Received packet with {} bytes", received_packet.len());
let decode_buf = cx.local.decode_buf; let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice(); let packet = received_packet.as_slice();
let mut start_idx = None; let mut start_idx = None;
@@ -393,17 +409,14 @@ mod app {
} }
} }
if start_idx.is_none() { if start_idx.is_none() {
warn!( defmt::warn!("decoding error, can only process cobs encoded frames, data is all 0");
target: tgt,
"decoding error, can only process cobs encoded frames, data is all 0"
);
return; return;
} }
let start_idx = start_idx.unwrap(); let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) { match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => { Ok(len) => {
info!(target: tgt, "Decoded packet length: {}", len); defmt::info!("Decoded packet length: {}", len);
let pus_tc = PusTc::from_bytes(decode_buf); let pus_tc = PusTcReader::new(decode_buf);
let verif_reporter = cx.local.verif_reporter; let verif_reporter = cx.local.verif_reporter;
match pus_tc { match pus_tc {
Ok((tc, tc_len)) => handle_tc( Ok((tc, tc_len)) => handle_tc(
@@ -412,32 +425,27 @@ mod app {
verif_reporter, verif_reporter,
cx.local.src_data_buf, cx.local.src_data_buf,
cx.local.stamp_buf, cx.local.stamp_buf,
tgt,
), ),
Err(e) => { Err(_e) => {
warn!(target: tgt, "Error unpacking PUS TC: {}", e); // TODO: Print error after API rework.
defmt::warn!("Error unpacking PUS TC");
} }
} }
} }
Err(_) => { Err(_) => {
warn!( defmt::warn!("decoding error, can only process cobs encoded frames")
target: tgt,
"decoding error, can only process cobs encoded frames"
)
} }
} }
} }
fn handle_tc( fn handle_tc(
tc: PusTc, tc: PusTcReader,
tc_len: usize, tc_len: usize,
verif_reporter: &mut VerificationReporterCore, verif_reporter: &mut VerificationReporterCore,
src_data_buf: &mut [u8; MAX_TM_LEN], src_data_buf: &mut [u8; MAX_TM_LEN],
stamp_buf: &[u8; 7], stamp_buf: &[u8; 7],
tgt: &'static str,
) { ) {
info!( defmt::info!(
target: tgt,
"Found PUS TC [{},{}] with length {}", "Found PUS TC [{},{}] with length {}",
tc.service(), tc.service(),
tc.subservice(), tc.subservice(),
@@ -446,38 +454,32 @@ mod app {
let token = verif_reporter.add_tc(&tc); let token = verif_reporter.add_tc(&tc);
if tc.apid() != PUS_APID { if tc.apid() != PUS_APID {
warn!(target: tgt, "Received tc with unknown APID {}", tc.apid()); defmt::warn!("Received tc with unknown APID {}", tc.apid());
let sendable = verif_reporter let sendable = verif_reporter
.acceptance_failure( .acceptance_failure(
src_data_buf, src_data_buf,
token, token,
&SEQ_COUNT_PROVIDER, SEQ_COUNT_PROVIDER.get(),
FailParams::new(stamp_buf, &EcssEnumU16::new(0), None), 0,
FailParams::new(stamp_buf, &EcssEnumU16::new(0), &[]),
) )
.unwrap(); .unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]); let sender = TmSender::new(TmPacket::new());
let mut sender = TmSender::new(mem_block, tgt); if let Err(_e) = verif_reporter.send_acceptance_failure(sendable, &sender) {
if let Err(e) = defmt::warn!("Sending acceptance failure failed");
verif_reporter.send_acceptance_failure(sendable, &SEQ_COUNT_PROVIDER, &mut sender)
{
warn!(target: tgt, "Sending acceptance failure failed: {:?}", e.0);
}; };
return; return;
} }
let sendable = verif_reporter let sendable = verif_reporter
.acceptance_success(src_data_buf, token, &SEQ_COUNT_PROVIDER, stamp_buf) .acceptance_success(src_data_buf, token, SEQ_COUNT_PROVIDER.get(), 0, stamp_buf)
.unwrap(); .unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]); let sender = TmSender::new(TmPacket::new());
let mut sender = TmSender::new(mem_block, tgt); let accepted_token = match verif_reporter.send_acceptance_success(sendable, &sender) {
let accepted_token = match verif_reporter.send_acceptance_success(
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token, Ok(token) => token,
Err(e) => { Err(_e) => {
warn!(target: "serial_rx_handler", "Sending acceptance success failed: {:?}", e.0); // TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
defmt::warn!("Sending acceptance success failed");
return; return;
} }
}; };
@@ -485,47 +487,51 @@ mod app {
if tc.service() == 17 { if tc.service() == 17 {
if tc.subservice() == 1 { if tc.subservice() == 1 {
let sendable = verif_reporter let sendable = verif_reporter
.start_success(src_data_buf, accepted_token, &SEQ_COUNT_PROVIDER, stamp_buf) .start_success(
src_data_buf,
accepted_token,
SEQ_COUNT_PROVIDER.get(),
0,
stamp_buf,
)
.unwrap(); .unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]); // let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]);
let mut sender = TmSender::new(mem_block, tgt); let sender = TmSender::new(TmPacket::new());
let started_token = match verif_reporter.send_start_success( let started_token = match verif_reporter.send_start_success(sendable, &sender) {
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
Ok(token) => token, Ok(token) => token,
Err(e) => { Err(_e) => {
warn!(target: tgt, "Sending acceptance success failed: {:?}", e.0); // TODO: Print error as soon as EcssTmtcError has Format attr.. or rework API.
defmt::warn!("Sending acceptance success failed");
return; return;
} }
}; };
info!( defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
target: tgt,
"Received PUS ping telecommand, sending ping reply TM[17,2]"
);
let mut sp_header = let mut sp_header =
SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap(); SpHeader::tc_unseg(PUS_APID, SEQ_COUNT_PROVIDER.get(), 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf); let sec_header = PusTmSecondaryHeader::new_simple(17, 2, stamp_buf);
let ping_reply = PusTm::new(&mut sp_header, sec_header, None, true); let ping_reply = PusTmCreator::new(&mut sp_header, sec_header, &[], true);
let mut mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]); let mut tm_packet = TmPacket::new();
let reply_len = ping_reply.write_to_bytes(mem_block.as_mut_slice()).unwrap(); tm_packet
if TX_REQUESTS.enqueue((mem_block, reply_len)).is_err() { .resize(ping_reply.len_written(), 0)
warn!(target: tgt, "TC queue full"); .expect("vec resize failed");
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
if TM_REQUESTS.enqueue(tm_packet).is_err() {
defmt::warn!("TC queue full");
return; return;
} }
SEQ_COUNT_PROVIDER.increment(); SEQ_COUNT_PROVIDER.increment();
let sendable = verif_reporter let sendable = verif_reporter
.completion_success(src_data_buf, started_token, &SEQ_COUNT_PROVIDER, stamp_buf) .completion_success(
src_data_buf,
started_token,
SEQ_COUNT_PROVIDER.get(),
0,
stamp_buf,
)
.unwrap(); .unwrap();
let mem_block = poolmod::TM::alloc().unwrap().init([0u8; MAX_TM_LEN]); let sender = TmSender::new(TmPacket::new());
let mut sender = TmSender::new(mem_block, tgt); if let Err(_e) = verif_reporter.send_step_or_completion_success(sendable, &sender) {
if let Err(e) = verif_reporter.send_step_or_completion_success( defmt::warn!("Sending completion success failed");
sendable,
&SEQ_COUNT_PROVIDER,
&mut sender,
) {
warn!(target: tgt, "Sending completion success failed: {:?}", e.0);
} }
} else { } else {
// TODO: Invalid subservice // TODO: Invalid subservice
@@ -535,26 +541,26 @@ mod app {
    #[task(binds = DMA1_CH6, shared = [rx_transfer])]
    fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
+        let mut tc_packet = TcPacket::new();
        cx.shared.rx_transfer.lock(|rx_transfer| {
            let rx_ref = rx_transfer.as_ref().unwrap();
            if rx_ref.is_complete() {
                let uart_rx_owned = rx_transfer.take().unwrap();
                let (buf, c, rx) = uart_rx_owned.stop();
                // The received data is transferred to another task now to avoid any processing overhead
-                // during the interrupt. There are multiple ways to do this, we use a memory pool here
-                // to do this.
-                let mut mem_block = poolmod::TC::alloc()
-                    .expect("allocating memory block for rx failed")
-                    .init([0u8; TC_BUF_LEN]);
-                // Copy data into memory pool.
-                mem_block.copy_from_slice(buf);
+                // during the interrupt. There are multiple ways to do this, we use a stack allocated
+                // vector here to do this.
+                tc_packet.resize(buf.len(), 0).expect("vec resize failed");
+                tc_packet.copy_from_slice(buf);
+                // Start the next transfer as soon as possible.
                *rx_transfer = Some(rx.read_exact(buf, c));
-                // Only send owning pointer to pool memory and the received packet length.
-                serial_rx_handler::spawn(mem_block, TC_BUF_LEN)
-                    .expect("spawning rx handler task failed");
+                // Send the vector to a regular task.
+                serial_rx_handler::spawn(tc_packet).expect("spawning rx handler task failed");
                // If this happens, there is a high chance that the maximum packet length was
                // exceeded. Circular mode is not used here, so data might be missed.
-                warn!(
+                defmt::warn!(
                    "rx transfer with maximum length {}, might miss data",
                    TC_BUF_LEN
                );
@@ -562,23 +568,26 @@ mod app {
        });
    }
-    #[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_transfer])]
+    #[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_shared])]
    fn serial_isr(mut cx: serial_isr::Context) {
-        cx.shared.tx_transfer.lock(|tx_state| match tx_state {
-            UartTxState::Idle(_) => (),
-            UartTxState::Transmitting(transfer) => {
-                let transfer_ref = transfer.as_ref().unwrap();
-                if transfer_ref.is_complete() {
-                    let transfer = transfer.take().unwrap();
-                    let (_, dma_channel, tx) = transfer.stop();
-                    *tx_state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
-                    serial_tx_handler::spawn_after(MsDuration::from_ticks(
-                        MIN_DELAY_BETWEEN_TX_PACKETS_MS.into(),
-                    ))
-                    .unwrap();
+        cx.shared
+            .tx_shared
+            .lock(|tx_shared| match &mut tx_shared.state {
+                UartTxState::Idle(_) => (),
+                UartTxState::Transmitting(transfer) => {
+                    let transfer_ref = transfer.as_ref().unwrap();
+                    if transfer_ref.is_complete() {
+                        let transfer = transfer.take().unwrap();
+                        let (_, dma_channel, mut tx) = transfer.stop();
+                        tx.clear_event(TxEvent::TransmissionComplete);
+                        tx_shared.state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
+                        // We cache the last completed time to ensure that there is a minimum delay
+                        // between consecutive transferred packets.
+                        tx_shared.last_completed = Some(Systick::now());
+                    }
                }
-            }
-        });
+            });
+        let mut tc_packet = TcPacket::new();
        cx.shared.rx_transfer.lock(|rx_transfer| {
            let rx_transfer_ref = rx_transfer.as_ref().unwrap();
            // Received a partial packet.
@@ -586,17 +595,14 @@ mod app {
            let rx_transfer_owned = rx_transfer.take().unwrap();
            let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
            // The received data is transferred to another task now to avoid any processing overhead
-            // during the interrupt. There are multiple ways to do this, we use a memory pool here
-            // to do this.
-            let mut mem_block = poolmod::TC::alloc()
-                .expect("allocating memory block for rx failed")
-                .init([0u8; TC_BUF_LEN]);
-            // Copy data into memory pool.
-            mem_block[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
+            // during the interrupt. There are multiple ways to do this, we use a stack
+            // allocated vector to do this.
+            tc_packet
+                .resize(rx_len as usize, 0)
+                .expect("vec resize failed");
+            tc_packet[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
            rx.clear_event(RxEvent::Idle);
-            // Only send owning pointer to pool memory and the received packet length.
-            serial_rx_handler::spawn(mem_block, rx_len as usize)
-                .expect("spawning rx handler task failed");
+            serial_rx_handler::spawn(tc_packet).expect("spawning rx handler failed");
            *rx_transfer = Some(rx.read_exact(buf, ch));
        }
    });
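The change above swaps the memory-pool blocks for fixed-capacity, stack-allocated vectors that are moved into the spawned task. The sketch below illustrates that pattern in isolation; the TcPacket alias is assumed to be a heapless::Vec and the capacity value is made up, since neither definition is shown in this excerpt.

// Sketch only: TcPacket and MAX_TC_LEN are assumptions for illustration,
// not the definitions used by the example itself.
use heapless::Vec;

const MAX_TC_LEN: usize = 128;
type TcPacket = Vec<u8, MAX_TC_LEN>;

// Copy a DMA buffer into an owned, fixed-capacity vector which can then be
// passed by value to a regular task for processing.
fn tc_packet_from_dma_buf(dma_buf: &[u8]) -> TcPacket {
    let mut tc_packet = TcPacket::new();
    // resize only fails if the requested length exceeds the compile-time capacity.
    tc_packet
        .resize(dma_buf.len(), 0)
        .expect("vec resize failed");
    tc_packet.copy_from_slice(dma_buf);
    tc_packet
}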

View File

@@ -5,8 +5,8 @@
    // List of extensions which should be recommended for users of this workspace.
    "recommendations": [
        "rust-lang.rust",
-        "marus25.cortex-debug",
+        "probe-rs.probe-rs-debugger"
    ],
    // List of extensions recommended by VS Code that should not be recommended for users of this workspace.
    "unwantedRecommendations": []
}

View File

@ -0,0 +1,22 @@
{
"version": "0.2.0",
"configurations": [
{
"preLaunchTask": "${defaultBuildTask}",
"type": "probe-rs-debug",
"request": "launch",
"name": "probe-rs Debugging ",
"flashingConfig": {
"flashingEnabled": true
},
"chip": "STM32F303VCTx",
"coreConfigs": [
{
"programBinary": "${workspaceFolder}/target/thumbv7em-none-eabihf/debug/satrs-example-stm32f3-disco",
"rttEnabled": true,
"svdFile": "STM32F303.svd"
}
]
}
]
}

View File

@@ -11,7 +11,8 @@ proc CDSWOConfigure { CDCPUFreqHz CDSWOFreqHz CDSWOOutput } {
    # Alternative option: Pipe ITM output into itm.txt file
    # tpiu config internal itm.txt uart off $CDCPUFreqHz
-    # Default option so SWO display of VS code works.
+    # Default option so SWO display of VS code works. Please note that this might not be required
+    # anymore starting at openocd v0.12.0
    tpiu config internal $CDSWOOutput uart off $CDCPUFreqHz $CDSWOFreqHz
    itm port 0 on
}

View File

@@ -1,6 +1,6 @@
[package]
name = "satrs-example"
-version = "0.1.0"
+version = "0.1.1"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
default-run = "satrs-example"
@@ -17,16 +17,23 @@ zerocopy = "0.6"
csv = "1"
num_enum = "0.7"
thiserror = "1"
+lazy_static = "1"
+strum = { version = "0.26", features = ["derive"] }
derive-new = "0.5"
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"

[dependencies.satrs]
+# version = "0.1.1"
path = "../satrs"
+features = ["test_util"]

[dependencies.satrs-mib]
-# version = "0.1.0"
+version = "0.1.1"
path = "../satrs-mib"

[features]
dyn_tmtc = []
default = ["dyn_tmtc"]

+[dev-dependencies]
+env_logger = "0.11"

View File

@@ -4,11 +4,12 @@ import dataclasses
import enum
import struct

-from spacepackets.ecss.tc import PacketId, PacketType

-EXAMPLE_PUS_APID = 0x02
-EXAMPLE_PUS_PACKET_ID_TM = PacketId(PacketType.TM, True, EXAMPLE_PUS_APID)
-TM_PACKET_IDS = [EXAMPLE_PUS_PACKET_ID_TM]
+class Apid(enum.IntEnum):
+    SCHED = 1
+    GENERIC_PUS = 2
+    ACS = 3
+    CFDP = 4


class EventSeverity(enum.IntEnum):
@@ -36,8 +37,8 @@ class EventU32:
    )


-class RequestTargetId(enum.IntEnum):
-    ACS = 1
+class AcsId(enum.IntEnum):
+    MGM_0 = 0


class AcsHkIds(enum.IntEnum):

View File

@@ -3,10 +3,11 @@
import logging
import sys
import time
-from typing import Optional
+from typing import Any, Optional

from prompt_toolkit.history import History
from prompt_toolkit.history import FileHistory
+from spacepackets.ccsds import PacketId, PacketType

import tmtccmd
from spacepackets.ecss import PusTelemetry, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
@@ -16,7 +17,7 @@ from spacepackets.ccsds.time import CdsShortTimestamp
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.pus import VerificationWrapper
-from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
+from tmtccmd.tmtc import CcsdsTmHandler, GenericApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
    CmdTreeNode,
@@ -46,7 +47,7 @@ from tmtccmd.util.obj_id import ObjectIdDictT

import pus_tc
-from common import EXAMPLE_PUS_APID, TM_PACKET_IDS, EventU32
+from common import Apid, EventU32

_LOGGER = logging.getLogger()
@@ -62,10 +63,13 @@ class SatRsConfigHook(HookBase):
        )
        assert self.cfg_path is not None
+        packet_id_list = []
+        for apid in Apid:
+            packet_id_list.append(PacketId(PacketType.TM, True, apid))
        cfg = create_com_interface_cfg_default(
            com_if_key=com_if_key,
            json_cfg_path=self.cfg_path,
-            space_packet_ids=TM_PACKET_IDS,
+            space_packet_ids=packet_id_list,
        )
        assert cfg is not None
        return create_com_interface_default(cfg)
@@ -85,19 +89,19 @@ class SatRsConfigHook(HookBase):
        return get_core_object_ids()


-class PusHandler(SpecificApidHandlerBase):
+class PusHandler(GenericApidHandlerBase):
    def __init__(
        self,
        file_logger: logging.Logger,
        verif_wrapper: VerificationWrapper,
        raw_logger: RawTmtcTimedLogWrapper,
    ):
-        super().__init__(EXAMPLE_PUS_APID, None)
+        super().__init__(None)
        self.file_logger = file_logger
        self.raw_logger = raw_logger
        self.verif_wrapper = verif_wrapper

-    def handle_tm(self, packet: bytes, _user_args: any):
+    def handle_tm(self, apid: int, packet: bytes, _user_args: Any):
        try:
            pus_tm = PusTelemetry.unpack(packet, time_reader=CdsShortTimestamp.empty())
        except ValueError as e:
@@ -177,7 +181,7 @@ class TcHandler(TcHandlerBase):
            tc_sched_timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE,
            seq_cnt_provider=seq_count_provider,
            pus_verificator=self.verif_wrapper.pus_verificator,
-            default_pus_apid=EXAMPLE_PUS_APID,
+            default_pus_apid=None,
        )

    def send_cb(self, send_params: SendCbParams):
@@ -221,7 +225,6 @@ def main():
        post_args_wrapper.set_params_without_prompts(proc_wrapper)
    else:
        post_args_wrapper.set_params_with_prompts(proc_wrapper)
-    params.apid = EXAMPLE_PUS_APID
    setup_args = SetupWrapper(
        hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
    )
@@ -233,8 +236,9 @@ def main():
    verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
    # Create primary TM handler and add it to the CCSDS Packet Handler
    tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
-    ccsds_handler = CcsdsTmHandler(generic_handler=None)
-    ccsds_handler.add_apid_handler(tm_handler)
+    ccsds_handler = CcsdsTmHandler(generic_handler=tm_handler)
+    # TODO: We could add the CFDP handler for the CFDP APID at a later stage.
+    # ccsds_handler.add_apid_handler(tm_handler)
    # Create TC handler
    seq_count_provider = PusFileSeqCountProvider()

View File

@@ -1,27 +1,58 @@
import datetime
+import struct
import logging

from spacepackets.ccsds import CdsShortTimestamp
from spacepackets.ecss import PusTelecommand
from tmtccmd.config import CmdTreeNode
+from tmtccmd.pus.tc.s200_fsfw_mode import Mode
from tmtccmd.tmtc import DefaultPusQueueHelper
from tmtccmd.pus.s11_tc_sched import create_time_tagged_cmd
-from tmtccmd.pus.tc.s3_fsfw_hk import create_request_one_hk_command
-from common import (
-    EXAMPLE_PUS_APID,
-    make_addressable_id,
-    RequestTargetId,
-    AcsHkIds,
-)
+from tmtccmd.pus.s200_fsfw_mode import Subservice as ModeSubservice
+from common import AcsId, Apid

_LOGGER = logging.getLogger(__name__)


+def create_set_mode_cmd(
+    apid: int, unique_id: int, mode: int, submode: int
+) -> PusTelecommand:
+    app_data = bytearray()
+    app_data.extend(struct.pack("!I", unique_id))
+    app_data.extend(struct.pack("!I", mode))
+    app_data.extend(struct.pack("!H", submode))
+    return PusTelecommand(
+        service=200,
+        subservice=ModeSubservice.TC_MODE_COMMAND,
+        apid=apid,
+        app_data=app_data,
+    )


def create_cmd_definition_tree() -> CmdTreeNode:
    root_node = CmdTreeNode.root_node()

+    hk_node = CmdTreeNode("hk", "Housekeeping Node", hide_children_for_print=True)
+    hk_node.add_child(CmdTreeNode("one_shot_hk", "Request One Shot HK set"))
+    hk_node.add_child(
+        CmdTreeNode("enable", "Enable periodic housekeeping data generation")
+    )
+    hk_node.add_child(
+        CmdTreeNode("disable", "Disable periodic housekeeping data generation")
+    )

+    mode_node = CmdTreeNode("mode", "Mode Node", hide_children_for_print=True)
+    set_mode_node = CmdTreeNode(
+        "set_mode", "Set Node", hide_children_which_are_leaves=True
+    )
+    set_mode_node.add_child(CmdTreeNode("off", "Set OFF Mode"))
+    set_mode_node.add_child(CmdTreeNode("on", "Set ON Mode"))
+    set_mode_node.add_child(CmdTreeNode("normal", "Set NORMAL Mode"))
+    mode_node.add_child(set_mode_node)
+    mode_node.add_child(CmdTreeNode("read_mode", "Read Mode"))

    test_node = CmdTreeNode("test", "Test Node")
    test_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
    test_node.add_child(CmdTreeNode("trigger_event", "Send PUS test to trigger event"))
@@ -37,7 +68,9 @@ def create_cmd_definition_tree() -> CmdTreeNode:

    acs_node = CmdTreeNode("acs", "ACS Subsystem Node")
    mgm_node = CmdTreeNode("mgms", "MGM devices node")
-    mgm_node.add_child(CmdTreeNode("one_shot_hk", "Request one shot HK"))
+    mgm_node.add_child(mode_node)
+    mgm_node.add_child(hk_node)

    acs_node.add_child(mgm_node)
    root_node.add_child(acs_node)
@@ -54,10 +87,14 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
        assert len(cmd_path_list) >= 2
        if cmd_path_list[1] == "ping":
            q.add_log_cmd("Sending PUS ping telecommand")
-            return q.add_pus_tc(PusTelecommand(service=17, subservice=1))
+            return q.add_pus_tc(
+                PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=1)
+            )
        elif cmd_path_list[1] == "trigger_event":
            q.add_log_cmd("Triggering test event")
-            return q.add_pus_tc(PusTelecommand(service=17, subservice=128))
+            return q.add_pus_tc(
+                PusTelecommand(apid=Apid.GENERIC_PUS, service=17, subservice=128)
+            )
    if cmd_path_list[0] == "scheduler":
        assert len(cmd_path_list) >= 2
        if cmd_path_list[1] == "schedule_ping_10_secs_ahead":
@@ -69,17 +106,38 @@ def pack_pus_telecommands(q: DefaultPusQueueHelper, cmd_path: str):
                create_time_tagged_cmd(
                    time_stamp,
                    PusTelecommand(service=17, subservice=1),
-                    apid=EXAMPLE_PUS_APID,
+                    apid=Apid.SCHED,
                )
            )
    if cmd_path_list[0] == "acs":
        assert len(cmd_path_list) >= 2
-        if cmd_path_list[1] == "mgm":
+        if cmd_path_list[1] == "mgms":
            assert len(cmd_path_list) >= 3
-            if cmd_path_list[2] == "one_shot_hk":
-                q.add_log_cmd("Sending HK one shot request")
-                q.add_pus_tc(
-                    create_request_one_hk_command(
-                        make_addressable_id(RequestTargetId.ACS, AcsHkIds.MGM_SET)
-                    )
-                )
+            if cmd_path_list[2] == "hk":
+                if cmd_path_list[3] == "one_shot_hk":
+                    q.add_log_cmd("Sending HK one shot request")
+                    # TODO: Fix
+                    # q.add_pus_tc(
+                    #     create_request_one_hk_command(
+                    #         make_addressable_id(Apid.ACS, AcsId.MGM_SET)
+                    #     )
+                    # )
+            if cmd_path_list[2] == "mode":
+                if cmd_path_list[3] == "set_mode":
+                    handle_set_mode_cmd(
+                        q, "MGM 0", cmd_path_list[4], Apid.ACS, AcsId.MGM_0
+                    )


+def handle_set_mode_cmd(
+    q: DefaultPusQueueHelper, target_str: str, mode_str: str, apid: int, unique_id: int
+):
+    if mode_str == "off":
+        q.add_log_cmd(f"Sending Mode OFF to {target_str}")
+        q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.OFF, 0))
+    elif mode_str == "on":
+        q.add_log_cmd(f"Sending Mode ON to {target_str}")
+        q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.ON, 0))
+    elif mode_str == "normal":
+        q.add_log_cmd(f"Sending Mode NORMAL to {target_str}")
+        q.add_pus_tc(create_set_mode_cmd(apid, unique_id, Mode.NORMAL, 0))

View File

@ -1,118 +0,0 @@
use std::sync::mpsc::{self, TryRecvError};
use log::{info, warn};
use satrs::pus::verification::{VerificationReporterWithSender, VerificationReportingProvider};
use satrs::pus::{EcssTmSender, PusTmWrapper};
use satrs::request::TargetAndApidId;
use satrs::spacepackets::ecss::hk::Subservice as HkSubservice;
use satrs::{
hk::HkRequest,
spacepackets::{
ecss::tm::{PusTmCreator, PusTmSecondaryHeader},
time::cds::{DaysLen16Bits, TimeProvider},
SequenceFlags, SpHeader,
},
};
use satrs_example::config::{RequestTargetId, PUS_APID};
use crate::{
hk::{AcsHkIds, HkUniqueId},
requests::{Request, RequestWithToken},
update_time,
};
pub struct AcsTask {
timestamp: [u8; 7],
time_provider: TimeProvider<DaysLen16Bits>,
verif_reporter: VerificationReporterWithSender,
tm_sender: Box<dyn EcssTmSender>,
request_rx: mpsc::Receiver<RequestWithToken>,
}
impl AcsTask {
pub fn new(
tm_sender: impl EcssTmSender,
request_rx: mpsc::Receiver<RequestWithToken>,
verif_reporter: VerificationReporterWithSender,
) -> Self {
Self {
timestamp: [0; 7],
time_provider: TimeProvider::new_with_u16_days(0, 0),
verif_reporter,
tm_sender: Box::new(tm_sender),
request_rx,
}
}
fn handle_hk_request(&mut self, target_id: u32, unique_id: u32) {
assert_eq!(target_id, RequestTargetId::AcsSubsystem as u32);
if unique_id == AcsHkIds::TestMgmSet as u32 {
let mut sp_header = SpHeader::tm(PUS_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTmSecondaryHeader::new_simple(
3,
HkSubservice::TmHkPacket as u8,
&self.timestamp,
);
let mut buf: [u8; 8] = [0; 8];
let hk_id = HkUniqueId::new(target_id, unique_id);
hk_id.write_to_be_bytes(&mut buf).unwrap();
let pus_tm = PusTmCreator::new(&mut sp_header, sec_header, &buf, true);
self.tm_sender
.send_tm(PusTmWrapper::Direct(pus_tm))
.expect("Sending HK TM failed");
}
// TODO: Verification failure for invalid unique IDs.
}
pub fn try_reading_one_request(&mut self) -> bool {
match self.request_rx.try_recv() {
Ok(request) => {
info!(
"ACS thread: Received HK request {:?}",
request.targeted_request
);
let target_and_apid_id = TargetAndApidId::from(request.targeted_request.target_id);
match request.targeted_request.request {
Request::Hk(hk_req) => match hk_req {
HkRequest::OneShot(unique_id) => {
self.handle_hk_request(target_and_apid_id.target(), unique_id)
}
HkRequest::Enable(_) => {}
HkRequest::Disable(_) => {}
HkRequest::ModifyCollectionInterval(_, _) => {}
},
Request::Mode(_mode_req) => {
warn!("mode request handling not implemented yet")
}
Request::Action(_action_req) => {
warn!("action request handling not implemented yet")
}
}
let started_token = self
.verif_reporter
.start_success(request.token, &self.timestamp)
.expect("Sending start success failed");
self.verif_reporter
.completion_success(started_token, &self.timestamp)
.expect("Sending completion success failed");
true
}
Err(e) => match e {
TryRecvError::Empty => false,
TryRecvError::Disconnected => {
warn!("ACS thread: Message Queue TX disconnected!");
false
}
},
}
}
pub fn periodic_operation(&mut self) {
update_time(&mut self.time_provider, &mut self.timestamp);
loop {
if !self.try_reading_one_request() {
break;
}
}
}
}

View File

@ -0,0 +1,284 @@
use derive_new::new;
use satrs::hk::{HkRequest, HkRequestVariant};
use satrs::queue::{GenericSendError, GenericTargetedMessagingError};
use satrs::spacepackets::ecss::hk;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::SpHeader;
use satrs_example::{DeviceMode, TimeStampHelper};
use std::sync::mpsc::{self};
use std::sync::{Arc, Mutex};
use satrs::mode::{
ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequest, ModeRequestHandler,
};
use satrs::pus::{EcssTmSenderCore, PusTmVariant};
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs_example::config::components::PUS_MODE_SERVICE;
use crate::pus::hk::{HkReply, HkReplyVariant};
use crate::requests::CompositeRequest;
use serde::{Deserialize, Serialize};
const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
// This is the selected resolution for the STM LIS3MDL device for the 4 Gauss sensitivity setting.
const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;
pub trait SpiInterface {
type Error;
fn transfer(&mut self, tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error>;
}
#[derive(Default)]
pub struct SpiDummyInterface {
pub dummy_val_0: i16,
pub dummy_val_1: i16,
pub dummy_val_2: i16,
}
impl SpiInterface for SpiDummyInterface {
type Error = ();
fn transfer(&mut self, _tx: &[u8], rx: &mut [u8]) -> Result<(), Self::Error> {
rx[0..2].copy_from_slice(&self.dummy_val_0.to_be_bytes());
rx[2..4].copy_from_slice(&self.dummy_val_1.to_be_bytes());
rx[4..6].copy_from_slice(&self.dummy_val_2.to_be_bytes());
Ok(())
}
}
#[derive(Default, Debug, Copy, Clone, Serialize, Deserialize)]
pub struct MgmData {
pub valid: bool,
pub x: f32,
pub y: f32,
pub z: f32,
}
pub struct MpscModeLeafInterface {
pub request_rx: mpsc::Receiver<GenericMessage<ModeRequest>>,
pub reply_tx_to_pus: mpsc::Sender<GenericMessage<ModeReply>>,
pub reply_tx_to_parent: mpsc::Sender<GenericMessage<ModeReply>>,
}
/// Example MGM device handler strongly based on the LIS3MDL MEMS device.
#[derive(new)]
#[allow(clippy::too_many_arguments)]
pub struct MgmHandlerLis3Mdl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> {
id: UniqueApidTargetId,
dev_str: &'static str,
mode_interface: MpscModeLeafInterface,
composite_request_receiver: mpsc::Receiver<GenericMessage<CompositeRequest>>,
hk_reply_sender: mpsc::Sender<GenericMessage<HkReply>>,
tm_sender: TmSender,
com_interface: ComInterface,
shared_mgm_set: Arc<Mutex<MgmData>>,
#[new(value = "ModeAndSubmode::new(satrs_example::DeviceMode::Off as u32, 0)")]
mode_and_submode: ModeAndSubmode,
#[new(default)]
tx_buf: [u8; 12],
#[new(default)]
rx_buf: [u8; 12],
#[new(default)]
tm_buf: [u8; 16],
#[new(default)]
stamp_helper: TimeStampHelper,
}
impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore>
MgmHandlerLis3Mdl<ComInterface, TmSender>
{
pub fn periodic_operation(&mut self) {
self.stamp_helper.update_from_now();
// Handle requests.
self.handle_composite_requests();
self.handle_mode_requests();
if self.mode() == DeviceMode::Normal as u32 {
log::trace!("polling LIS3MDL sensor {}", self.dev_str);
// Communicate with the device.
let result = self.com_interface.transfer(&self.tx_buf, &mut self.rx_buf);
assert!(result.is_ok());
// Actual data begins on the second byte, similarly to how a lot of SPI devices behave.
let x_raw = i16::from_be_bytes(self.rx_buf[1..3].try_into().unwrap());
let y_raw = i16::from_be_bytes(self.rx_buf[3..5].try_into().unwrap());
let z_raw = i16::from_be_bytes(self.rx_buf[5..7].try_into().unwrap());
// Simple scaling to retrieve the float value, assuming the 4 Gauss sensitivity setting selected above.
let mut mgm_guard = self.shared_mgm_set.lock().unwrap();
mgm_guard.x = x_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
mgm_guard.y = y_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
mgm_guard.z = z_raw as f32 * GAUSS_TO_MICROTESLA_FACTOR * FIELD_LSB_PER_GAUSS_4_SENS;
drop(mgm_guard);
}
}
pub fn handle_composite_requests(&mut self) {
loop {
match self.composite_request_receiver.try_recv() {
Ok(ref msg) => match &msg.message {
CompositeRequest::Hk(hk_request) => {
self.handle_hk_request(&msg.requestor_info, hk_request)
}
// TODO: This object does not have actions (yet).. Still send back completion failure
// reply.
CompositeRequest::Action(_action_req) => {}
},
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::warn!(
"{}: failed to receive composite request: {:?}",
self.dev_str,
e
);
} else {
break;
}
}
}
}
}
pub fn handle_hk_request(&mut self, requestor_info: &MessageMetadata, hk_request: &HkRequest) {
match hk_request.variant {
HkRequestVariant::OneShot => {
self.hk_reply_sender
.send(GenericMessage::new(
*requestor_info,
HkReply::new(hk_request.unique_id, HkReplyVariant::Ack),
))
.expect("failed to send HK reply");
let sec_header = PusTmSecondaryHeader::new(
3,
hk::Subservice::TmHkPacket as u8,
0,
0,
self.stamp_helper.stamp(),
);
let mgm_snapshot = *self.shared_mgm_set.lock().unwrap();
// Use binary serialization here. We want the data to be tightly packed.
self.tm_buf[0] = mgm_snapshot.valid as u8;
self.tm_buf[1..5].copy_from_slice(&mgm_snapshot.x.to_be_bytes());
self.tm_buf[5..9].copy_from_slice(&mgm_snapshot.y.to_be_bytes());
self.tm_buf[9..13].copy_from_slice(&mgm_snapshot.z.to_be_bytes());
let hk_tm = PusTmCreator::new(
SpHeader::new_from_apid(self.id.apid),
sec_header,
&self.tm_buf[0..12],
true,
);
self.tm_sender
.send_tm(self.id.id(), PusTmVariant::Direct(hk_tm))
.expect("failed to send HK TM");
}
HkRequestVariant::EnablePeriodic => todo!(),
HkRequestVariant::DisablePeriodic => todo!(),
HkRequestVariant::ModifyCollectionInterval(_) => todo!(),
}
}
pub fn handle_mode_requests(&mut self) {
loop {
// TODO: Only allow one set mode request per cycle?
match self.mode_interface.request_rx.try_recv() {
Ok(msg) => {
let result = self.handle_mode_request(msg);
// TODO: Trigger event?
if result.is_err() {
log::warn!(
"{}: mode request failed with error {:?}",
self.dev_str,
result.err().unwrap()
);
}
}
Err(e) => {
if e != mpsc::TryRecvError::Empty {
log::warn!("{}: failed to receive mode request: {:?}", self.dev_str, e);
} else {
break;
}
}
}
}
}
}
impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeProvider
for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
fn mode_and_submode(&self) -> ModeAndSubmode {
self.mode_and_submode
}
}
impl<ComInterface: SpiInterface, TmSender: EcssTmSenderCore> ModeRequestHandler
for MgmHandlerLis3Mdl<ComInterface, TmSender>
{
type Error = ModeError;
fn start_transition(
&mut self,
requestor: MessageMetadata,
mode_and_submode: ModeAndSubmode,
) -> Result<(), satrs::mode::ModeError> {
log::info!(
"{}: transitioning to mode {:?}",
self.dev_str,
mode_and_submode
);
self.mode_and_submode = mode_and_submode;
self.handle_mode_reached(Some(requestor))?;
Ok(())
}
fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
log::info!(
"{} announcing mode: {:?}",
self.dev_str,
self.mode_and_submode
);
}
fn handle_mode_reached(
&mut self,
requestor: Option<MessageMetadata>,
) -> Result<(), Self::Error> {
self.announce_mode(requestor, false);
if let Some(requestor) = requestor {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
} else {
self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
}
}
Ok(())
}
fn send_mode_reply(
&self,
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
);
}
self.mode_interface
.reply_tx_to_pus
.send(GenericMessage::new(requestor, reply))
.map_err(|_| GenericTargetedMessagingError::Send(GenericSendError::RxDisconnected))?;
Ok(())
}
fn handle_mode_info(
&mut self,
_requestor_info: MessageMetadata,
_info: ModeAndSubmode,
) -> Result<(), Self::Error> {
Ok(())
}
}
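For reference, the raw-to-microtesla conversion performed in periodic_operation boils down to a single multiplication. The sketch below is standalone; the constants are copied from the file above and the helper name is invented for illustration.

const GAUSS_TO_MICROTESLA_FACTOR: f32 = 100.0;
const FIELD_LSB_PER_GAUSS_4_SENS: f32 = 1.0 / 6842.0;

// Convert a raw 16-bit sensor reading (LSB) to microtesla for the 4 gauss
// sensitivity setting: LSB -> gauss -> microtesla.
fn raw_to_microtesla(raw: i16) -> f32 {
    raw as f32 * FIELD_LSB_PER_GAUSS_4_SENS * GAUSS_TO_MICROTESLA_FACTOR
}

fn main() {
    // Full positive scale (32767 LSB) corresponds to roughly 4.8 gauss, i.e. ~479 uT.
    println!("{:.1} uT", raw_to_microtesla(i16::MAX));
}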

View File

@ -0,0 +1 @@
pub mod mgm;

View File

@@ -12,8 +12,7 @@ use std::time::Duration;
fn main() {
    let mut buf = [0; 32];
    let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
-    let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap();
-    let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true);
+    let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
    let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
    let tc_req_id = RequestId::new(&pus_tc);
    println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");

View File

@@ -1,7 +1,9 @@
use satrs::pus::ReceivesEcssPusTc;
use satrs::spacepackets::{CcsdsPacket, SpHeader};
use satrs::tmtc::{CcsdsPacketHandler, ReceivesCcsdsTc};
-use satrs_example::config::PUS_APID;
+use satrs::ValidatorU16Id;
+use satrs_example::config::components::Apid;
+use satrs_example::config::APID_VALIDATOR;

#[derive(Clone)]
pub struct CcsdsReceiver<
@@ -11,6 +13,16 @@ pub struct CcsdsReceiver<
    pub tc_source: TcSource,
}

+impl<
+        TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
+        E: 'static,
+    > ValidatorU16Id for CcsdsReceiver<TcSource, E>
+{
+    fn validate(&self, apid: u16) -> bool {
+        APID_VALIDATOR.contains(&apid)
+    }
+}

impl<
    TcSource: ReceivesCcsdsTc<Error = E> + ReceivesEcssPusTc<Error = E> + Clone + 'static,
    E: 'static,
@@ -18,27 +30,24 @@ impl<
{
    type Error = E;

-    fn valid_apids(&self) -> &'static [u16] {
-        &[PUS_APID]
-    }

-    fn handle_known_apid(
+    fn handle_packet_with_valid_apid(
        &mut self,
        sp_header: &SpHeader,
        tc_raw: &[u8],
    ) -> Result<(), Self::Error> {
-        if sp_header.apid() == PUS_APID {
+        if sp_header.apid() == Apid::Cfdp as u16 {
+        } else {
            return self.tc_source.pass_ccsds(sp_header, tc_raw);
        }
        Ok(())
    }

-    fn handle_unknown_apid(
+    fn handle_packet_with_unknown_apid(
        &mut self,
        sp_header: &SpHeader,
        _tc_raw: &[u8],
    ) -> Result<(), Self::Error> {
-        println!("Unknown APID 0x{:x?} detected", sp_header.apid());
+        log::warn!("unknown APID 0x{:x?} detected", sp_header.apid());
        Ok(())
    }
}
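The APID check above simply delegates to a shared set lookup. A self-contained sketch of the same idea, with the trait and the set redefined locally so the snippet compiles on its own:

use std::collections::HashSet;

// Local stand-in for the satrs ValidatorU16Id trait used above.
trait ValidatorU16Id {
    fn validate(&self, id: u16) -> bool;
}

struct ApidValidator {
    valid_apids: HashSet<u16>,
}

impl ValidatorU16Id for ApidValidator {
    fn validate(&self, apid: u16) -> bool {
        self.valid_apids.contains(&apid)
    }
}

fn main() {
    let validator = ApidValidator {
        valid_apids: [1, 2, 3, 4].into_iter().collect(),
    };
    assert!(validator.validate(2));
    assert!(!validator.validate(42));
}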

View File

@@ -1,7 +1,12 @@
-use satrs::res_code::ResultU16;
+use lazy_static::lazy_static;
+use satrs::{
+    res_code::ResultU16,
+    spacepackets::{PacketId, PacketType},
+};
use satrs_mib::res_code::ResultU16Info;
use satrs_mib::resultcode;
-use std::net::Ipv4Addr;
+use std::{collections::HashSet, net::Ipv4Addr};
+use strum::IntoEnumIterator;

use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
@@ -9,8 +14,6 @@ use satrs::{
    pool::{StaticMemoryPool, StaticPoolConfig},
};

-pub const PUS_APID: u16 = 0x02;

#[derive(Copy, Clone, PartialEq, Eq, Debug, TryFromPrimitive, IntoPrimitive)]
#[repr(u8)]
pub enum CustomPusServiceId {
@@ -29,6 +32,7 @@ pub const AOCS_APID: u16 = 1;
pub enum GroupId {
    Tmtc = 0,
    Hk = 1,
+    Mode = 2,
}

pub const OBSW_SERVER_ADDR: Ipv4Addr = Ipv4Addr::UNSPECIFIED;
@@ -37,6 +41,23 @@ pub const SERVER_PORT: u16 = 7301;
pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> =
    EventU32TypedSev::<SeverityInfo>::const_new(0, 0);

+lazy_static! {
+    pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
+        let mut set = HashSet::new();
+        for id in components::Apid::iter() {
+            set.insert(PacketId::new(PacketType::Tc, true, id as u16));
+        }
+        set
+    };
+    pub static ref APID_VALIDATOR: HashSet<u16> = {
+        let mut set = HashSet::new();
+        for id in components::Apid::iter() {
+            set.insert(id as u16);
+        }
+        set
+    };
+}

pub mod tmtc_err {
    use super::*;
@@ -53,6 +74,8 @@ pub mod tmtc_err {
    pub const UNKNOWN_TARGET_ID: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 4);
    #[resultcode]
    pub const ROUTING_ERROR: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 5);
+    #[resultcode(info = "Request timeout for targeted PUS request. P1: Request ID. P2: Target ID")]
+    pub const REQUEST_TIMEOUT: ResultU16 = ResultU16::new(GroupId::Tmtc as u8, 6);

    #[resultcode(
        info = "Not enough data inside the TC application data field. Optionally includes: \
@@ -92,27 +115,59 @@ pub mod hk_err {
    ];
}

-#[allow(clippy::enum_variant_names)]
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum TmSenderId {
-    PusVerification = 0,
-    PusTest = 1,
-    PusEvent = 2,
-    PusHk = 3,
-    PusAction = 4,
-    PusSched = 5,
-    AllEvents = 6,
-    AcsSubsystem = 7,
-}
-
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum TcReceiverId {
-    PusTest = 1,
-    PusEvent = 2,
-    PusHk = 3,
-    PusAction = 4,
-    PusSched = 5,
-}
+pub mod mode_err {
+    use super::*;
+
+    #[resultcode]
+    pub const WRONG_MODE: ResultU16 = ResultU16::new(GroupId::Mode as u8, 0);
+}
+
+pub mod components {
+    use satrs::request::UniqueApidTargetId;
+    use strum::EnumIter;
+
+    #[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
+    pub enum Apid {
+        Sched = 1,
+        GenericPus = 2,
+        Acs = 3,
+        Cfdp = 4,
+    }
+
+    // Component IDs for components with the PUS APID.
+    #[derive(Copy, Clone, PartialEq, Eq)]
+    pub enum PusId {
+        PusEventManagement = 0,
+        PusRouting = 1,
+        PusTest = 2,
+        PusAction = 3,
+        PusMode = 4,
+        PusHk = 5,
+    }
+
+    #[derive(Copy, Clone, PartialEq, Eq)]
+    pub enum AcsId {
+        Mgm0 = 0,
+    }
+
+    pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
+    pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
+    pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
+    pub const PUS_TEST_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
+    pub const PUS_MODE_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
+    pub const PUS_HK_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
+    pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::Sched as u16, 0);
+    pub const MGM_HANDLER_0: UniqueApidTargetId =
+        UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
+}

pub mod pool {
    use super::*;
    pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {
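The two lazy_static sets above are built once on first access and then shared read-only. A minimal standalone version of that pattern, with placeholder APID values:

use lazy_static::lazy_static;
use std::collections::HashSet;

lazy_static! {
    // Built lazily on first access, then shared read-only by all callers.
    static ref APID_VALIDATOR: HashSet<u16> = [1u16, 2, 3, 4].into_iter().collect();
}

fn main() {
    assert!(APID_VALIDATOR.contains(&2));
    assert!(!APID_VALIDATOR.contains(&99));
}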

View File

@@ -1,71 +1,87 @@
-use std::sync::mpsc::{self, SendError};
+use std::sync::mpsc::{self};

+use crate::pus::create_verification_reporter;
+use satrs::event_man::{EventMessageU32, EventRoutingError};
+use satrs::params::WritableToBeBytes;
+use satrs::pus::event::EventTmHookProvider;
+use satrs::pus::verification::VerificationReporter;
+use satrs::pus::EcssTmSenderCore;
+use satrs::request::UniqueApidTargetId;
use satrs::{
    event_man::{
-        EventManager, EventManagerWithMpscQueue, MpscEventReceiver, MpscEventU32SendProvider,
-        SendEventProvider,
+        EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded,
+        MpscEventReceiver,
    },
-    events::EventU32,
-    params::Params,
    pus::{
        event_man::{
-            DefaultPusMgmtBackendProvider, EventReporter, EventRequest, EventRequestWithToken,
-            PusEventDispatcher,
+            DefaultPusEventU32Dispatcher, EventReporter, EventRequest, EventRequestWithToken,
        },
-        verification::{
-            TcStateStarted, VerificationReporterWithSender, VerificationReportingProvider,
-            VerificationToken,
-        },
-        EcssTmSender,
+        verification::{TcStateStarted, VerificationReportingProvider, VerificationToken},
    },
-    spacepackets::time::cds::{self, TimeProvider},
+    spacepackets::time::cds::CdsTime,
};
-use satrs_example::config::PUS_APID;
+use satrs_example::config::components::PUS_EVENT_MANAGEMENT;

use crate::update_time;

-pub type MpscEventManager = EventManager<SendError<(EventU32, Option<Params>)>>;
-
-pub struct PusEventHandler {
-    event_request_rx: mpsc::Receiver<EventRequestWithToken>,
-    pus_event_dispatcher: PusEventDispatcher<(), EventU32>,
-    pus_event_man_rx: mpsc::Receiver<(EventU32, Option<Params>)>,
-    tm_sender: Box<dyn EcssTmSender>,
-    time_provider: TimeProvider,
-    timestamp: [u8; 7],
-    verif_handler: VerificationReporterWithSender,
-}
-/*
-*/
-
-impl PusEventHandler {
+// This helper sets the APID of the event sender for the PUS telemetry.
+#[derive(Default)]
+pub struct EventApidSetter {
+    pub next_apid: u16,
+}
+
+impl EventTmHookProvider for EventApidSetter {
+    fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
+        tm.set_apid(self.next_apid);
+    }
+}
+
+/// The PUS event handler subscribes for all events and converts them into ECSS PUS 5 event
+/// packets. It also handles the verification completion of PUS event service requests.
+pub struct PusEventHandler<TmSender: EcssTmSenderCore> {
+    event_request_rx: mpsc::Receiver<EventRequestWithToken>,
+    pus_event_dispatcher: DefaultPusEventU32Dispatcher<()>,
+    pus_event_man_rx: mpsc::Receiver<EventMessageU32>,
+    tm_sender: TmSender,
+    time_provider: CdsTime,
+    timestamp: [u8; 7],
+    verif_handler: VerificationReporter,
+    event_apid_setter: EventApidSetter,
+}
+
+impl<TmSender: EcssTmSenderCore> PusEventHandler<TmSender> {
    pub fn new(
-        verif_handler: VerificationReporterWithSender,
-        event_manager: &mut MpscEventManager,
+        tm_sender: TmSender,
+        verif_handler: VerificationReporter,
+        event_manager: &mut EventManagerWithBoundedMpsc,
        event_request_rx: mpsc::Receiver<EventRequestWithToken>,
-        tm_sender: impl EcssTmSender,
    ) -> Self {
-        let (pus_event_man_tx, pus_event_man_rx) = mpsc::channel();
+        let event_queue_cap = 30;
+        let (pus_event_man_tx, pus_event_man_rx) = mpsc::sync_channel(event_queue_cap);
        // All events sent to the manager are routed to the PUS event manager, which generates PUS event
        // telemetry for each event.
-        let event_reporter = EventReporter::new(PUS_APID, 128).unwrap();
-        let pus_tm_backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
+        let event_reporter = EventReporter::new(PUS_EVENT_MANAGEMENT.raw(), 0, 0, 128).unwrap();
        let pus_event_dispatcher =
-            PusEventDispatcher::new(event_reporter, Box::new(pus_tm_backend));
-        let pus_event_man_send_provider = MpscEventU32SendProvider::new(1, pus_event_man_tx);
+            DefaultPusEventU32Dispatcher::new_with_default_backend(event_reporter);
+        let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
+            PUS_EVENT_MANAGEMENT.raw(),
+            pus_event_man_tx,
+            event_queue_cap,
+        );

-        event_manager.subscribe_all(pus_event_man_send_provider.id());
+        event_manager.subscribe_all(pus_event_man_send_provider.target_id());
        event_manager.add_sender(pus_event_man_send_provider);

        Self {
            event_request_rx,
            pus_event_dispatcher,
            pus_event_man_rx,
-            time_provider: cds::TimeProvider::new_with_u16_days(0, 0),
+            time_provider: CdsTime::new_with_u16_days(0, 0),
            timestamp: [0; 7],
            verif_handler,
-            tm_sender: Box::new(tm_sender),
+            tm_sender,
+            event_apid_setter: EventApidSetter::default(),
        }
    }
@@ -76,7 +92,7 @@ impl PusEventHandler {
            .try_into()
            .expect("expected start verification token");
        self.verif_handler
-            .completion_success(started_token, timestamp)
+            .completion_success(&self.tm_sender, started_token, timestamp)
            .expect("Sending completion success failed");
    };
    // handle event requests
@@ -102,23 +118,29 @@ impl PusEventHandler {
    pub fn generate_pus_event_tm(&mut self) {
        // Perform the generation of PUS event packets
-        if let Ok((event, _param)) = self.pus_event_man_rx.try_recv() {
+        if let Ok(event_msg) = self.pus_event_man_rx.try_recv() {
            update_time(&mut self.time_provider, &mut self.timestamp);
+            let param_vec = event_msg.params().map_or(Vec::new(), |param| {
+                param.to_vec().expect("failed to convert params to vec")
+            });
+            self.event_apid_setter.next_apid = UniqueApidTargetId::from(event_msg.sender_id()).apid;
            self.pus_event_dispatcher
                .generate_pus_event_tm_generic(
-                    self.tm_sender.upcast_mut(),
+                    &self.tm_sender,
                    &self.timestamp,
-                    event,
-                    None,
+                    event_msg.event(),
+                    Some(&param_vec),
                )
                .expect("Sending TM as event failed");
        }
    }
}

+/// This is a thin wrapper around the event manager which also caches the sender component
+/// used to send events to the event manager.
pub struct EventManagerWrapper {
-    event_manager: MpscEventManager,
-    event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
+    event_manager: EventManagerWithBoundedMpsc,
+    event_sender: mpsc::Sender<EventMessageU32>,
}

impl EventManagerWrapper {
@@ -126,46 +148,51 @@ impl EventManagerWrapper {
        // The sender handle is the primary sender handle for all components which want to create events.
        // The event manager will receive the RX handle to receive all the events.
        let (event_sender, event_man_rx) = mpsc::channel();
-        let event_recv = MpscEventReceiver::<EventU32>::new(event_man_rx);
+        let event_recv = MpscEventReceiver::new(event_man_rx);
        Self {
-            event_manager: EventManagerWithMpscQueue::new(Box::new(event_recv)),
+            event_manager: EventManagerWithBoundedMpsc::new(event_recv),
            event_sender,
        }
    }

-    pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
+    // Returns a cached event sender to send events to the event manager for routing.
+    pub fn clone_event_sender(&self) -> mpsc::Sender<EventMessageU32> {
        self.event_sender.clone()
    }

-    pub fn event_manager(&mut self) -> &mut MpscEventManager {
+    pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
        &mut self.event_manager
    }

    pub fn try_event_routing(&mut self) {
+        let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
+            self.routing_error_handler(event_msg, error)
+        };
        // Perform the event routing.
-        self.event_manager
-            .try_event_handling()
-            .expect("event handling failed");
+        self.event_manager.try_event_handling(error_handler);
+    }
+
+    pub fn routing_error_handler(&self, event_msg: &EventMessageU32, error: EventRoutingError) {
+        log::warn!("event routing error for event {event_msg:?}: {error:?}");
    }
}

-pub struct EventHandler {
+pub struct EventHandler<TmSender: EcssTmSenderCore> {
    pub event_man_wrapper: EventManagerWrapper,
-    pub pus_event_handler: PusEventHandler,
+    pub pus_event_handler: PusEventHandler<TmSender>,
}

-impl EventHandler {
+impl<TmSender: EcssTmSenderCore> EventHandler<TmSender> {
    pub fn new(
-        tm_sender: impl EcssTmSender,
-        verif_handler: VerificationReporterWithSender,
+        tm_sender: TmSender,
        event_request_rx: mpsc::Receiver<EventRequestWithToken>,
    ) -> Self {
        let mut event_man_wrapper = EventManagerWrapper::new();
        let pus_event_handler = PusEventHandler::new(
-            verif_handler,
+            tm_sender,
+            create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
            event_man_wrapper.event_manager(),
            event_request_rx,
-            tm_sender,
        );
        Self {
            event_man_wrapper,
@@ -173,12 +200,12 @@ impl EventHandler {
        }
    }

-    pub fn clone_event_sender(&self) -> mpsc::Sender<(EventU32, Option<Params>)> {
+    pub fn clone_event_sender(&self) -> mpsc::Sender<EventMessageU32> {
        self.event_man_wrapper.clone_event_sender()
    }

    #[allow(dead_code)]
-    pub fn event_manager(&mut self) -> &mut MpscEventManager {
+    pub fn event_manager(&mut self) -> &mut EventManagerWithBoundedMpsc {
        self.event_man_wrapper.event_manager()
    }
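One effect of the refactoring above is that the PUS event queue now uses a bounded mpsc::sync_channel instead of an unbounded channel. The standalone snippet below illustrates the resulting backpressure behaviour using plain std types only:

use std::sync::mpsc;

fn main() {
    // A bounded (sync) channel refuses further sends once its queue is full,
    // instead of growing without limit like mpsc::channel().
    let (tx, _rx) = mpsc::sync_channel::<u32>(2);
    assert!(tx.try_send(1).is_ok());
    assert!(tx.try_send(2).is_ok());
    // The third try_send fails with TrySendError::Full because nothing has
    // been drained from the queue yet.
    assert!(tx.try_send(3).is_err());
}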

View File

@@ -1,27 +1,25 @@
use derive_new::new;
+use satrs::hk::UniqueId;
+use satrs::request::UniqueApidTargetId;
use satrs::spacepackets::ByteConversionError;

-#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub enum AcsHkIds {
-    TestMgmSet = 1,
-}
-
#[derive(Debug, new, Copy, Clone)]
pub struct HkUniqueId {
-    target_id: u32,
-    set_id: u32,
+    target_id: UniqueApidTargetId,
+    set_id: UniqueId,
}

impl HkUniqueId {
    #[allow(dead_code)]
-    pub fn target_id(&self) -> u32 {
+    pub fn target_id(&self) -> UniqueApidTargetId {
        self.target_id
    }
    #[allow(dead_code)]
-    pub fn set_id(&self) -> u32 {
+    pub fn set_id(&self) -> UniqueId {
        self.set_id
    }

+    #[allow(dead_code)]
    pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
        if buf.len() < 8 {
            return Err(ByteConversionError::ToSliceTooSmall {
@@ -29,7 +27,7 @@ impl HkUniqueId {
                expected: 8,
            });
        }
-        buf[0..4].copy_from_slice(&self.target_id.to_be_bytes());
+        buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes());
        buf[4..8].copy_from_slice(&self.set_id.to_be_bytes());
        Ok(8)

View File

@@ -1 +1,39 @@
use satrs::spacepackets::time::{cds::CdsTime, TimeWriter};

pub mod config;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {
Off = 0,
On = 1,
Normal = 2,
}
pub struct TimeStampHelper {
stamper: CdsTime,
time_stamp: [u8; 7],
}
impl TimeStampHelper {
pub fn stamp(&self) -> &[u8] {
&self.time_stamp
}
pub fn update_from_now(&mut self) {
self.stamper
.update_from_now()
.expect("Updating timestamp failed");
self.stamper
.write_to_bytes(&mut self.time_stamp)
.expect("Writing timestamp failed");
}
}
impl Default for TimeStampHelper {
fn default() -> Self {
Self {
stamper: CdsTime::now_with_u16_days().expect("creating time stamper failed"),
time_stamp: Default::default(),
}
}
}
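Typical use of the helper defined above inside a handler's periodic operation, shown as a sketch that relies on the TimeStampHelper definition directly above: refresh the stamp once per cycle and reuse the cached bytes for every packet generated in that cycle.

fn example_cycle() {
    let mut stamp_helper = TimeStampHelper::default();
    // Refresh once at the start of the processing cycle...
    stamp_helper.update_from_now();
    // ...then reuse the cached CDS short timestamp (7 bytes) for all TM
    // generated during this cycle.
    assert_eq!(stamp_helper.stamp().len(), 7);
}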

View File

@ -17,50 +17,44 @@ use log::info;
use pus::test::create_test_service_dynamic; use pus::test::create_test_service_dynamic;
use satrs::hal::std::tcp_server::ServerConfig; use satrs::hal::std::tcp_server::ServerConfig;
use satrs::hal::std::udp_server::UdpTcServer; use satrs::hal::std::udp_server::UdpTcServer;
use satrs::request::TargetAndApidId; use satrs::request::GenericMessage;
use satrs::tmtc::tm_helper::SharedTmPool; use satrs::tmtc::tm_helper::SharedTmPool;
use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools}; use satrs_example::config::pool::{create_sched_tc_pool, create_static_pools};
use satrs_example::config::tasks::{ use satrs_example::config::tasks::{
FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, FREQ_MS_AOCS, FREQ_MS_EVENT_HANDLING, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC,
}; };
use satrs_example::config::{RequestTargetId, TmSenderId, OBSW_SERVER_ADDR, PUS_APID, SERVER_PORT}; use satrs_example::config::{OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT};
use tmtc::PusTcSourceProviderDynamic; use tmtc::PusTcSourceProviderDynamic;
use udp::DynamicUdpTmHandler; use udp::DynamicUdpTmHandler;
use crate::acs::AcsTask; use crate::acs::mgm::{MgmHandlerLis3Mdl, MpscModeLeafInterface, SpiDummyInterface};
use crate::ccsds::CcsdsReceiver; use crate::ccsds::CcsdsReceiver;
use crate::logger::setup_logger; use crate::logger::setup_logger;
use crate::pus::action::{create_action_service_dynamic, create_action_service_static}; use crate::pus::action::{create_action_service_dynamic, create_action_service_static};
use crate::pus::event::{create_event_service_dynamic, create_event_service_static}; use crate::pus::event::{create_event_service_dynamic, create_event_service_static};
use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static}; use crate::pus::hk::{create_hk_service_dynamic, create_hk_service_static};
use crate::pus::mode::{create_mode_service_dynamic, create_mode_service_static};
use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static}; use crate::pus::scheduler::{create_scheduler_service_dynamic, create_scheduler_service_static};
use crate::pus::test::create_test_service_static; use crate::pus::test::create_test_service_static;
use crate::pus::{PusReceiver, PusTcMpscRouter}; use crate::pus::{PusReceiver, PusTcMpscRouter};
use crate::requests::{GenericRequestRouter, RequestWithToken}; use crate::requests::{CompositeRequest, GenericRequestRouter};
use crate::tcp::{SyncTcpTmSource, TcpTask}; use crate::tcp::{SyncTcpTmSource, TcpTask};
use crate::tmtc::{ use crate::tmtc::{
PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic, PusTcSourceProviderSharedPool, SharedTcPool, TcSourceTaskDynamic, TcSourceTaskStatic,
}; };
use crate::udp::{StaticUdpTmHandler, UdpTmtcServer}; use crate::udp::{StaticUdpTmHandler, UdpTmtcServer};
use satrs::mode::ModeRequest;
use satrs::pus::event_man::EventRequestWithToken; use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::verification::{VerificationReporterCfg, VerificationReporterWithSender}; use satrs::pus::TmInSharedPoolSender;
use satrs::pus::{EcssTmSender, MpscTmAsVecSender, MpscTmInSharedPoolSender}; use satrs::spacepackets::{time::cds::CdsTime, time::TimeWriter};
use satrs::spacepackets::{time::cds::TimeProvider, time::TimeWriter};
use satrs::tmtc::CcsdsDistributor; use satrs::tmtc::CcsdsDistributor;
use satrs::ChannelId; use satrs_example::config::components::MGM_HANDLER_0;
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::sync::mpsc::{self, channel}; use std::sync::mpsc;
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::thread; use std::thread;
use std::time::Duration; use std::time::Duration;
fn create_verification_reporter(verif_sender: impl EcssTmSender) -> VerificationReporterWithSender {
let verif_cfg = VerificationReporterCfg::new(PUS_APID, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter.
VerificationReporterWithSender::new(&verif_cfg, Box::new(verif_sender))
}
#[allow(dead_code)]
fn static_tmtc_pool_main() {
    let (tm_pool, tc_pool) = create_static_pools();
@@ -68,24 +62,25 @@ fn static_tmtc_pool_main() {
    let shared_tc_pool = SharedTcPool {
        pool: Arc::new(RwLock::new(tc_pool)),
    };
    let (tc_source_tx, tc_source_rx) = mpsc::sync_channel(50);
    let (tm_funnel_tx, tm_funnel_rx) = mpsc::sync_channel(50);
    let (tm_server_tx, tm_server_rx) = mpsc::sync_channel(50);

    let tm_funnel_tx_sender =
        TmInSharedPoolSender::new(shared_tm_pool.clone(), tm_funnel_tx.clone());

    let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
        mpsc::channel::<GenericMessage<CompositeRequest>>();
    let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();

    // Some request are targetable. This map is used to retrieve sender handles based on a target ID.
    let mut request_map = GenericRequestRouter::default();
    request_map
        .composite_router_map
        .insert(MGM_HANDLER_0.id(), mgm_handler_composite_tx);
    request_map
        .mode_router_map
        .insert(MGM_HANDLER_0.id(), mgm_handler_mode_tx);

    // This helper structure is used by all telecommand providers which need to send telecommands
    // to the TC source.
@@ -101,86 +96,84 @@ fn static_tmtc_pool_main() {
    // The event task is the core handler to perform the event routing and TM handling as specified
    // in the sat-rs documentation.
    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx);

    let (pus_test_tx, pus_test_rx) = mpsc::channel();
    let (pus_event_tx, pus_event_rx) = mpsc::channel();
    let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
    let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
    let (pus_action_tx, pus_action_rx) = mpsc::channel();
    let (pus_mode_tx, pus_mode_rx) = mpsc::channel();

    let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
    let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
    let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();

    let pus_router = PusTcMpscRouter {
        test_tc_sender: pus_test_tx,
        event_tc_sender: pus_event_tx,
        sched_tc_sender: pus_sched_tx,
        hk_tc_sender: pus_hk_tx,
        action_tc_sender: pus_action_tx,
        mode_tc_sender: pus_mode_tx,
    };
    let pus_test_service = create_test_service_static(
        tm_funnel_tx_sender.clone(),
        shared_tc_pool.pool.clone(),
        event_handler.clone_event_sender(),
        pus_test_rx,
    );
    let pus_scheduler_service = create_scheduler_service_static(
        tm_funnel_tx_sender.clone(),
        tc_source.clone(),
        pus_sched_rx,
        create_sched_tc_pool(),
    );
    let pus_event_service = create_event_service_static(
        tm_funnel_tx_sender.clone(),
        shared_tc_pool.pool.clone(),
        pus_event_rx,
        event_request_tx,
    );
    let pus_action_service = create_action_service_static(
        tm_funnel_tx_sender.clone(),
        shared_tc_pool.pool.clone(),
        pus_action_rx,
        request_map.clone(),
        pus_action_reply_rx,
    );
    let pus_hk_service = create_hk_service_static(
        tm_funnel_tx_sender.clone(),
        shared_tc_pool.pool.clone(),
        pus_hk_rx,
        request_map.clone(),
        pus_hk_reply_rx,
    );
    let pus_mode_service = create_mode_service_static(
        tm_funnel_tx_sender.clone(),
        shared_tc_pool.pool.clone(),
        pus_mode_rx,
        request_map,
        pus_mode_reply_rx,
    );
    let mut pus_stack = PusStack::new(
        pus_test_service,
        pus_hk_service,
        pus_event_service,
        pus_action_service,
        pus_scheduler_service,
        pus_mode_service,
    );

    let ccsds_receiver = CcsdsReceiver { tc_source };
    let mut tmtc_task = TcSourceTaskStatic::new(
        shared_tc_pool.clone(),
        tc_source_rx,
        PusReceiver::new(tm_funnel_tx_sender, pus_router),
    );

    let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
        .expect("creating UDP TMTC server failed");
    let mut udp_tmtc_server = UdpTmtcServer {
@@ -191,27 +184,17 @@ fn static_tmtc_pool_main() {
        },
    };

    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
    let sync_tm_tcp_source = SyncTcpTmSource::new(200);
    let mut tcp_server = TcpTask::new(
        tcp_server_cfg,
        sync_tm_tcp_source.clone(),
        tcp_ccsds_distributor,
        PACKET_ID_VALIDATOR.clone(),
    )
    .expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmInSharedPoolSender::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
shared_tm_pool.clone(),
tm_funnel_tx.clone(),
),
acs_thread_rx,
verif_reporter,
);
    let mut tm_funnel = TmFunnelStatic::new(
        shared_tm_pool,
        sync_tm_tcp_source,
@@ -219,6 +202,27 @@ fn static_tmtc_pool_main() {
        tm_server_tx,
    );
let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
mpsc::channel();
let dummy_spi_interface = SpiDummyInterface::default();
let shared_mgm_set = Arc::default();
let mode_leaf_interface = MpscModeLeafInterface {
request_rx: mgm_handler_mode_rx,
reply_tx_to_pus: pus_mode_reply_tx,
reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
};
let mut mgm_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
"MGM_0",
mode_leaf_interface,
mgm_handler_composite_rx,
pus_hk_reply_tx,
tm_funnel_tx,
dummy_spi_interface,
shared_mgm_set,
);
info!("Starting TMTC and UDP task"); info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new() let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string()) .name("TMTC and UDP".to_string())
@ -264,7 +268,7 @@ fn static_tmtc_pool_main() {
let jh_aocs = thread::Builder::new() let jh_aocs = thread::Builder::new()
.name("AOCS".to_string()) .name("AOCS".to_string())
.spawn(move || loop { .spawn(move || loop {
acs_task.periodic_operation(); mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
}) })
.unwrap(); .unwrap();
@@ -298,22 +302,23 @@ fn static_tmtc_pool_main() {
#[allow(dead_code)]
fn dyn_tmtc_pool_main() {
    let (tc_source_tx, tc_source_rx) = mpsc::channel();
    let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
    let (tm_server_tx, tm_server_rx) = mpsc::channel();

    // Some request are targetable. This map is used to retrieve sender handles based on a target ID.
    let (mgm_handler_composite_tx, mgm_handler_composite_rx) =
        mpsc::channel::<GenericMessage<CompositeRequest>>();
    let (mgm_handler_mode_tx, mgm_handler_mode_rx) = mpsc::channel::<GenericMessage<ModeRequest>>();

    // Some request are targetable. This map is used to retrieve sender handles based on a target ID.
    let mut request_map = GenericRequestRouter::default();
    request_map
        .composite_router_map
        .insert(MGM_HANDLER_0.raw(), mgm_handler_composite_tx);
    request_map
        .mode_router_map
        .insert(MGM_HANDLER_0.raw(), mgm_handler_mode_tx);

    let tc_source = PusTcSourceProviderDynamic(tc_source_tx);
@@ -323,78 +328,78 @@ fn dyn_tmtc_pool_main() {
    let (event_request_tx, event_request_rx) = mpsc::channel::<EventRequestWithToken>();
    // The event task is the core handler to perform the event routing and TM handling as specified
    // in the sat-rs documentation.
    let mut event_handler = EventHandler::new(tm_funnel_tx.clone(), event_request_rx);

    let (pus_test_tx, pus_test_rx) = mpsc::channel();
    let (pus_event_tx, pus_event_rx) = mpsc::channel();
    let (pus_sched_tx, pus_sched_rx) = mpsc::channel();
    let (pus_hk_tx, pus_hk_rx) = mpsc::channel();
    let (pus_action_tx, pus_action_rx) = mpsc::channel();
    let (pus_mode_tx, pus_mode_rx) = mpsc::channel();

    let (_pus_action_reply_tx, pus_action_reply_rx) = mpsc::channel();
    let (pus_hk_reply_tx, pus_hk_reply_rx) = mpsc::channel();
    let (pus_mode_reply_tx, pus_mode_reply_rx) = mpsc::channel();

    let pus_router = PusTcMpscRouter {
        test_tc_sender: pus_test_tx,
        event_tc_sender: pus_event_tx,
        sched_tc_sender: pus_sched_tx,
        hk_tc_sender: pus_hk_tx,
        action_tc_sender: pus_action_tx,
        mode_tc_sender: pus_mode_tx,
    };
    let pus_test_service = create_test_service_dynamic(
        tm_funnel_tx.clone(),
        event_handler.clone_event_sender(),
        pus_test_rx,
    );
    let pus_scheduler_service = create_scheduler_service_dynamic(
        tm_funnel_tx.clone(),
        tc_source.0.clone(),
        pus_sched_rx,
        create_sched_tc_pool(),
    );
    let pus_event_service =
        create_event_service_dynamic(tm_funnel_tx.clone(), pus_event_rx, event_request_tx);
    let pus_action_service = create_action_service_dynamic(
        tm_funnel_tx.clone(),
        pus_action_rx,
        request_map.clone(),
        pus_action_reply_rx,
    );
    let pus_hk_service = create_hk_service_dynamic(
        tm_funnel_tx.clone(),
        pus_hk_rx,
        request_map.clone(),
        pus_hk_reply_rx,
    );
    let pus_mode_service = create_mode_service_dynamic(
        tm_funnel_tx.clone(),
        pus_mode_rx,
        request_map,
        pus_mode_reply_rx,
    );
    let mut pus_stack = PusStack::new(
        pus_test_service,
        pus_hk_service,
        pus_event_service,
        pus_action_service,
        pus_scheduler_service,
        pus_mode_service,
    );

    let ccsds_receiver = CcsdsReceiver { tc_source };
    let mut tmtc_task = TcSourceTaskDynamic::new(
        tc_source_rx,
        PusReceiver::new(tm_funnel_tx.clone(), pus_router),
    );

    let sock_addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
    let udp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver.clone());
    let udp_tc_server = UdpTcServer::new(sock_addr, 2048, Box::new(udp_ccsds_distributor))
        .expect("creating UDP TMTC server failed");
    let mut udp_tmtc_server = UdpTmtcServer {
@@ -404,27 +409,39 @@ fn dyn_tmtc_pool_main() {
        },
    };

    let tcp_ccsds_distributor = CcsdsDistributor::new(ccsds_receiver);
    let tcp_server_cfg = ServerConfig::new(sock_addr, Duration::from_millis(400), 4096, 8192);
    let sync_tm_tcp_source = SyncTcpTmSource::new(200);
    let mut tcp_server = TcpTask::new(
        tcp_server_cfg,
        sync_tm_tcp_source.clone(),
        tcp_ccsds_distributor,
        PACKET_ID_VALIDATOR.clone(),
    )
    .expect("tcp server creation failed");
let mut acs_task = AcsTask::new(
MpscTmAsVecSender::new(
TmSenderId::AcsSubsystem as ChannelId,
"ACS_TASK_SENDER",
tm_funnel_tx.clone(),
),
acs_thread_rx,
verif_reporter,
);
    let mut tm_funnel = TmFunnelDynamic::new(sync_tm_tcp_source, tm_funnel_rx, tm_server_tx);
let (mgm_handler_mode_reply_to_parent_tx, _mgm_handler_mode_reply_to_parent_rx) =
mpsc::channel();
let dummy_spi_interface = SpiDummyInterface::default();
let shared_mgm_set = Arc::default();
let mode_leaf_interface = MpscModeLeafInterface {
request_rx: mgm_handler_mode_rx,
reply_tx_to_pus: pus_mode_reply_tx,
reply_tx_to_parent: mgm_handler_mode_reply_to_parent_tx,
};
let mut mgm_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
"MGM_0",
mode_leaf_interface,
mgm_handler_composite_rx,
pus_hk_reply_tx,
tm_funnel_tx,
dummy_spi_interface,
shared_mgm_set,
);
info!("Starting TMTC and UDP task"); info!("Starting TMTC and UDP task");
let jh_udp_tmtc = thread::Builder::new() let jh_udp_tmtc = thread::Builder::new()
.name("TMTC and UDP".to_string()) .name("TMTC and UDP".to_string())
@ -470,7 +487,7 @@ fn dyn_tmtc_pool_main() {
let jh_aocs = thread::Builder::new() let jh_aocs = thread::Builder::new()
.name("AOCS".to_string()) .name("AOCS".to_string())
.spawn(move || loop { .spawn(move || loop {
acs_task.periodic_operation(); mgm_handler.periodic_operation();
thread::sleep(Duration::from_millis(FREQ_MS_AOCS)); thread::sleep(Duration::from_millis(FREQ_MS_AOCS));
}) })
.unwrap(); .unwrap();
@ -511,7 +528,7 @@ fn main() {
dyn_tmtc_pool_main(); dyn_tmtc_pool_main();
} }
pub fn update_time(time_provider: &mut TimeProvider, timestamp: &mut [u8]) { pub fn update_time(time_provider: &mut CdsTime, timestamp: &mut [u8]) {
time_provider time_provider
.update_from_now() .update_from_now()
.expect("Could not get current time"); .expect("Could not get current time");

@@ -1,155 +1,274 @@
use log::{error, warn};
use satrs::action::{ActionRequest, ActionRequestVariant};
use satrs::params::WritableToBeBytes;
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::action::{
    ActionReplyVariant, ActivePusActionRequestStd, DefaultActiveActionRequestMap, PusActionReply,
};
use satrs::pus::verification::{
    FailParams, FailParamsWithStep, TcStateAccepted, TcStateStarted, VerificationReporter,
    VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
    ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
    EcssTcInVecConverter, EcssTmSenderCore, EcssTmtcError, GenericConversionError, MpscTcReceiver,
    MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler,
    PusServiceHelper, PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket};
use satrs_example::config::components::PUS_ACTION_SERVICE;
use satrs_example::config::tmtc_err;
use std::sync::mpsc;
use std::time::Duration;

use crate::requests::GenericRequestRouter;

use super::{
    create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
    PusTargetedRequestService, TargetedPusService,
};
pub struct ActionReplyHandler {
fail_data_buf: [u8; 128],
}
impl Default for ActionReplyHandler {
fn default() -> Self {
Self {
fail_data_buf: [0; 128],
}
}
}
impl PusReplyHandler<ActivePusActionRequestStd, PusActionReply> for ActionReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<PusActionReply>,
_tm_sender: &impl EcssTmSenderCore,
) -> Result<(), Self::Error> {
warn!("received unexpected reply for service 8: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<PusActionReply>,
active_request: &ActivePusActionRequestStd,
tm_sender: &(impl EcssTmSenderCore + ?Sized),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let verif_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
let remove_entry = match &reply.message.variant {
ActionReplyVariant::CompletionFailed { error_code, params } => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.completion_failure(
tm_sender,
verif_token,
FailParams::new(time_stamp, error_code, &self.fail_data_buf[..fail_data_len]),
)?;
true
}
ActionReplyVariant::StepFailed {
error_code,
step,
params,
} => {
let mut fail_data_len = 0;
if let Some(params) = params {
fail_data_len = params.write_to_be_bytes(&mut self.fail_data_buf)?;
}
verification_handler.step_failure(
tm_sender,
verif_token,
FailParamsWithStep::new(
time_stamp,
&EcssEnumU16::new(*step),
error_code,
&self.fail_data_buf[..fail_data_len],
),
)?;
true
}
ActionReplyVariant::Completed => {
verification_handler.completion_success(tm_sender, verif_token, time_stamp)?;
true
}
ActionReplyVariant::StepSuccess { step } => {
verification_handler.step_success(
tm_sender,
&verif_token,
time_stamp,
EcssEnumU16::new(*step),
)?;
false
}
_ => false,
};
Ok(remove_entry)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusActionRequestStd,
tm_sender: &impl EcssTmSenderCore,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"action",
)
}
}
#[derive(Default)]
pub struct ActionRequestConverter {}

impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for ActionRequestConverter {
    type Error = GenericConversionError;

    fn convert(
        &mut self,
        token: VerificationToken<TcStateAccepted>,
        tc: &PusTcReader,
        tm_sender: &(impl EcssTmSenderCore + ?Sized),
        verif_reporter: &impl VerificationReportingProvider,
        time_stamp: &[u8],
    ) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
        let subservice = tc.subservice();
        let user_data = tc.user_data();
        if user_data.len() < 8 {
            verif_reporter
                .start_failure(
                    tm_sender,
                    token,
                    FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
                )
                .expect("Sending start failure failed");
            return Err(GenericConversionError::NotEnoughAppData {
                expected: 8,
                found: user_data.len(),
            });
        }
        let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
        let action_id = u32::from_be_bytes(user_data[4..8].try_into().unwrap());
        if subservice == 128 {
            let req_variant = if user_data.len() == 8 {
                ActionRequestVariant::NoData
            } else {
                ActionRequestVariant::VecData(user_data[8..].to_vec())
            };
            Ok((
                ActivePusActionRequestStd::new(
                    action_id,
                    target_id_and_apid.into(),
                    token.into(),
                    Duration::from_secs(30),
                ),
                ActionRequest::new(action_id, req_variant),
            ))
        } else {
            verif_reporter
                .start_failure(
                    tm_sender,
                    token,
                    FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
                )
                .expect("Sending start failure failed");
            Err(GenericConversionError::InvalidSubservice(subservice))
        }
    }
}
pub fn create_action_service_static(
    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
    tc_pool: SharedStaticMemoryPool,
    pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
    action_router: GenericRequestRouter,
    reply_receiver: mpsc::Receiver<GenericMessage<PusActionReply>>,
) -> ActionServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
    let action_request_handler = PusTargetedRequestService::new(
        PusServiceHelper::new(
            PUS_ACTION_SERVICE.id(),
            pus_action_rx,
            tm_sender,
            create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
            EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
        ),
        ActionRequestConverter::default(),
        // TODO: Implementation which does not use run-time allocation? Maybe something like
        // a bounded wrapper which pre-allocates using [HashMap::with_capacity]..
        DefaultActiveActionRequestMap::default(),
        ActionReplyHandler::default(),
        action_router,
        reply_receiver,
    );
    ActionServiceWrapper {
        service: action_request_handler,
    }
}

pub fn create_action_service_dynamic(
    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
    pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
    action_router: GenericRequestRouter,
    reply_receiver: mpsc::Receiver<GenericMessage<PusActionReply>>,
) -> ActionServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
    let action_request_handler = PusTargetedRequestService::new(
        PusServiceHelper::new(
            PUS_ACTION_SERVICE.id(),
            pus_action_rx,
            tm_funnel_tx,
            create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
            EcssTcInVecConverter::default(),
        ),
        ActionRequestConverter::default(),
        DefaultActiveActionRequestMap::default(),
        ActionReplyHandler::default(),
        action_router,
        reply_receiver,
    );
    ActionServiceWrapper {
        service: action_request_handler,
    }
}
pub struct ActionServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
{
    pub(crate) service: PusTargetedRequestService<
        MpscTcReceiver,
        TmSender,
        TcInMemConverter,
        VerificationReporter,
        ActionRequestConverter,
        ActionReplyHandler,
        DefaultActiveActionRequestMap,
        ActivePusActionRequestStd,
        ActionRequest,
        PusActionReply,
    >,
}

impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
    for ActionServiceWrapper<TmSender, TcInMemConverter>
{
    /// Returns [true] if the packet handling is finished.
    fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
        match self.service.poll_and_handle_next_tc(time_stamp) {
            Ok(result) => match result {
                PusPacketHandlerResult::RequestHandled => {}
                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
@@ -171,4 +290,463 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus8Wrapper<TcInMemConverter> {
        }
        false
    }
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 8: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
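The wrapper above implements the TargetedPusService trait, so a single PUS stack task can drive telecommand handling, reply handling and timeout checks through the three methods shown. A minimal polling sketch, assuming the trait and HandlingStatus are in scope (the loop structure is illustrative, not taken from this file):

// Sketch only: driving any TargetedPusService implementor from a periodic task.
fn drive_targeted_service(service: &mut impl TargetedPusService, time_stamp: &[u8]) {
    // Drain all queued telecommands; poll_and_handle_next_tc returns true once handling is finished.
    while !service.poll_and_handle_next_tc(time_stamp) {}
    // Drain the reply queue until the handler reports it as empty.
    while service.poll_and_handle_next_reply(time_stamp) != HandlingStatus::Empty {}
    // Convert stale active requests into completion failures.
    service.check_for_request_timeouts();
}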
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification;
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::request::MessageMetadata;
use satrs::ComponentId;
use satrs::{
res_code::ResultU16,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
WritablePusPacket,
},
SpHeader,
},
};
use crate::{
pus::tests::{PusConverterTestbench, ReplyHandlerTestbench, TargetedPusRequestTestbench},
requests::CompositeRequest,
};
use super::*;
impl
TargetedPusRequestTestbench<
ActionRequestConverter,
ActionReplyHandler,
DefaultActiveActionRequestMap,
ActivePusActionRequestStd,
ActionRequest,
PusActionReply,
>
{
pub fn new_for_action(owner_id: ComponentId, target_id: ComponentId) -> Self {
let _ = env_logger::builder().is_test(true).try_init();
let (tm_funnel_tx, tm_funnel_rx) = mpsc::channel();
let (pus_action_tx, pus_action_rx) = mpsc::channel();
let (action_reply_tx, action_reply_rx) = mpsc::channel();
let (action_req_tx, action_req_rx) = mpsc::channel();
let verif_reporter = TestVerificationReporter::new(owner_id);
let mut generic_req_router = GenericRequestRouter::default();
generic_req_router
.composite_router_map
.insert(target_id, action_req_tx);
Self {
service: PusTargetedRequestService::new(
PusServiceHelper::new(
owner_id,
pus_action_rx,
tm_funnel_tx.clone(),
verif_reporter,
EcssTcInVecConverter::default(),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
ActionReplyHandler::default(),
generic_req_router,
action_reply_rx,
),
request_id: None,
pus_packet_tx: pus_action_tx,
tm_funnel_rx,
reply_tx: action_reply_tx,
request_rx: action_req_rx,
}
}
pub fn verify_packet_started(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_started_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_packet_completed(&self) {
self.service
.service_helper
.common
.verif_reporter
.check_next_is_completion_success(
self.service.service_helper.id(),
self.request_id.expect("request ID not set").into(),
);
}
pub fn verify_tm_empty(&self) {
let packet = self.tm_funnel_rx.try_recv();
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
pub fn verify_next_tc_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::RequestHandled => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_all_tcs_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_handle_next_tc(time_stamp);
if let Err(e) = result {
panic!("unexpected error {:?}", e);
}
let result = result.unwrap();
match result {
PusPacketHandlerResult::Empty => (),
_ => panic!("unexpected result {result:?}"),
}
}
pub fn verify_next_reply_is_handled_properly(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::HandledOne);
}
pub fn verify_all_replies_handled(&mut self, time_stamp: &[u8]) {
let result = self.service.poll_and_check_next_reply(time_stamp);
assert!(result.is_ok());
assert_eq!(result.unwrap(), HandlingStatus::Empty);
}
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let accepted_token = self
.service
.service_helper
.verif_reporter()
.acceptance_success(self.service.service_helper.tm_sender(), token, &[0; 7])
.expect("TC acceptance failed");
self.service
.service_helper
.verif_reporter()
.check_next_was_added(accepted_token.request_id());
let id = self.service.service_helper.id();
self.service
.service_helper
.verif_reporter()
.check_next_is_acceptance_success(id, accepted_token.request_id());
self.pus_packet_tx
.send(EcssTcAndToken::new(tc.to_vec().unwrap(), accepted_token))
.unwrap();
}
}
#[test]
fn basic_request() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sp_header = SpHeader::new_from_apid(TEST_APID);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
testbench.verify_all_tcs_handled(&time_stamp);
testbench.verify_packet_started();
let possible_req = testbench.request_rx.try_recv();
assert!(possible_req.is_ok());
let req = possible_req.unwrap();
if let CompositeRequest::Action(action_req) = req.message {
assert_eq!(action_req.action_id, action_id);
assert_eq!(action_req.variant, ActionRequestVariant::NoData);
let action_reply = PusActionReply::new(action_id, ActionReplyVariant::Completed);
testbench
.reply_tx
.send(GenericMessage::new(req.requestor_info, action_reply))
.unwrap();
} else {
panic!("unexpected request type");
}
testbench.verify_next_reply_is_handled_properly(&time_stamp);
testbench.verify_all_replies_handled(&time_stamp);
testbench.verify_packet_completed();
testbench.verify_tm_empty();
}
#[test]
fn basic_request_routing_error() {
let mut testbench = TargetedPusRequestTestbench::new_for_action(
TEST_COMPONENT_ID_0.id(),
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&0_u32.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
let result = testbench.service.poll_and_handle_next_tc(&time_stamp);
assert!(result.is_err());
// Verify the correct result and completion failure.
}
#[test]
fn converter_action_req_no_data() {
let mut testbench = PusConverterTestbench::new(
TEST_COMPONENT_ID_0.raw(),
ActionRequestConverter::default(),
);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::NoData = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(
active_req.target_id(),
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0).raw()
);
assert_eq!(
active_req.token().request_id(),
testbench.request_id().unwrap()
);
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn converter_action_req_with_data() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 {
app_data[i + 8] = i as u8;
}
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
assert!(result.is_ok());
let (active_req, request) = result.unwrap();
if let ActionRequestVariant::VecData(vec) = request.variant {
assert_eq!(request.action_id, action_id);
assert_eq!(active_req.action_id, action_id);
assert_eq!(vec, app_data[8..].to_vec());
} else {
panic!("unexpected action request variant");
}
}
#[test]
fn reply_handling_completion_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = PusActionReply::new(action_id, ActionReplyVariant::Completed);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_full_completion_success(
TEST_COMPONENT_ID_0.id(),
req_id,
None,
);
}
#[test]
fn reply_handling_completion_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = PusActionReply::new(
action_id,
ActionReplyVariant::CompletionFailed {
error_code,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.into(),
req_id,
None,
error_code.raw() as u64,
);
}
#[test]
fn reply_handling_step_success() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let reply = PusActionReply::new(action_id, ActionReplyVariant::StepSuccess { step: 1 });
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
// Entry should not be removed, completion not done yet.
assert!(!result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.raw(), req_id);
testbench
.verif_reporter
.check_next_is_step_success(TEST_COMPONENT_ID_0.raw(), req_id, 1);
}
#[test]
fn reply_handling_step_failure() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_req) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let active_action_req =
ActivePusActionRequestStd::new_from_common_req(action_id, active_req);
let error_code = ResultU16::new(2, 3);
let reply = PusActionReply::new(
action_id,
ActionReplyVariant::StepFailed {
error_code,
step: 1,
params: None,
},
);
let generic_reply = GenericMessage::new(MessageMetadata::new(req_id.into(), 0), reply);
let result = testbench.handle_reply(&generic_reply, &active_action_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
testbench.verif_reporter.check_next_was_added(req_id);
testbench
.verif_reporter
.check_next_is_acceptance_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench
.verif_reporter
.check_next_is_started_success(TEST_COMPONENT_ID_0.id(), req_id);
testbench.verif_reporter.check_next_is_step_failure(
TEST_COMPONENT_ID_0.id(),
req_id,
error_code.raw().into(),
);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = PusActionReply::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_id = 5_u32;
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(
&ActivePusActionRequestStd::new_from_common_req(action_id, active_request),
&[],
);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
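The basic_request test above also exercises the full reply path: the routed CompositeRequest::Action carries the requestor metadata, and the target answers on the action reply channel. A condensed sketch of that responding side, with illustrative names (request and reply_tx are assumed to be the received GenericMessage<CompositeRequest> and the mpsc sender for PusActionReply handed to the target at construction):

// Sketch only: a target component completing a routed action request.
if let CompositeRequest::Action(action_req) = request.message {
    let reply = PusActionReply::new(action_req.action_id, ActionReplyVariant::Completed);
    // Echo the requestor metadata so the PUS 8 service can match the reply against its
    // active request map entry and generate the completion success telemetry.
    reply_tx
        .send(GenericMessage::new(request.requestor_info, reply))
        .expect("sending action reply failed");
}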

@@ -1,87 +1,69 @@
use std::sync::mpsc;

use crate::pus::create_verification_reporter;
use log::{error, warn};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
    EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
    EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
    PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
};
use satrs_example::config::components::PUS_EVENT_MANAGEMENT;

pub fn create_event_service_static(
    tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
    tc_pool: SharedStaticMemoryPool,
    pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
    event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
    let pus_5_handler = PusEventServiceHandler::new(
        PusServiceHelper::new(
            PUS_EVENT_MANAGEMENT.id(),
            pus_event_rx,
            tm_sender,
            create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
            EcssTcInSharedStoreConverter::new(tc_pool.clone(), 2048),
        ),
        event_request_tx,
    );
    EventServiceWrapper {
        handler: pus_5_handler,
    }
}

pub fn create_event_service_dynamic(
    tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
    pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
    event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
    let pus_5_handler = PusEventServiceHandler::new(
        PusServiceHelper::new(
            PUS_EVENT_MANAGEMENT.id(),
            pus_event_rx,
            tm_funnel_tx,
            create_verification_reporter(PUS_EVENT_MANAGEMENT.id(), PUS_EVENT_MANAGEMENT.apid),
            EcssTcInVecConverter::default(),
        ),
        event_request_tx,
    );
    EventServiceWrapper {
        handler: pus_5_handler,
    }
}

pub struct EventServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
    pub handler:
        PusEventServiceHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
}

impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
    EventServiceWrapper<TmSender, TcInMemConverter>
{
    pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
        match self.handler.poll_and_handle_next_tc(time_stamp) {
            Ok(result) => match result {
                PusPacketHandlerResult::RequestHandled => {}
                PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {

@@ -1,47 +1,127 @@
use derive_new::new;
use log::{error, warn};
use satrs::hk::{CollectionIntervalFactor, HkRequest, HkRequestVariant, UniqueId};
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::{
    FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
    VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
    ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
    EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTmSenderCore,
    EcssTmtcError, GenericConversionError, MpscTcReceiver, MpscTmAsVecSender,
    MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusReplyHandler, PusServiceHelper,
    PusTcToRequestConverter, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket};
use satrs_example::config::components::PUS_HK_SERVICE;
use satrs_example::config::{hk_err, tmtc_err};
use std::sync::mpsc;
use std::time::Duration;

use crate::pus::{create_verification_reporter, generic_pus_request_timeout_handler};
use crate::requests::GenericRequestRouter;

use super::{HandlingStatus, PusTargetedRequestService};
#[derive(Clone, PartialEq, Debug, new)]
pub struct HkReply {
pub unique_id: UniqueId,
pub variant: HkReplyVariant,
}
#[derive(Clone, PartialEq, Debug)]
pub enum HkReplyVariant {
Ack,
}
#[derive(Default)]
pub struct HkReplyHandler {}

impl PusReplyHandler<ActivePusRequestStd, HkReply> for HkReplyHandler {
    type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<HkReply>,
_tm_sender: &impl EcssTmSenderCore,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for service 3: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<HkReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSenderCore,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message.variant {
HkReplyVariant::Ack => {
verification_handler
.completion_success(tm_sender, started_token, time_stamp)
.expect("sending completion success verification failed");
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSenderCore,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
pub struct HkRequestConverter {
timeout: Duration,
}
impl Default for HkRequestConverter {
fn default() -> Self {
Self {
timeout: Duration::from_secs(60),
}
}
}
impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConverter {
type Error = GenericConversionError;
    fn convert(
        &mut self,
        token: VerificationToken<TcStateAccepted>,
        tc: &PusTcReader,
        tm_sender: &(impl EcssTmSenderCore + ?Sized),
        verif_reporter: &impl VerificationReportingProvider,
        time_stamp: &[u8],
    ) -> Result<(ActivePusRequestStd, HkRequest), Self::Error> {
        let user_data = tc.user_data();
        if user_data.is_empty() {
            let user_data_len = user_data.len() as u32;
            let user_data_len_raw = user_data_len.to_be_bytes();
            verif_reporter
                .start_failure(
                    tm_sender,
                    token,
                    FailParams::new(
                        time_stamp,
@@ -50,7 +130,7 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
                    ),
                )
                .expect("Sending start failure TM failed");
            return Err(GenericConversionError::NotEnoughAppData {
                expected: 4,
                found: 0,
            });
@@ -64,155 +144,164 @@ impl PusHkToRequestConverter for ExampleHkRequestConverter {
            let user_data_len = user_data.len() as u32;
            let user_data_len_raw = user_data_len.to_be_bytes();
            verif_reporter
                .start_failure(
                    tm_sender,
                    token,
                    FailParams::new(time_stamp, err, &user_data_len_raw),
                )
                .expect("Sending start failure TM failed");
            return Err(GenericConversionError::NotEnoughAppData {
                expected: 8,
                found: 4,
            });
        }
        let subservice = tc.subservice();
        let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
        let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());

        let standard_subservice = hk::Subservice::try_from(subservice);
        if standard_subservice.is_err() {
            verif_reporter
                .start_failure(
                    tm_sender,
                    token,
                    FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE, &[subservice]),
                )
                .expect("Sending start failure TM failed");
            return Err(GenericConversionError::InvalidSubservice(subservice));
        }
        let request = match standard_subservice.unwrap() {
            hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
                HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
            }
            hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
                HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
            }
            hk::Subservice::TcReportHkReportStructures => todo!(),
            hk::Subservice::TmHkPacket => todo!(),
            hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
                HkRequest::new(unique_id, HkRequestVariant::OneShot)
            }
            hk::Subservice::TcModifyDiagCollectionInterval
            | hk::Subservice::TcModifyHkCollectionInterval => {
                if user_data.len() < 12 {
                    verif_reporter
                        .start_failure(
                            tm_sender,
                            token,
                            FailParams::new_no_fail_data(
                                time_stamp,
                                &tmtc_err::NOT_ENOUGH_APP_DATA,
                            ),
                        )
                        .expect("Sending start failure TM failed");
                    return Err(GenericConversionError::NotEnoughAppData {
                        expected: 12,
                        found: user_data.len(),
                    });
                }
                HkRequest::new(
                    unique_id,
                    HkRequestVariant::ModifyCollectionInterval(
                        CollectionIntervalFactor::from_be_bytes(
                            user_data[8..12].try_into().unwrap(),
                        ),
                    ),
                )
            }
            _ => {
                verif_reporter
                    .start_failure(
                        tm_sender,
                        token,
                        FailParams::new(
                            time_stamp,
                            &tmtc_err::PUS_SUBSERVICE_NOT_IMPLEMENTED,
                            &[subservice],
                        ),
                    )
                    .expect("Sending start failure TM failed");
                return Err(GenericConversionError::InvalidSubservice(subservice));
            }
        };
        Ok((
            ActivePusRequestStd::new(target_id_and_apid.into(), token, self.timeout),
            request,
        ))
    }
}
pub fn create_hk_service_static(
tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
tc_pool: SharedStaticMemoryPool,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_sender,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
reply_receiver,
);
HkServiceWrapper {
service: pus_3_handler,
}
}
pub fn create_hk_service_dynamic(
tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
pus_hk_rx,
tm_funnel_tx,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
EcssTcInVecConverter::default(),
),
HkRequestConverter::default(),
DefaultActiveRequestMap::default(),
HkReplyHandler::default(),
request_router,
reply_receiver,
);
HkServiceWrapper {
service: pus_3_handler,
}
}
pub struct HkServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
pub(crate) service: PusTargetedRequestService<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
HkRequestConverter,
HkReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
HkRequest,
HkReply,
>,
}
impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
HkServiceWrapper<TmSender, TcInMemConverter>
{
pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
@ -234,4 +323,242 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus3Wrapper<TcInMemConverter> {
}
false
}
pub fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
// This only fails if all senders disconnected. Treat it like an empty queue.
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS 3: Handling reply failed with error {e:?}");
HandlingStatus::Empty
})
}
pub fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
request::GenericMessage,
spacepackets::{
ecss::{hk::Subservice, tc::PusTcCreator},
SpHeader,
},
};
use satrs_example::config::tmtc_err;
use crate::pus::{
hk::HkReplyVariant,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::{HkReply, HkReplyHandler, HkRequestConverter};
#[test]
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let hk_req = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcGenerateOneShotHk as u8,
&app_data,
true,
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::OneShot = req.variant {
} else {
panic!("unexpected HK request")
}
}
#[test]
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::EnablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::DisablePeriodic = req.variant {
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableHkGeneration as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableDiagGeneration as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
let collection_interval_factor = 5_u32;
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
let (_active_req, req) = hk_bench
.convert(accepted_token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion failed");
assert_eq!(req.unique_id, unique_id);
if let HkRequestVariant::ModifyCollectionInterval(interval_factor) = req.variant {
assert_eq!(interval_factor, collection_interval_factor);
} else {
panic!("unexpected HK request")
}
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyHkCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyDiagCollectionInterval as u8,
&app_data,
true,
);
generic_check(&tc1);
}
#[test]
fn hk_reply_handler() {
let mut reply_testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
let sender_id = 2_u64;
let apid_target_id = 3_u32;
let unique_id = 5_u32;
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
let reply = GenericMessage::new(
MessageMetadata::new(req_id.into(), sender_id),
HkReply::new(unique_id, HkReplyVariant::Ack),
);
let result = reply_testbench.handle_reply(&reply, &active_req, &[]);
assert!(result.is_ok());
assert!(result.unwrap());
reply_testbench
.verif_reporter
.assert_full_completion_success(TEST_COMPONENT_ID_0.raw(), req_id, None);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_1, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_1.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
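For reference, the converter above expects the HK telecommand application data to start with a 4-byte target object ID followed by a 4-byte unique HK set ID, plus a 4-byte collection interval factor for the interval subservices. The following standalone sketch mirrors the unit tests above and builds such a one-shot HK request packet; the APID and ID values are arbitrary placeholders, not values taken from the example configuration.

use satrs::spacepackets::ecss::{hk::Subservice, tc::PusTcCreator, WritablePusPacket};
use satrs::spacepackets::SpHeader;

fn main() {
    // Placeholder values; a real system takes these from the mission configuration.
    let apid = 0x02;
    let target_id = 5_u32;
    let unique_hk_set_id = 1_u32;
    // Application data layout expected by the converter: [0..4] target ID, [4..8] HK set ID.
    let mut app_data: [u8; 8] = [0; 8];
    app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
    app_data[4..8].copy_from_slice(&unique_hk_set_id.to_be_bytes());
    let sp_header = SpHeader::new_for_unseg_tc(apid, 0, 0);
    // PUS service 3, one-shot HK generation subservice.
    let tc = PusTcCreator::new_simple(
        sp_header,
        3,
        Subservice::TcGenerateOneShotHk as u8,
        &app_data,
        true,
    );
    let raw_tc = tc.to_vec().expect("TC serialization failed");
    println!("one-shot HK TC: {} bytes", raw_tc.len());
}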


@ -1,77 +1,80 @@
use crate::requests::GenericRequestRouter;
use crate::tmtc::MpscStoreAndSendError;
use log::warn;
use satrs::pus::verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter,
EcssTcReceiverCore, EcssTmSenderCore, EcssTmtcError, GenericConversionError,
GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError, PusReplyHandler,
PusRequestRouter, PusServiceHelper, PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::GenericReceiveError;
use satrs::request::{Apid, GenericMessage, MessageMetadata};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusServiceId;
use satrs::ComponentId;
use satrs_example::config::components::PUS_ROUTING_SERVICE;
use satrs_example::config::{tmtc_err, CustomPusServiceId};
use satrs_example::TimeStampHelper;
use std::fmt::Debug;
use std::sync::mpsc::{self, Sender};
pub mod action;
pub mod event;
pub mod hk;
pub mod mode;
pub mod scheduler;
pub mod stack;
pub mod test;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum HandlingStatus {
Empty,
HandledOne,
}
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
// Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter.
VerificationReporter::new(owner_id, &verif_cfg)
}
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: Sender<EcssTcAndToken>,
pub event_tc_sender: Sender<EcssTcAndToken>,
pub sched_tc_sender: Sender<EcssTcAndToken>,
pub hk_tc_sender: Sender<EcssTcAndToken>,
pub action_tc_sender: Sender<EcssTcAndToken>,
pub mode_tc_sender: Sender<EcssTcAndToken>,
}
pub struct PusReceiver<TmSender: EcssTmSenderCore> {
pub id: ComponentId,
pub tm_sender: TmSender,
pub verif_reporter: VerificationReporter,
pub pus_router: PusTcMpscRouter,
stamp_helper: TimeStampHelper,
}
impl<TmSender: EcssTmSenderCore> PusReceiver<TmSender> {
pub fn new(tm_sender: TmSender, pus_router: PusTcMpscRouter) -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
tm_sender,
verif_reporter: create_verification_reporter(
PUS_ROUTING_SERVICE.id(),
PUS_ROUTING_SERVICE.apid,
),
pus_router,
stamp_helper: TimeStampHelper::default(),
}
}
pub fn handle_tc_packet(
&mut self,
tc_in_memory: TcInMemory,
@ -82,41 +85,34 @@ impl PusReceiver {
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
.expect("Acceptance success failure");
let service = PusServiceId::try_from(service);
match service {
Ok(standard_service) => match standard_service {
PusServiceId::Test => self.pus_router.test_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Housekeeping => {
self.pus_router.hk_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
PusServiceId::Event => self.pus_router.event_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?,
PusServiceId::Scheduling => {
self.pus_router.sched_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
_ => {
let result = self.verif_reporter.start_failure(
&self.tm_sender,
accepted_token,
FailParams::new(
self.stamp_helper.stamp(),
@ -133,14 +129,17 @@ impl PusReceiver {
if let Ok(custom_service) = CustomPusServiceId::try_from(e.number) {
match custom_service {
CustomPusServiceId::Mode => {
self.pus_router.mode_tc_sender.send(EcssTcAndToken {
tc_in_memory,
token: Some(accepted_token.into()),
})?
}
CustomPusServiceId::Health => {}
}
} else {
self.verif_reporter
.start_failure(
&self.tm_sender,
accepted_token,
FailParams::new(
self.stamp_helper.stamp(),
@ -156,55 +155,550 @@ impl PusReceiver {
}
}
pub trait TargetedPusService {
/// Returns [true] if the packet handling is finished.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool;
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus;
fn check_for_request_timeouts(&mut self);
}
/// This is a generic handler class for all PUS services where a PUS telecommand is converted
/// to a targeted request.
///
/// The generic steps for this process are the following
///
/// 1. Poll for TC packets
/// 2. Convert the raw packets to a [PusTcReader].
/// 3. Convert the PUS TC to a typed request using the [PusTcToRequestConverter].
/// 4. Route the requests using the [GenericRequestRouter].
/// 5. Add the request to the active request map using the [ActiveRequestMapProvider] abstraction.
/// 6. Check for replies which complete the forwarded request. The handler takes care of
/// the verification process.
/// 7. Check for timeouts of active requests. Generally, the timeout on the service level should
/// be highest expected timeout for the given target.
///
/// The handler exposes the following API:
///
/// 1. [Self::handle_one_tc] which tries to poll and handle one TC packet, covering steps 1-5.
/// 2. [Self::check_one_reply] which tries to poll and handle one reply, covering step 6.
/// 3. [Self::check_for_request_timeouts] which checks for request timeouts, covering step 7.
pub struct PusTargetedRequestService<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
pub reply_handler: ReplyHandler,
pub reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
}
impl<
TcReceiver: EcssTcReceiverCore,
TmSender: EcssTmSenderCore,
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
>
PusTargetedRequestService<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestInfo,
RequestType,
ReplyType,
>
where
GenericRequestRouter: PusRequestRouter<RequestType, Error = GenericRoutingError>,
{
pub fn new(
service_helper: PusServiceHelper<
TcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
>,
request_converter: RequestConverter,
active_request_map: ActiveRequestMap,
reply_hook: ReplyHandler,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
) -> Self {
Self {
service_helper,
request_converter,
active_request_map,
reply_handler: reply_hook,
request_router,
reply_receiver,
phantom: std::marker::PhantomData,
}
}
pub fn poll_and_handle_next_tc(
&mut self,
time_stamp: &[u8],
) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
self.service_helper
.tc_in_mem_converter_mut()
.cache(&ecss_tc_and_token.tc_in_memory)?;
let tc = self.service_helper.tc_in_mem_converter().convert()?;
let (mut request_info, request) = match self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
self.service_helper.tm_sender(),
&self.service_helper.common.verif_reporter,
time_stamp,
) {
Ok((info, req)) => (info, req),
Err(e) => {
self.handle_conversion_to_request_error(&e, ecss_tc_and_token.token, time_stamp);
return Err(e.into());
}
};
let accepted_token: VerificationToken<TcStateAccepted> = request_info
.token()
.try_into()
.expect("token not in expected accepted state");
let verif_request_id = verification::RequestId::new(&tc).raw();
match self.request_router.route(
MessageMetadata::new(verif_request_id, self.service_helper.id()),
request_info.target_id(),
request,
) {
Ok(()) => {
let started_token = self
.service_helper
.verif_reporter()
.start_success(
&self.service_helper.common.tm_sender,
accepted_token,
time_stamp,
)
.expect("Start success failure");
request_info.set_token(started_token.into());
self.active_request_map
.insert(&verif_request_id, request_info);
}
Err(e) => {
self.request_router.handle_error_generic(
&request_info,
&tc,
e.clone(),
self.service_helper.tm_sender(),
self.service_helper.verif_reporter(),
time_stamp,
);
return Err(e.into());
}
}
Ok(PusPacketHandlerResult::RequestHandled)
}
fn handle_conversion_to_request_error(
&mut self,
error: &GenericConversionError,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
) {
match error {
GenericConversionError::WrongService(service) => {
let service_slice: [u8; 1] = [*service];
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(time_stamp, &tmtc_err::INVALID_PUS_SERVICE, &service_slice),
)
.expect("Sending completion failure failed");
}
GenericConversionError::InvalidSubservice(subservice) => {
let subservice_slice: [u8; 1] = [*subservice];
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(
time_stamp,
&tmtc_err::INVALID_PUS_SUBSERVICE,
&subservice_slice,
),
)
.expect("Sending completion failure failed");
}
GenericConversionError::NotEnoughAppData { expected, found } => {
let mut context_info = (*found as u32).to_be_bytes().to_vec();
context_info.extend_from_slice(&(*expected as u32).to_be_bytes());
self.service_helper
.verif_reporter()
.completion_failure(
self.service_helper.tm_sender(),
token,
FailParams::new(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA, &context_info),
)
.expect("Sending completion failure failed");
}
// Do nothing: this is service-level and cannot be handled generically here.
GenericConversionError::InvalidAppData(_) => (),
}
}
pub fn poll_and_check_next_reply(
&mut self,
time_stamp: &[u8],
) -> Result<HandlingStatus, EcssTmtcError> {
match self.reply_receiver.try_recv() {
Ok(reply) => {
self.handle_reply(&reply, time_stamp)?;
Ok(HandlingStatus::HandledOne)
}
Err(e) => match e {
mpsc::TryRecvError::Empty => Ok(HandlingStatus::Empty),
mpsc::TryRecvError::Disconnected => Err(EcssTmtcError::Receive(
GenericReceiveError::TxDisconnected(None),
)),
},
}
}
pub fn handle_reply(
&mut self,
reply: &GenericMessage<ReplyType>,
time_stamp: &[u8],
) -> Result<(), EcssTmtcError> {
let active_req_opt = self.active_request_map.get(reply.request_id());
if active_req_opt.is_none() {
self.reply_handler
.handle_unrequested_reply(reply, &self.service_helper.common.tm_sender)?;
return Ok(());
}
let active_request = active_req_opt.unwrap();
let request_finished = self
.reply_handler
.handle_reply(
reply,
active_request,
&self.service_helper.common.tm_sender,
&self.service_helper.common.verif_reporter,
time_stamp,
)
.unwrap_or(false);
if request_finished {
self.active_request_map.remove(reply.request_id());
}
Ok(())
}
pub fn check_for_request_timeouts(&mut self) {
let mut requests_to_delete = Vec::new();
self.active_request_map
.for_each(|request_id, request_info| {
if request_info.has_timed_out() {
requests_to_delete.push(*request_id);
}
});
if !requests_to_delete.is_empty() {
for request_id in requests_to_delete {
self.active_request_map.remove(request_id);
}
}
}
}
/// Generic timeout handling: Handle the verification failure with a dedicated return code
/// and also log the error.
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSenderCore + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
service_str: &'static str,
) -> Result<(), EcssTmtcError> {
log::warn!("timeout for active request {active_request:?} on {service_str} service");
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("token not in expected started state");
verification_handler.completion_failure(
sender,
started_token,
FailParams::new(time_stamp, &tmtc_err::REQUEST_TIMEOUT, &[]),
)?;
Ok(())
}
#[cfg(test)]
pub(crate) mod tests {
use std::time::Duration;
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmAsVec, PusTmVariant};
use satrs::request::RequestId;
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider, EcssTcInVecConverter, MpscTcReceiver,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
WritablePusPacket,
},
SpHeader,
},
};
use crate::requests::CompositeRequest;
use super::*;
// Testbench dedicated to the testing of [PusReplyHandler]s
pub struct ReplyHandlerTestbench<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
Reply,
> {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub reply_handler: ReplyHandler,
pub tm_receiver: mpsc::Receiver<PusTmAsVec>,
pub default_timeout: Duration,
tm_sender: MpscTmAsVecSender,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Reply)>,
}
impl<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
Reply,
> ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
{
pub fn new(owner_id: ComponentId, reply_handler: ReplyHandler) -> Self {
let test_verif_reporter = TestVerificationReporter::new(owner_id);
let (tm_sender, tm_receiver) = mpsc::channel();
Self {
id: TEST_COMPONENT_ID_0.raw(),
verif_reporter: test_verif_reporter,
reply_handler,
default_timeout: Duration::from_secs(30),
tm_sender,
tm_receiver,
phantom: std::marker::PhantomData,
}
}
pub fn add_tc(
&mut self,
apid: u16,
apid_target: u32,
time_stamp: &[u8],
) -> (verification::RequestId, ActivePusRequestStd) {
let sp_header = SpHeader::new_from_apid(apid);
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
sp_header,
sec_header_dummy,
&[],
true,
));
let accepted = self
.verif_reporter
.acceptance_success(&self.tm_sender, init, time_stamp)
.expect("acceptance failed");
let started = self
.verif_reporter
.start_success(&self.tm_sender, accepted, time_stamp)
.expect("start failed");
(
started.request_id(),
ActivePusRequestStd::new(
UniqueApidTargetId::new(apid, apid_target).raw(),
started,
self.default_timeout,
),
)
}
pub fn handle_reply(
&mut self,
reply: &GenericMessage<Reply>,
active_request: &ActiveRequestInfo,
time_stamp: &[u8],
) -> Result<bool, ReplyHandler::Error> {
self.reply_handler.handle_reply(
reply,
active_request,
&self.tm_sender,
&self.verif_reporter,
time_stamp,
)
}
pub fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<Reply>,
) -> Result<(), ReplyHandler::Error> {
self.reply_handler
.handle_unrequested_reply(reply, &self.tm_sender)
}
pub fn handle_request_timeout(
&mut self,
active_request_info: &ActiveRequestInfo,
time_stamp: &[u8],
) -> Result<(), ReplyHandler::Error> {
self.reply_handler.handle_request_timeout(
active_request_info,
&self.tm_sender,
&self.verif_reporter,
time_stamp,
)
}
}
#[derive(Default)]
pub struct DummySender {}
/// Dummy sender component which does nothing on the [Self::send_tm] call.
///
/// Useful for unit tests.
impl EcssTmSenderCore for DummySender {
fn send_tm(&self, _source_id: ComponentId, _tm: PusTmVariant) -> Result<(), EcssTmtcError> {
Ok(())
}
}
// Testbench dedicated to the testing of [PusTcToRequestConverter]s
pub struct PusConverterTestbench<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
Request,
> {
pub id: ComponentId,
pub verif_reporter: TestVerificationReporter,
pub converter: Converter,
dummy_sender: DummySender,
current_request_id: Option<verification::RequestId>,
current_packet: Option<Vec<u8>>,
phantom: std::marker::PhantomData<(ActiveRequestInfo, Request)>,
}
impl<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
Request,
> PusConverterTestbench<Converter, ActiveRequestInfo, Request>
{
pub fn new(owner_id: ComponentId, converter: Converter) -> Self {
let test_verif_reporter = TestVerificationReporter::new(owner_id);
Self {
id: owner_id,
verif_reporter: test_verif_reporter,
converter,
dummy_sender: DummySender::default(),
current_request_id: None,
current_packet: None,
phantom: std::marker::PhantomData,
}
}
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verif_reporter.add_tc(tc);
self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter
.acceptance_success(&self.dummy_sender, token, &[])
.expect("acceptance failed")
}
pub fn request_id(&self) -> Option<verification::RequestId> {
self.current_request_id
}
pub fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
expected_apid: u16,
expected_apid_target: u32,
) -> Result<(ActiveRequestInfo, Request), Converter::Error> {
if self.current_packet.is_none() {
return Err(GenericConversionError::InvalidAppData(
"call add_tc first".to_string(),
));
}
let current_packet = self.current_packet.take().unwrap();
let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert(
token,
&tc_reader.0,
&self.dummy_sender,
&self.verif_reporter,
time_stamp,
)?;
assert_eq!(
active_info.token().request_id(),
self.request_id().expect("no request id is set")
);
assert_eq!(
active_info.target_id(),
UniqueApidTargetId::new(expected_apid, expected_apid_target).raw()
);
Ok((active_info, request))
}
}
pub struct TargetedPusRequestTestbench<
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
RequestType,
ReplyType,
> {
pub service: PusTargetedRequestService<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
TestVerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestInfo,
RequestType,
ReplyType,
>,
pub request_id: Option<RequestId>,
pub tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
pub pus_packet_tx: mpsc::Sender<EcssTcAndToken>,
pub reply_tx: mpsc::Sender<GenericMessage<ReplyType>>,
pub request_rx: mpsc::Receiver<GenericMessage<CompositeRequest>>,
}
}
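The HkServiceWrapper above and the other service wrappers follow the same polling pattern through the TargetedPusService trait defined in this module. As a rough sketch (the drain_service helper below is hypothetical and not part of the example application), a caller could drive one such service until both its TC queue and its reply queue are drained for the current cycle, and then run the timeout check, similar to what the PusStack does for all services:

use crate::pus::{HandlingStatus, TargetedPusService};

pub fn drain_service(service: &mut impl TargetedPusService, time_stamp: &[u8]) {
    loop {
        // `true` signals that no more TC packets were pending in this polling cycle.
        let tc_queue_empty = service.poll_and_handle_next_tc(time_stamp);
        let reply_status = service.poll_and_handle_next_reply(time_stamp);
        if tc_queue_empty && reply_status == HandlingStatus::Empty {
            break;
        }
    }
    // Active requests which did not receive a reply in time are completed with a failure.
    service.check_for_request_timeouts();
}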


@ -0,0 +1,434 @@
use derive_new::new;
use log::{error, warn};
use std::sync::mpsc;
use std::time::Duration;
use crate::requests::GenericRequestRouter;
use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter,
EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
};
use satrs::request::GenericMessage;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::{
mode::Subservice,
verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSenderCore, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
ecss::{
tc::PusTcReader,
tm::{PusTmCreator, PusTmSecondaryHeader},
PusPacket,
},
SpHeader,
},
ComponentId,
};
use satrs_example::config::components::PUS_MODE_SERVICE;
use satrs_example::config::{mode_err, tmtc_err};
use super::{
create_verification_reporter, generic_pus_request_timeout_handler, HandlingStatus,
PusTargetedRequestService, TargetedPusService,
};
#[derive(new)]
pub struct ModeReplyHandler {
owner_id: ComponentId,
}
impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
type Error = EcssTmtcError;
fn handle_unrequested_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
_tm_sender: &impl EcssTmSenderCore,
) -> Result<(), Self::Error> {
log::warn!("received unexpected reply for mode service 5: {reply:?}");
Ok(())
}
fn handle_reply(
&mut self,
reply: &GenericMessage<ModeReply>,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSenderCore,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<bool, Self::Error> {
let started_token: VerificationToken<TcStateStarted> = active_request
.token()
.try_into()
.expect("invalid token state");
match reply.message {
ModeReply::ModeReply(mode_reply) => {
let mut source_data: [u8; 12] = [0; 12];
mode_reply
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
ModeReply::CantReachMode(error_code) => {
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(time_stamp, &error_code, &[]),
)?;
}
ModeReply::WrongMode { expected, reached } => {
let mut error_info: [u8; 24] = [0; 24];
let mut written_len = expected
.write_to_be_bytes(&mut error_info[0..ModeAndSubmode::RAW_LEN])
.expect("writing expected mode failed");
written_len += reached
.write_to_be_bytes(&mut error_info[ModeAndSubmode::RAW_LEN..])
.expect("writing reached mode failed");
verification_handler.completion_failure(
tm_sender,
started_token,
FailParams::new(
time_stamp,
&mode_err::WRONG_MODE,
&error_info[..written_len],
),
)?;
}
};
Ok(true)
}
fn handle_request_timeout(
&mut self,
active_request: &ActivePusRequestStd,
tm_sender: &impl EcssTmSenderCore,
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(), Self::Error> {
generic_pus_request_timeout_handler(
tm_sender,
active_request,
verification_handler,
time_stamp,
"HK",
)?;
Ok(())
}
}
#[derive(Default)]
pub struct ModeRequestConverter {}
impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestConverter {
type Error = GenericConversionError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
tm_sender: &(impl EcssTmSenderCore + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
let subservice = tc.subservice();
let user_data = tc.user_data();
let not_enough_app_data = |expected: usize| {
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::NOT_ENOUGH_APP_DATA),
)
.expect("Sending start failure failed");
Err(GenericConversionError::NotEnoughAppData {
expected,
found: user_data.len(),
})
};
if user_data.len() < core::mem::size_of::<u32>() {
return not_enough_app_data(4);
}
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).unwrap();
let active_request =
ActivePusRequestStd::new(target_id_and_apid.into(), token, Duration::from_secs(30));
let subservice_typed = Subservice::try_from(subservice);
let invalid_subservice = || {
// Invalid subservice
verif_reporter
.start_failure(
tm_sender,
token,
FailParams::new_no_fail_data(time_stamp, &tmtc_err::INVALID_PUS_SUBSERVICE),
)
.expect("Sending start failure failed");
Err(GenericConversionError::InvalidSubservice(subservice))
};
if subservice_typed.is_err() {
return invalid_subservice();
}
let subservice_typed = subservice_typed.unwrap();
match subservice_typed {
Subservice::TcSetMode => {
if user_data.len() < core::mem::size_of::<u32>() + ModeAndSubmode::RAW_LEN {
return not_enough_app_data(4 + ModeAndSubmode::RAW_LEN);
}
let mode_and_submode = ModeAndSubmode::from_be_bytes(&tc.user_data()[4..])
.expect("mode and submode extraction failed");
Ok((active_request, ModeRequest::SetMode(mode_and_submode)))
}
Subservice::TcReadMode => Ok((active_request, ModeRequest::ReadMode)),
Subservice::TcAnnounceMode => Ok((active_request, ModeRequest::AnnounceMode)),
Subservice::TcAnnounceModeRecursive => {
Ok((active_request, ModeRequest::AnnounceModeRecursive))
}
_ => invalid_subservice(),
}
}
}
pub fn create_mode_service_static(
tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
tc_pool: SharedStaticMemoryPool,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool, 2048),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub fn create_mode_service_dynamic(
tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
pus_action_rx,
tm_funnel_tx,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
EcssTcInVecConverter::default(),
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
mode_router,
reply_receiver,
);
ModeServiceWrapper {
service: mode_request_handler,
}
}
pub struct ModeServiceWrapper<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
pub(crate) service: PusTargetedRequestService<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
ModeRequestConverter,
ModeReplyHandler,
DefaultActiveRequestMap<ActivePusRequestStd>,
ActivePusRequestStd,
ModeRequest,
ModeReply,
>,
}
impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> TargetedPusService
for ModeServiceWrapper<TmSender, TcInMemConverter>
{
/// Returns [true] if the packet handling is finished.
fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
match self.service.poll_and_handle_next_tc(time_stamp) {
Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
warn!("PUS mode service: partial packet handling success: {e:?}")
}
PusPacketHandlerResult::CustomSubservice(invalid, _) => {
warn!("PUS mode service: invalid subservice {invalid}");
}
PusPacketHandlerResult::SubserviceNotImplemented(subservice, _) => {
warn!("PUS mode service: {subservice} not implemented");
}
PusPacketHandlerResult::Empty => {
return true;
}
},
Err(error) => {
error!("PUS mode service: packet handling error: {error:?}")
}
}
false
}
fn poll_and_handle_next_reply(&mut self, time_stamp: &[u8]) -> HandlingStatus {
self.service
.poll_and_check_next_reply(time_stamp)
.unwrap_or_else(|e| {
warn!("PUS action service: Handling reply failed with error {e:?}");
HandlingStatus::HandledOne
})
}
fn check_for_request_timeouts(&mut self) {
self.service.check_for_request_timeouts();
}
}
#[cfg(test)]
mod tests {
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
request::GenericMessage,
spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
SpHeader,
},
};
use satrs_example::config::tmtc_err;
use crate::pus::{
mode::ModeReplyHandler,
tests::{PusConverterTestbench, ReplyHandlerTestbench},
};
use super::ModeRequestConverter;
#[test]
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::ReadMode);
}
#[test]
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::SetMode(mode_and_submode));
}
#[test]
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceMode);
}
#[test]
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
.expect("conversion has failed");
assert_eq!(req, ModeRequest::AnnounceModeRecursive);
}
#[test]
fn reply_handling_unrequested_reply() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);
assert!(result.is_ok());
}
#[test]
fn reply_handling_reply_timeout() {
let mut testbench = ReplyHandlerTestbench::new(
TEST_COMPONENT_ID_0.id(),
ModeReplyHandler::new(TEST_COMPONENT_ID_0.id()),
);
let (req_id, active_request) = testbench.add_tc(TEST_APID, TEST_UNIQUE_ID_0, &[]);
let result = testbench.handle_request_timeout(&active_request, &[]);
assert!(result.is_ok());
testbench.verif_reporter.assert_completion_failure(
TEST_COMPONENT_ID_0.raw(),
req_id,
None,
tmtc_err::REQUEST_TIMEOUT.raw() as u64,
);
}
}
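As a complement to the converter tests above, the following standalone sketch shows how a matching TcSetMode telecommand could be assembled on the sending side: a 4-byte target object ID followed by the serialized ModeAndSubmode field, carried in the custom mode PUS service 200. The APID and target ID values are placeholders.

use satrs::mode::ModeAndSubmode;
use satrs::pus::mode::Subservice;
use satrs::spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use satrs::spacepackets::ecss::WritablePusPacket;
use satrs::spacepackets::SpHeader;

fn main() {
    let apid = 0x02;
    let target_id = 5_u32;
    let sp_header = SpHeader::new_for_unseg_tc(apid, 0, 0);
    // Custom PUS service 200 is used for mode commanding in this example.
    let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
    let mode_and_submode = ModeAndSubmode::new(2, 1);
    // App data layout expected by ModeRequestConverter: [0..4] target ID, then mode/submode.
    let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
    app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
    mode_and_submode
        .write_to_be_bytes(&mut app_data[4..])
        .expect("writing mode and submode failed");
    let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
    let raw_tc = tc.to_vec().expect("TC serialization failed");
    println!("set mode TC: {} bytes", raw_tc.len());
}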


@ -1,19 +1,18 @@
use std::sync::mpsc; use std::sync::mpsc;
use std::time::Duration; use std::time::Duration;
use crate::pus::create_verification_reporter;
use log::{error, info, warn}; use log::{error, info, warn};
use satrs::pool::{PoolProvider, StaticMemoryPool, StoreAddr}; use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo}; use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusService11SchedHandler; use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporterWithSender; use satrs::pus::verification::VerificationReporter;
use satrs::pus::{ use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter, EcssTcAndToken, EcssTcInMemConverter, EcssTcInSharedStoreConverter, EcssTcInVecConverter,
MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSender, PusPacketHandlerResult, EcssTmSenderCore, MpscTcReceiver, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded,
PusServiceHelper, PusPacketHandlerResult, PusServiceHelper, PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
}; };
use satrs::tmtc::tm_helper::SharedTmPool; use satrs_example::config::components::PUS_SCHED_SERVICE;
use satrs::ChannelId;
use satrs_example::config::{TcReceiverId, TmSenderId, PUS_APID};
use crate::tmtc::PusTcSourceProviderSharedPool; use crate::tmtc::PusTcSourceProviderSharedPool;
@ -51,15 +50,25 @@ impl TcReleaser for mpsc::Sender<Vec<u8>> {
} }
} }
pub struct Pus11Wrapper<TcInMemConverter: EcssTcInMemConverter> { pub struct SchedulingServiceWrapper<
pub pus_11_handler: TmSender: EcssTmSenderCore,
PusService11SchedHandler<TcInMemConverter, VerificationReporterWithSender, PusScheduler>, TcInMemConverter: EcssTcInMemConverter,
> {
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
TmSender,
TcInMemConverter,
VerificationReporter,
PusScheduler,
>,
pub sched_tc_pool: StaticMemoryPool, pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096], pub releaser_buf: [u8; 4096],
pub tc_releaser: Box<dyn TcReleaser + Send>, pub tc_releaser: Box<dyn TcReleaser + Send>,
} }
impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> { impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
SchedulingServiceWrapper<TmSender, TcInMemConverter>
{
pub fn release_tcs(&mut self) { pub fn release_tcs(&mut self) {
let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool { let releaser = |enabled: bool, info: &TcInfo, tc: &[u8]| -> bool {
self.tc_releaser.release(enabled, info, tc) self.tc_releaser.release(enabled, info, tc)
@ -83,8 +92,11 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
} }
} }
pub fn handle_next_packet(&mut self) -> bool { pub fn poll_and_handle_next_tc(&mut self, time_stamp: &[u8]) -> bool {
match self.pus_11_handler.handle_one_tc(&mut self.sched_tc_pool) { match self
.pus_11_handler
.poll_and_handle_next_tc(time_stamp, &mut self.sched_tc_pool)
{
Ok(result) => match result { Ok(result) => match result {
PusPacketHandlerResult::RequestHandled => {} PusPacketHandlerResult::RequestHandled => {}
PusPacketHandlerResult::RequestHandledPartialSuccess(e) => { PusPacketHandlerResult::RequestHandledPartialSuccess(e) => {
@ -109,37 +121,24 @@ impl<TcInMemConverter: EcssTcInMemConverter> Pus11Wrapper<TcInMemConverter> {
} }
pub fn create_scheduler_service_static( pub fn create_scheduler_service_static(
shared_tm_store: SharedTmPool, tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tc_releaser: PusTcSourceProviderSharedPool, tc_releaser: PusTcSourceProviderSharedPool,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>, pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool, sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInSharedStoreConverter> { ) -> SchedulingServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
let sched_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusSched as ChannelId,
"PUS_11_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let sched_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusSched as ChannelId,
"PUS_11_TC_RECV",
pus_sched_rx,
);
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed"); .expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new( let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new( PusServiceHelper::new(
Box::new(sched_srv_receiver), PUS_SCHED_SERVICE.id(),
Box::new(sched_srv_tm_sender), pus_sched_rx,
PUS_APID, tm_sender,
verif_reporter.clone(), create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048), EcssTcInSharedStoreConverter::new(tc_releaser.clone_backing_pool(), 2048),
), ),
scheduler, scheduler,
); );
Pus11Wrapper { SchedulingServiceWrapper {
pus_11_handler, pus_11_handler,
sched_tc_pool, sched_tc_pool,
releaser_buf: [0; 4096], releaser_buf: [0; 4096],
@ -148,35 +147,26 @@ pub fn create_scheduler_service_static(
} }
pub fn create_scheduler_service_dynamic( pub fn create_scheduler_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>, tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
verif_reporter: VerificationReporterWithSender,
tc_source_sender: mpsc::Sender<Vec<u8>>, tc_source_sender: mpsc::Sender<Vec<u8>>,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>, pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool, sched_tc_pool: StaticMemoryPool,
) -> Pus11Wrapper<EcssTcInVecConverter> { ) -> SchedulingServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let sched_srv_tm_sender = MpscTmAsVecSender::new( //let sched_srv_receiver =
TmSenderId::PusSched as ChannelId, //MpscTcReceiver::new(PUS_SCHED_SERVICE.raw(), "PUS_11_TC_RECV", pus_sched_rx);
"PUS_11_TM_SENDER",
tm_funnel_tx,
);
let sched_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusSched as ChannelId,
"PUS_11_TC_RECV",
pus_sched_rx,
);
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5)) let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed"); .expect("Creating PUS Scheduler failed");
let pus_11_handler = PusService11SchedHandler::new( let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new( PusServiceHelper::new(
Box::new(sched_srv_receiver), PUS_SCHED_SERVICE.id(),
Box::new(sched_srv_tm_sender), pus_sched_rx,
PUS_APID, tm_funnel_tx,
verif_reporter.clone(), create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
EcssTcInVecConverter::default(), EcssTcInVecConverter::default(),
), ),
scheduler, scheduler,
); );
Pus11Wrapper { SchedulingServiceWrapper {
pus_11_handler, pus_11_handler,
sched_tc_pool, sched_tc_pool,
releaser_buf: [0; 4096], releaser_buf: [0; 4096],
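
To make the release path above easier to follow: the wrapper hands the PUS 11 scheduler a closure that pushes released telecommands back into the TC source queue. A minimal, self-contained sketch of that pattern using only std channels; TcReleaser and the byte values are illustrative stand-ins, not sat-rs types.

use std::sync::mpsc;

// Illustrative stand-in for the TC source used by the example.
struct TcReleaser {
    tc_source: mpsc::Sender<Vec<u8>>,
}

impl TcReleaser {
    // Forward a released TC back to the TC source if releasing is enabled.
    fn release(&self, enabled: bool, tc: &[u8]) -> bool {
        if enabled {
            self.tc_source
                .send(tc.to_vec())
                .expect("sending released TC failed");
        }
        true
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let releaser = TcReleaser { tc_source: tx };
    // The scheduler would call this closure for every TC whose release time has passed.
    let release_closure = |enabled: bool, tc: &[u8]| -> bool { releaser.release(enabled, tc) };
    assert!(release_closure(true, &[0x18, 0x6b, 0xc0, 0x00]));
    assert_eq!(rx.recv().unwrap(), vec![0x18, 0x6b, 0xc0, 0x00]);
}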


@ -1,50 +1,71 @@
use satrs::pus::EcssTcInMemConverter; use crate::pus::mode::ModeServiceWrapper;
use derive_new::new;
use super::{ use satrs::{
action::Pus8Wrapper, event::Pus5Wrapper, hk::Pus3Wrapper, scheduler::Pus11Wrapper, pus::{EcssTcInMemConverter, EcssTmSenderCore},
test::Service17CustomWrapper, spacepackets::time::{cds, TimeWriter},
}; };
pub struct PusStack<TcInMemConverter: EcssTcInMemConverter> { use super::{
event_srv: Pus5Wrapper<TcInMemConverter>, action::ActionServiceWrapper, event::EventServiceWrapper, hk::HkServiceWrapper,
hk_srv: Pus3Wrapper<TcInMemConverter>, scheduler::SchedulingServiceWrapper, test::TestCustomServiceWrapper, HandlingStatus,
action_srv: Pus8Wrapper<TcInMemConverter>, TargetedPusService,
schedule_srv: Pus11Wrapper<TcInMemConverter>, };
test_srv: Service17CustomWrapper<TcInMemConverter>,
#[derive(new)]
pub struct PusStack<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter> {
test_srv: TestCustomServiceWrapper<TmSender, TcInMemConverter>,
hk_srv_wrapper: HkServiceWrapper<TmSender, TcInMemConverter>,
event_srv: EventServiceWrapper<TmSender, TcInMemConverter>,
action_srv_wrapper: ActionServiceWrapper<TmSender, TcInMemConverter>,
schedule_srv: SchedulingServiceWrapper<TmSender, TcInMemConverter>,
mode_srv: ModeServiceWrapper<TmSender, TcInMemConverter>,
} }
impl<TcInMemConverter: EcssTcInMemConverter> PusStack<TcInMemConverter> { impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
pub fn new( PusStack<TmSender, TcInMemConverter>
hk_srv: Pus3Wrapper<TcInMemConverter>, {
event_srv: Pus5Wrapper<TcInMemConverter>,
action_srv: Pus8Wrapper<TcInMemConverter>,
schedule_srv: Pus11Wrapper<TcInMemConverter>,
test_srv: Service17CustomWrapper<TcInMemConverter>,
) -> Self {
Self {
event_srv,
action_srv,
schedule_srv,
test_srv,
hk_srv,
}
}
pub fn periodic_operation(&mut self) { pub fn periodic_operation(&mut self) {
// Release all telecommands which reached their release time before calling the service
// handlers.
self.schedule_srv.release_tcs(); self.schedule_srv.release_tcs();
let time_stamp = cds::CdsTime::now_with_u16_days()
.expect("time stamp generation error")
.to_vec()
.unwrap();
loop { loop {
let mut all_queues_empty = true; let mut nothing_to_do = true;
let mut is_srv_finished = |srv_handler_finished: bool| { let mut is_srv_finished =
if !srv_handler_finished { |tc_handling_done: bool, reply_handling_done: Option<HandlingStatus>| {
all_queues_empty = false; if !tc_handling_done
} || (reply_handling_done.is_some()
}; && reply_handling_done.unwrap() == HandlingStatus::Empty)
is_srv_finished(self.test_srv.handle_next_packet()); {
is_srv_finished(self.schedule_srv.handle_next_packet()); nothing_to_do = false;
is_srv_finished(self.event_srv.handle_next_packet()); }
is_srv_finished(self.action_srv.handle_next_packet()); };
is_srv_finished(self.hk_srv.handle_next_packet()); is_srv_finished(self.test_srv.poll_and_handle_next_packet(&time_stamp), None);
if all_queues_empty { is_srv_finished(self.schedule_srv.poll_and_handle_next_tc(&time_stamp), None);
is_srv_finished(self.event_srv.poll_and_handle_next_tc(&time_stamp), None);
is_srv_finished(
self.action_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(
self.action_srv_wrapper
.poll_and_handle_next_reply(&time_stamp),
),
);
is_srv_finished(
self.hk_srv_wrapper.poll_and_handle_next_tc(&time_stamp),
Some(self.hk_srv_wrapper.poll_and_handle_next_reply(&time_stamp)),
);
is_srv_finished(
self.mode_srv.poll_and_handle_next_tc(&time_stamp),
Some(self.mode_srv.poll_and_handle_next_reply(&time_stamp)),
);
if nothing_to_do {
// Timeout checking is only done once.
self.action_srv_wrapper.check_for_request_timeouts();
self.hk_srv_wrapper.check_for_request_timeouts();
self.mode_srv.check_for_request_timeouts();
break; break;
} }
} }
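
The periodic operation shown above polls every service repeatedly until one full pass finds nothing left to handle, and only then runs the request timeout checks. A condensed, std-only sketch of that control flow with a dummy service type (all names illustrative).

// Dummy service: pretends to have a fixed number of pending telecommands.
struct DummyService {
    pending: u32,
}

impl DummyService {
    // Returns true if a packet was handled, false if the queue was empty.
    fn poll_and_handle_next_tc(&mut self) -> bool {
        if self.pending > 0 {
            self.pending -= 1;
            return true;
        }
        false
    }
}

fn main() {
    let mut services = vec![DummyService { pending: 2 }, DummyService { pending: 0 }];
    loop {
        let mut nothing_to_do = true;
        for srv in services.iter_mut() {
            if srv.poll_and_handle_next_tc() {
                nothing_to_do = false;
            }
        }
        if nothing_to_do {
            // Request timeout checks would run once here before the task sleeps
            // until its next periodic invocation.
            break;
        }
    }
    assert!(services.iter().all(|srv| srv.pending == 0));
}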


@ -1,93 +1,74 @@
use crate::pus::create_verification_reporter;
use log::{info, warn}; use log::{info, warn};
use satrs::params::Params; use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::pool::{SharedStaticMemoryPool, StoreAddr}; use satrs::pool::SharedStaticMemoryPool;
use satrs::pus::test::PusService17TestHandler; use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{ use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
FailParams, VerificationReporterWithSender, VerificationReportingProvider, use satrs::pus::EcssTcInSharedStoreConverter;
};
use satrs::pus::{ use satrs::pus::{
EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, MpscTcReceiver, MpscTmAsVecSender, EcssTcAndToken, EcssTcInMemConverter, EcssTcInVecConverter, EcssTmSenderCore, MpscTcReceiver,
MpscTmInSharedPoolSender, PusPacketHandlerResult, PusServiceHelper, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, PusPacketHandlerResult, PusServiceHelper,
PusTmAsVec, PusTmInPool, TmInSharedPoolSender,
}; };
use satrs::spacepackets::ecss::tc::PusTcReader; use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket; use satrs::spacepackets::ecss::PusPacket;
use satrs::spacepackets::time::cds::TimeProvider; use satrs::spacepackets::time::cds::CdsTime;
use satrs::spacepackets::time::TimeWriter; use satrs::spacepackets::time::TimeWriter;
use satrs::tmtc::tm_helper::SharedTmPool; use satrs_example::config::components::PUS_TEST_SERVICE;
use satrs::ChannelId; use satrs_example::config::{tmtc_err, TEST_EVENT};
use satrs::{events::EventU32, pus::EcssTcInSharedStoreConverter}; use std::sync::mpsc;
use satrs_example::config::{tmtc_err, TcReceiverId, TmSenderId, PUS_APID, TEST_EVENT};
use std::sync::mpsc::{self, Sender};
pub fn create_test_service_static( pub fn create_test_service_static(
shared_tm_store: SharedTmPool, tm_sender: TmInSharedPoolSender<mpsc::SyncSender<PusTmInPool>>,
tm_funnel_tx: mpsc::Sender<StoreAddr>,
verif_reporter: VerificationReporterWithSender,
tc_pool: SharedStaticMemoryPool, tc_pool: SharedStaticMemoryPool,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>, event_sender: mpsc::Sender<EventMessageU32>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>, pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> Service17CustomWrapper<EcssTcInSharedStoreConverter> { ) -> TestCustomServiceWrapper<MpscTmInSharedPoolSenderBounded, EcssTcInSharedStoreConverter> {
let test_srv_tm_sender = MpscTmInSharedPoolSender::new(
TmSenderId::PusTest as ChannelId,
"PUS_17_TM_SENDER",
shared_tm_store.clone(),
tm_funnel_tx.clone(),
);
let test_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusTest as ChannelId,
"PUS_17_TC_RECV",
pus_test_rx,
);
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
Box::new(test_srv_receiver), PUS_TEST_SERVICE.id(),
Box::new(test_srv_tm_sender), pus_test_rx,
PUS_APID, tm_sender,
verif_reporter.clone(), create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
EcssTcInSharedStoreConverter::new(tc_pool, 2048), EcssTcInSharedStoreConverter::new(tc_pool, 2048),
)); ));
Service17CustomWrapper { TestCustomServiceWrapper {
pus17_handler, handler: pus17_handler,
test_srv_event_sender: event_sender, test_srv_event_sender: event_sender,
} }
} }
pub fn create_test_service_dynamic( pub fn create_test_service_dynamic(
tm_funnel_tx: mpsc::Sender<Vec<u8>>, tm_funnel_tx: mpsc::Sender<PusTmAsVec>,
verif_reporter: VerificationReporterWithSender, event_sender: mpsc::Sender<EventMessageU32>,
event_sender: mpsc::Sender<(EventU32, Option<Params>)>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>, pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> Service17CustomWrapper<EcssTcInVecConverter> { ) -> TestCustomServiceWrapper<MpscTmAsVecSender, EcssTcInVecConverter> {
let test_srv_tm_sender = MpscTmAsVecSender::new(
TmSenderId::PusTest as ChannelId,
"PUS_17_TM_SENDER",
tm_funnel_tx.clone(),
);
let test_srv_receiver = MpscTcReceiver::new(
TcReceiverId::PusTest as ChannelId,
"PUS_17_TC_RECV",
pus_test_rx,
);
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new( let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
Box::new(test_srv_receiver), PUS_TEST_SERVICE.id(),
Box::new(test_srv_tm_sender), pus_test_rx,
PUS_APID, tm_funnel_tx,
verif_reporter.clone(), create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
EcssTcInVecConverter::default(), EcssTcInVecConverter::default(),
)); ));
Service17CustomWrapper { TestCustomServiceWrapper {
pus17_handler, handler: pus17_handler,
test_srv_event_sender: event_sender, test_srv_event_sender: event_sender,
} }
} }
pub struct Service17CustomWrapper<TcInMemConverter: EcssTcInMemConverter> { pub struct TestCustomServiceWrapper<
pub pus17_handler: PusService17TestHandler<TcInMemConverter, VerificationReporterWithSender>, TmSender: EcssTmSenderCore,
pub test_srv_event_sender: Sender<(EventU32, Option<Params>)>, TcInMemConverter: EcssTcInMemConverter,
> {
pub handler:
PusService17TestHandler<MpscTcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
pub test_srv_event_sender: mpsc::Sender<EventMessageU32>,
} }
impl<TcInMemConverter: EcssTcInMemConverter> Service17CustomWrapper<TcInMemConverter> { impl<TmSender: EcssTmSenderCore, TcInMemConverter: EcssTcInMemConverter>
pub fn handle_next_packet(&mut self) -> bool { TestCustomServiceWrapper<TmSender, TcInMemConverter>
let res = self.pus17_handler.handle_one_tc(); {
pub fn poll_and_handle_next_packet(&mut self, time_stamp: &[u8]) -> bool {
let res = self.handler.poll_and_handle_next_tc(time_stamp);
if res.is_err() { if res.is_err() {
warn!("PUS17 handler failed with error {:?}", res.unwrap_err()); warn!("PUS17 handler failed with error {:?}", res.unwrap_err());
return true; return true;
@ -108,40 +89,42 @@ impl<TcInMemConverter: EcssTcInMemConverter> Service17CustomWrapper<TcInMemConve
} }
PusPacketHandlerResult::CustomSubservice(subservice, token) => { PusPacketHandlerResult::CustomSubservice(subservice, token) => {
let (tc, _) = PusTcReader::new( let (tc, _) = PusTcReader::new(
self.pus17_handler self.handler
.service_helper .service_helper
.tc_in_mem_converter .tc_in_mem_converter
.tc_slice_raw(), .tc_slice_raw(),
) )
.unwrap(); .unwrap();
let time_stamper = TimeProvider::from_now_with_u16_days().unwrap(); let time_stamper = CdsTime::now_with_u16_days().unwrap();
let mut stamp_buf: [u8; 7] = [0; 7]; let mut stamp_buf: [u8; 7] = [0; 7];
time_stamper.write_to_bytes(&mut stamp_buf).unwrap(); time_stamper.write_to_bytes(&mut stamp_buf).unwrap();
if subservice == 128 { if subservice == 128 {
info!("Generating test event"); info!("Generating test event");
self.test_srv_event_sender self.test_srv_event_sender
.send((TEST_EVENT.into(), None)) .send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.expect("Sending test event failed"); .expect("Sending test event failed");
let start_token = self let start_token = self
.pus17_handler .handler
.service_helper .service_helper
.common .verif_reporter()
.verification_handler .start_success(self.handler.service_helper.tm_sender(), token, &stamp_buf)
.start_success(token, &stamp_buf)
.expect("Error sending start success"); .expect("Error sending start success");
self.pus17_handler self.handler
.service_helper .service_helper
.common .verif_reporter()
.verification_handler .completion_success(
.completion_success(start_token, &stamp_buf) self.handler.service_helper.tm_sender(),
start_token,
&stamp_buf,
)
.expect("Error sending completion success"); .expect("Error sending completion success");
} else { } else {
let fail_data = [tc.subservice()]; let fail_data = [tc.subservice()];
self.pus17_handler self.handler
.service_helper .service_helper
.common .verif_reporter()
.verification_handler
.start_failure( .start_failure(
self.handler.service_helper.tm_sender(),
token, token,
FailParams::new( FailParams::new(
&stamp_buf, &stamp_buf,
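
The custom service 17 wrapper treats subservice 128 as a trigger for the test event, which it forwards to the event management task over an mpsc channel before sending the verification reports. A stripped-down, std-only sketch of that dispatch; TestEvent and the IDs are stand-ins, not the satrs EventMessage type.

use std::sync::mpsc;

// Illustrative stand-in for the event message forwarded to the event management task.
#[derive(Debug, PartialEq, Eq)]
struct TestEvent {
    sender_id: u32,
    event_id: u32,
}

fn handle_custom_subservice(subservice: u8, event_tx: &mpsc::Sender<TestEvent>) {
    if subservice == 128 {
        // Subservice 128 triggers the test event instead of a regular ping reply.
        event_tx
            .send(TestEvent { sender_id: 17, event_id: 0 })
            .expect("sending test event failed");
    } else {
        // Unknown subservice: the real wrapper reports a start failure here.
        println!("unknown test subservice {subservice}");
    }
}

fn main() {
    let (event_tx, event_rx) = mpsc::channel();
    handle_custom_subservice(128, &event_tx);
    assert_eq!(
        event_rx.recv().unwrap(),
        TestEvent { sender_id: 17, event_id: 0 }
    );
}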


@ -1,94 +1,152 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::sync::mpsc; use std::sync::mpsc;
use derive_new::new; use log::warn;
use satrs::action::ActionRequest; use satrs::action::ActionRequest;
use satrs::hk::HkRequest; use satrs::hk::HkRequest;
use satrs::mode::ModeRequest; use satrs::mode::ModeRequest;
use satrs::pus::action::PusActionRequestRouter; use satrs::pus::verification::{
use satrs::pus::hk::PusHkRequestRouter; FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
use satrs::pus::verification::{TcStateAccepted, VerificationToken}; };
use satrs::pus::GenericRoutingError; use satrs::pus::{ActiveRequestProvider, EcssTmSenderCore, GenericRoutingError, PusRequestRouter};
use satrs::queue::GenericSendError; use satrs::queue::GenericSendError;
use satrs::TargetId; use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::ComponentId;
use satrs_example::config::components::PUS_ROUTING_SERVICE;
use satrs_example::config::tmtc_err;
#[allow(dead_code)] #[derive(Clone, Debug)]
#[derive(Clone, Eq, PartialEq, Debug)]
#[non_exhaustive] #[non_exhaustive]
pub enum Request { pub enum CompositeRequest {
Hk(HkRequest), Hk(HkRequest),
Mode(ModeRequest),
Action(ActionRequest), Action(ActionRequest),
} }
#[derive(Clone, Eq, PartialEq, Debug, new)] #[derive(Clone)]
pub struct TargetedRequest { pub struct GenericRequestRouter {
pub(crate) target_id: TargetId, pub id: ComponentId,
pub(crate) request: Request, // All messages which do not have a dedicated queue.
pub composite_router_map: HashMap<ComponentId, mpsc::Sender<GenericMessage<CompositeRequest>>>,
pub mode_router_map: HashMap<ComponentId, mpsc::Sender<GenericMessage<ModeRequest>>>,
} }
#[derive(Clone, Eq, PartialEq, Debug)] impl Default for GenericRequestRouter {
pub struct RequestWithToken { fn default() -> Self {
pub(crate) targeted_request: TargetedRequest,
pub(crate) token: VerificationToken<TcStateAccepted>,
}
impl RequestWithToken {
pub fn new(
target_id: TargetId,
request: Request,
token: VerificationToken<TcStateAccepted>,
) -> Self {
Self { Self {
targeted_request: TargetedRequest::new(target_id, request), id: PUS_ROUTING_SERVICE.raw(),
token, composite_router_map: Default::default(),
mode_router_map: Default::default(),
} }
} }
} }
impl GenericRequestRouter {
#[derive(Default, Clone)] pub(crate) fn handle_error_generic(
pub struct GenericRequestRouter(pub HashMap<TargetId, mpsc::Sender<RequestWithToken>>); &self,
active_request: &impl ActiveRequestProvider,
impl PusHkRequestRouter for GenericRequestRouter { tc: &PusTcReader,
error: GenericRoutingError,
tm_sender: &(impl EcssTmSenderCore + ?Sized),
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) {
warn!(
"Routing request for service {} failed: {error:?}",
tc.service()
);
let accepted_token: VerificationToken<TcStateAccepted> = active_request
.token()
.try_into()
.expect("token is not in accepted state");
match error {
GenericRoutingError::UnknownTargetId(id) => {
let apid_target_id = UniqueApidTargetId::from(id);
warn!("Target APID for request: {}", apid_target_id.apid);
warn!("Target Unique ID for request: {}", apid_target_id.unique_id);
let mut fail_data: [u8; 8] = [0; 8];
fail_data.copy_from_slice(&id.to_be_bytes());
verif_reporter
.completion_failure(
tm_sender,
accepted_token,
FailParams::new(time_stamp, &tmtc_err::UNKNOWN_TARGET_ID, &fail_data),
)
.expect("Sending start failure failed");
}
GenericRoutingError::Send(_) => {
let mut fail_data: [u8; 8] = [0; 8];
fail_data.copy_from_slice(&active_request.target_id().to_be_bytes());
verif_reporter
.completion_failure(
tm_sender,
accepted_token,
FailParams::new(time_stamp, &tmtc_err::ROUTING_ERROR, &fail_data),
)
.expect("Sending start failure failed");
}
}
}
}
impl PusRequestRouter<HkRequest> for GenericRequestRouter {
type Error = GenericRoutingError; type Error = GenericRoutingError;
fn route( fn route(
&self, &self,
target_id: TargetId, requestor_info: MessageMetadata,
target_id: ComponentId,
hk_request: HkRequest, hk_request: HkRequest,
token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
if let Some(sender) = self.0.get(&target_id) { if let Some(sender) = self.composite_router_map.get(&target_id) {
sender sender
.send(RequestWithToken::new( .send(GenericMessage::new(
target_id, requestor_info,
Request::Hk(hk_request), CompositeRequest::Hk(hk_request),
token,
)) ))
.map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
return Ok(());
} }
Ok(()) Err(GenericRoutingError::UnknownTargetId(target_id))
} }
} }
impl PusActionRequestRouter for GenericRequestRouter { impl PusRequestRouter<ActionRequest> for GenericRequestRouter {
type Error = GenericRoutingError; type Error = GenericRoutingError;
fn route( fn route(
&self, &self,
target_id: TargetId, requestor_info: MessageMetadata,
target_id: ComponentId,
action_request: ActionRequest, action_request: ActionRequest,
token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
if let Some(sender) = self.0.get(&target_id) { if let Some(sender) = self.composite_router_map.get(&target_id) {
sender sender
.send(RequestWithToken::new( .send(GenericMessage::new(
target_id, requestor_info,
Request::Action(action_request), CompositeRequest::Action(action_request),
token,
)) ))
.map_err(|_| GenericRoutingError::SendError(GenericSendError::RxDisconnected))?; .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
return Ok(());
} }
Ok(()) Err(GenericRoutingError::UnknownTargetId(target_id))
}
}
impl PusRequestRouter<ModeRequest> for GenericRequestRouter {
type Error = GenericRoutingError;
fn route(
&self,
requestor_info: MessageMetadata,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), Self::Error> {
if let Some(sender) = self.mode_router_map.get(&target_id) {
sender
.send(GenericMessage::new(requestor_info, request))
.map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))?;
return Ok(());
}
Err(GenericRoutingError::UnknownTargetId(target_id))
} }
} }
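
GenericRequestRouter above is at its core a map from component IDs to request queues which fails with UnknownTargetId when no queue is registered for the target. A generic, std-only sketch of that routing pattern (RoutingError and the ID values are illustrative).

use std::collections::HashMap;
use std::sync::mpsc;

type ComponentId = u64;

#[derive(Debug)]
enum RoutingError {
    UnknownTargetId(ComponentId),
    Send,
}

struct RequestRouter<Req> {
    router_map: HashMap<ComponentId, mpsc::Sender<Req>>,
}

impl<Req> RequestRouter<Req> {
    fn route(&self, target_id: ComponentId, request: Req) -> Result<(), RoutingError> {
        if let Some(sender) = self.router_map.get(&target_id) {
            sender.send(request).map_err(|_| RoutingError::Send)?;
            return Ok(());
        }
        Err(RoutingError::UnknownTargetId(target_id))
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut router = RequestRouter { router_map: HashMap::new() };
    router.router_map.insert(1, tx);
    router.route(1, "set mode").unwrap();
    assert_eq!(rx.recv().unwrap(), "set mode");
    assert!(matches!(router.route(2, "noop"), Err(RoutingError::UnknownTargetId(2))));
}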


@ -1,17 +1,17 @@
use std::{ use std::{
collections::VecDeque, collections::{HashSet, VecDeque},
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
use log::{info, warn}; use log::{info, warn};
use satrs::{ use satrs::{
hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer}, hal::std::tcp_server::{ServerConfig, TcpSpacepacketsServer},
pus::ReceivesEcssPusTc,
spacepackets::PacketId, spacepackets::PacketId,
tmtc::{CcsdsDistributor, CcsdsError, TmPacketSourceCore}, tmtc::{CcsdsDistributor, CcsdsError, ReceivesCcsdsTc, TmPacketSourceCore},
}; };
use satrs_example::config::PUS_APID;
pub const PACKET_ID_LOOKUP: &[PacketId] = &[PacketId::const_tc(true, PUS_APID)]; use crate::ccsds::CcsdsReceiver;
#[derive(Default, Clone)] #[derive(Default, Clone)]
pub struct SyncTcpTmSource { pub struct SyncTcpTmSource {
@ -69,28 +69,42 @@ impl TmPacketSourceCore for SyncTcpTmSource {
} }
} }
pub struct TcpTask<MpscErrorType: 'static> { pub type TcpServerType<TcSource, MpscErrorType> = TcpSpacepacketsServer<
server: TcpSpacepacketsServer< (),
(), CcsdsError<MpscErrorType>,
CcsdsError<MpscErrorType>, SyncTcpTmSource,
SyncTcpTmSource, CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
CcsdsDistributor<MpscErrorType>, HashSet<PacketId>,
>, >;
pub struct TcpTask<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static,
> {
server: TcpServerType<TcSource, MpscErrorType>,
} }
impl<MpscErrorType: 'static + core::fmt::Debug> TcpTask<MpscErrorType> { impl<
TcSource: ReceivesCcsdsTc<Error = MpscErrorType>
+ ReceivesEcssPusTc<Error = MpscErrorType>
+ Clone
+ Send
+ 'static,
MpscErrorType: 'static + core::fmt::Debug,
> TcpTask<TcSource, MpscErrorType>
{
pub fn new( pub fn new(
cfg: ServerConfig, cfg: ServerConfig,
tm_source: SyncTcpTmSource, tm_source: SyncTcpTmSource,
tc_receiver: CcsdsDistributor<MpscErrorType>, tc_receiver: CcsdsDistributor<CcsdsReceiver<TcSource, MpscErrorType>, MpscErrorType>,
packet_id_lookup: HashSet<PacketId>,
) -> Result<Self, std::io::Error> { ) -> Result<Self, std::io::Error> {
Ok(Self { Ok(Self {
server: TcpSpacepacketsServer::new( server: TcpSpacepacketsServer::new(cfg, tm_source, tc_receiver, packet_id_lookup)?,
cfg,
tm_source,
tc_receiver,
Box::new(PACKET_ID_LOOKUP),
)?,
}) })
} }
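
The TCP server now receives a HashSet<PacketId> and only forwards space packets whose packet ID is in that set. A minimal sketch of this kind of filtering on raw CCSDS headers, using a plain u16 set instead of the spacepackets PacketId type (assumed header layout: the first 16-bit word holds the version, type and secondary header flags plus the 11-bit APID).

use std::collections::HashSet;

// Extract the packet ID: the first 16-bit word of the CCSDS primary header.
fn packet_id(raw: &[u8]) -> Option<u16> {
    if raw.len() < 6 {
        return None;
    }
    Some(u16::from_be_bytes([raw[0], raw[1]]))
}

fn accept_packet(lookup: &HashSet<u16>, raw: &[u8]) -> bool {
    packet_id(raw).map_or(false, |id| lookup.contains(&id))
}

fn main() {
    // Packet ID for a TC packet (type and secondary header flags set) with APID 0x002.
    let mut lookup = HashSet::new();
    lookup.insert(0x1802);
    let ping_tc_header = [0x18, 0x02, 0xc0, 0x00, 0x00, 0x06];
    assert!(accept_packet(&lookup, &ping_tc_header));
    assert!(!accept_packet(&lookup, &[0x18, 0x03, 0xc0, 0x00, 0x00, 0x06]));
}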


@ -1,11 +1,12 @@
use std::{ use std::{
collections::HashMap, collections::HashMap,
sync::mpsc::{Receiver, Sender}, sync::mpsc::{self},
}; };
use log::info; use log::info;
use satrs::pus::{PusTmAsVec, PusTmInPool};
use satrs::{ use satrs::{
pool::{PoolProvider, StoreAddr}, pool::PoolProvider,
seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore}, seq_count::{CcsdsSimpleSeqCountProvider, SequenceCountProviderCore},
spacepackets::{ spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket}, ecss::{tm::PusTmZeroCopyWriter, PusPacket},
@ -77,16 +78,16 @@ impl TmFunnelCommon {
pub struct TmFunnelStatic { pub struct TmFunnelStatic {
common: TmFunnelCommon, common: TmFunnelCommon,
shared_tm_store: SharedTmPool, shared_tm_store: SharedTmPool,
tm_funnel_rx: Receiver<StoreAddr>, tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
tm_server_tx: Sender<StoreAddr>, tm_server_tx: mpsc::SyncSender<PusTmInPool>,
} }
impl TmFunnelStatic { impl TmFunnelStatic {
pub fn new( pub fn new(
shared_tm_store: SharedTmPool, shared_tm_store: SharedTmPool,
sync_tm_tcp_source: SyncTcpTmSource, sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: Receiver<StoreAddr>, tm_funnel_rx: mpsc::Receiver<PusTmInPool>,
tm_server_tx: Sender<StoreAddr>, tm_server_tx: mpsc::SyncSender<PusTmInPool>,
) -> Self { ) -> Self {
Self { Self {
common: TmFunnelCommon::new(sync_tm_tcp_source), common: TmFunnelCommon::new(sync_tm_tcp_source),
@ -97,14 +98,14 @@ impl TmFunnelStatic {
} }
pub fn operation(&mut self) { pub fn operation(&mut self) {
if let Ok(addr) = self.tm_funnel_rx.recv() { if let Ok(pus_tm_in_pool) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update // Read the TM, set sequence counter and message counter, and finally update
// the CRC. // the CRC.
let shared_pool = self.shared_tm_store.clone_backing_pool(); let shared_pool = self.shared_tm_store.clone_backing_pool();
let mut pool_guard = shared_pool.write().expect("Locking TM pool failed"); let mut pool_guard = shared_pool.write().expect("Locking TM pool failed");
let mut tm_copy = Vec::new(); let mut tm_copy = Vec::new();
pool_guard pool_guard
.modify(&addr, |buf| { .modify(&pus_tm_in_pool.store_addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN) let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed"); .expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer); self.common.apply_packet_processing(zero_copy_writer);
@ -112,7 +113,7 @@ impl TmFunnelStatic {
}) })
.expect("Reading TM from pool failed"); .expect("Reading TM from pool failed");
self.tm_server_tx self.tm_server_tx
.send(addr) .send(pus_tm_in_pool)
.expect("Sending TM to server failed"); .expect("Sending TM to server failed");
// We could also do this step in the update closure, but I'd rather avoid this, could // We could also do this step in the update closure, but I'd rather avoid this, could
// lead to nested locking. // lead to nested locking.
@ -123,15 +124,15 @@ impl TmFunnelStatic {
pub struct TmFunnelDynamic { pub struct TmFunnelDynamic {
common: TmFunnelCommon, common: TmFunnelCommon,
tm_funnel_rx: Receiver<Vec<u8>>, tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: Sender<Vec<u8>>, tm_server_tx: mpsc::Sender<PusTmAsVec>,
} }
impl TmFunnelDynamic { impl TmFunnelDynamic {
pub fn new( pub fn new(
sync_tm_tcp_source: SyncTcpTmSource, sync_tm_tcp_source: SyncTcpTmSource,
tm_funnel_rx: Receiver<Vec<u8>>, tm_funnel_rx: mpsc::Receiver<PusTmAsVec>,
tm_server_tx: Sender<Vec<u8>>, tm_server_tx: mpsc::Sender<PusTmAsVec>,
) -> Self { ) -> Self {
Self { Self {
common: TmFunnelCommon::new(sync_tm_tcp_source), common: TmFunnelCommon::new(sync_tm_tcp_source),
@ -144,13 +145,13 @@ impl TmFunnelDynamic {
if let Ok(mut tm) = self.tm_funnel_rx.recv() { if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update // Read the TM, set sequence counter and message counter, and finally update
// the CRC. // the CRC.
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm, MIN_CDS_FIELD_LEN) let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed"); .expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer); self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
self.tm_server_tx self.tm_server_tx
.send(tm.clone()) .send(tm)
.expect("Sending TM to server failed"); .expect("Sending TM to server failed");
self.common.sync_tm_tcp_source.add_tm(&tm);
} }
} }
} }
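
The TM funnel receives every generated TM packet, patches the CCSDS sequence count, and forwards the packet to the TM server and the TCP source. A reduced, std-only sketch of that flow with a plain wrapping counter; unlike the real funnel it skips the PUS message counter and the CRC update.

use std::sync::mpsc;

struct TmFunnel {
    seq_count: u16,
    tm_server_tx: mpsc::Sender<Vec<u8>>,
}

impl TmFunnel {
    fn operation(&mut self, mut tm: Vec<u8>) {
        // Patch the 14-bit sequence count into bytes 2..4 of the CCSDS header,
        // keeping the sequence flags in the upper two bits.
        let seq_flags = tm[2] & 0xC0;
        let count = self.seq_count & 0x3FFF;
        tm[2] = seq_flags | (count >> 8) as u8;
        tm[3] = (count & 0xFF) as u8;
        self.seq_count = self.seq_count.wrapping_add(1) & 0x3FFF;
        // The real funnel would also bump the PUS message counter and update the CRC here.
        self.tm_server_tx.send(tm).expect("sending TM to server failed");
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let mut funnel = TmFunnel { seq_count: 1, tm_server_tx: tx };
    funnel.operation(vec![0x08, 0x01, 0xC0, 0x00, 0x00, 0x06, 0x00]);
    let sent = rx.recv().unwrap();
    assert_eq!(sent[2] & 0x3F, 0x00);
    assert_eq!(sent[3], 0x01);
}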


@ -1,7 +1,9 @@
use log::warn; use log::warn;
use satrs::pus::{EcssTcAndToken, ReceivesEcssPusTc}; use satrs::pus::{
EcssTcAndToken, MpscTmAsVecSender, MpscTmInSharedPoolSenderBounded, ReceivesEcssPusTc,
};
use satrs::spacepackets::SpHeader; use satrs::spacepackets::SpHeader;
use std::sync::mpsc::{self, Receiver, SendError, Sender, TryRecvError}; use std::sync::mpsc::{self, Receiver, SendError, Sender, SyncSender, TryRecvError};
use thiserror::Error; use thiserror::Error;
use crate::pus::PusReceiver; use crate::pus::PusReceiver;
@ -37,7 +39,7 @@ impl SharedTcPool {
#[derive(Clone)] #[derive(Clone)]
pub struct PusTcSourceProviderSharedPool { pub struct PusTcSourceProviderSharedPool {
pub tc_source: Sender<StoreAddr>, pub tc_source: SyncSender<StoreAddr>,
pub shared_pool: SharedTcPool, pub shared_pool: SharedTcPool,
} }
@ -97,14 +99,14 @@ pub struct TcSourceTaskStatic {
shared_tc_pool: SharedTcPool, shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>, tc_receiver: Receiver<StoreAddr>,
tc_buf: [u8; 4096], tc_buf: [u8; 4096],
pus_receiver: PusReceiver, pus_receiver: PusReceiver<MpscTmInSharedPoolSenderBounded>,
} }
impl TcSourceTaskStatic { impl TcSourceTaskStatic {
pub fn new( pub fn new(
shared_tc_pool: SharedTcPool, shared_tc_pool: SharedTcPool,
tc_receiver: Receiver<StoreAddr>, tc_receiver: Receiver<StoreAddr>,
pus_receiver: PusReceiver, pus_receiver: PusReceiver<MpscTmInSharedPoolSenderBounded>,
) -> Self { ) -> Self {
Self { Self {
shared_tc_pool, shared_tc_pool,
@ -161,11 +163,14 @@ impl TcSourceTaskStatic {
// TC source components where the heap is the backing memory of the received telecommands. // TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic { pub struct TcSourceTaskDynamic {
pub tc_receiver: Receiver<Vec<u8>>, pub tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver, pus_receiver: PusReceiver<MpscTmAsVecSender>,
} }
impl TcSourceTaskDynamic { impl TcSourceTaskDynamic {
pub fn new(tc_receiver: Receiver<Vec<u8>>, pus_receiver: PusReceiver) -> Self { pub fn new(
tc_receiver: Receiver<Vec<u8>>,
pus_receiver: PusReceiver<MpscTmAsVecSender>,
) -> Self {
Self { Self {
tc_receiver, tc_receiver,
pus_receiver, pus_receiver,
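
A recurring theme of this refactoring is that components such as PusReceiver are now generic over the TM sender type instead of holding boxed trait objects, so the same task code serves both the static (bounded, shared-pool) and dynamic (heap Vec) setups. A small, std-only sketch of that parameterization; the TmSender trait and the types below are illustrative, not the satrs EcssTmSenderCore API.

use std::sync::mpsc;

// Illustrative TM sender abstraction.
trait TmSender {
    fn send_tm(&self, tm: Vec<u8>);
}

// "Dynamic" variant: unbounded channel carrying heap-allocated packets.
struct VecTmSender(mpsc::Sender<Vec<u8>>);
impl TmSender for VecTmSender {
    fn send_tm(&self, tm: Vec<u8>) {
        self.0.send(tm).expect("TM receiver disconnected");
    }
}

// "Static" variant: bounded channel, standing in for the shared-pool sender.
struct BoundedTmSender(mpsc::SyncSender<Vec<u8>>);
impl TmSender for BoundedTmSender {
    fn send_tm(&self, tm: Vec<u8>) {
        self.0.send(tm).expect("TM receiver disconnected");
    }
}

// The receiver task is written once, generic over the sender.
struct PusReceiverSketch<Sender: TmSender> {
    tm_sender: Sender,
}

impl<Sender: TmSender> PusReceiverSketch<Sender> {
    fn handle_tc(&self, _tc: &[u8]) {
        // Verification reports and service TM would be generated here.
        self.tm_sender.send_tm(vec![0x08, 0x01]);
    }
}

fn main() {
    let (tx, rx) = mpsc::sync_channel(16);
    let static_receiver = PusReceiverSketch { tm_sender: BoundedTmSender(tx) };
    static_receiver.handle_tc(&[0x18, 0x02]);
    assert_eq!(rx.recv().unwrap(), vec![0x08, 0x01]);

    let (tx2, rx2) = mpsc::channel();
    let dynamic_receiver = PusReceiverSketch { tm_sender: VecTmSender(tx2) };
    dynamic_receiver.handle_tc(&[0x18, 0x02]);
    assert_eq!(rx2.recv().unwrap(), vec![0x08, 0x01]);
}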


@ -1,12 +1,11 @@
use std::{ use std::net::{SocketAddr, UdpSocket};
net::{SocketAddr, UdpSocket}, use std::sync::mpsc;
sync::mpsc::Receiver,
};
use log::{info, warn}; use log::{info, warn};
use satrs::pus::{PusTmAsVec, PusTmInPool};
use satrs::{ use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer}, hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool, StoreAddr}, pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
tmtc::CcsdsError, tmtc::CcsdsError,
}; };
@ -15,20 +14,20 @@ pub trait UdpTmHandler {
} }
pub struct StaticUdpTmHandler { pub struct StaticUdpTmHandler {
pub tm_rx: Receiver<StoreAddr>, pub tm_rx: mpsc::Receiver<PusTmInPool>,
pub tm_store: SharedStaticMemoryPool, pub tm_store: SharedStaticMemoryPool,
} }
impl UdpTmHandler for StaticUdpTmHandler { impl UdpTmHandler for StaticUdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, &recv_addr: &SocketAddr) {
while let Ok(addr) = self.tm_rx.try_recv() { while let Ok(pus_tm_in_pool) = self.tm_rx.try_recv() {
let store_lock = self.tm_store.write(); let store_lock = self.tm_store.write();
if store_lock.is_err() { if store_lock.is_err() {
warn!("Locking TM store failed"); warn!("Locking TM store failed");
continue; continue;
} }
let mut store_lock = store_lock.unwrap(); let mut store_lock = store_lock.unwrap();
let pg = store_lock.read_with_guard(addr); let pg = store_lock.read_with_guard(pus_tm_in_pool.store_addr);
let read_res = pg.read_as_vec(); let read_res = pg.read_as_vec();
if read_res.is_err() { if read_res.is_err() {
warn!("Error reading TM pool data"); warn!("Error reading TM pool data");
@ -44,20 +43,20 @@ impl UdpTmHandler for StaticUdpTmHandler {
} }
pub struct DynamicUdpTmHandler { pub struct DynamicUdpTmHandler {
pub tm_rx: Receiver<Vec<u8>>, pub tm_rx: mpsc::Receiver<PusTmAsVec>,
} }
impl UdpTmHandler for DynamicUdpTmHandler { impl UdpTmHandler for DynamicUdpTmHandler {
fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) { fn send_tm_to_udp_client(&mut self, socket: &UdpSocket, recv_addr: &SocketAddr) {
while let Ok(tm) = self.tm_rx.try_recv() { while let Ok(tm) = self.tm_rx.try_recv() {
if tm.len() > 9 { if tm.packet.len() > 9 {
let service = tm[7]; let service = tm.packet[7];
let subservice = tm[8]; let subservice = tm.packet[8];
info!("Sending PUS TM[{service},{subservice}]") info!("Sending PUS TM[{service},{subservice}]")
} else { } else {
info!("Sending PUS TM"); info!("Sending PUS TM");
} }
let result = socket.send_to(&tm, recv_addr); let result = socket.send_to(&tm.packet, recv_addr);
if let Err(e) = result { if let Err(e) = result {
warn!("Sending TM with UDP socket failed: {e}") warn!("Sending TM with UDP socket failed: {e}")
} }
@ -120,7 +119,7 @@ mod tests {
}, },
tmtc::ReceivesTcCore, tmtc::ReceivesTcCore,
}; };
use satrs_example::config::{OBSW_SERVER_ADDR, PUS_APID}; use satrs_example::config::{components, OBSW_SERVER_ADDR};
use super::*; use super::*;
@ -178,8 +177,8 @@ mod tests {
udp_tc_server, udp_tc_server,
tm_handler, tm_handler,
}; };
let mut sph = SpHeader::tc_unseg(PUS_APID, 0, 0).unwrap(); let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true) let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
.to_vec() .to_vec()
.unwrap(); .unwrap();
let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed"); let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");
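
The dynamic UDP TM handler drains its TM channel and sends each packet to the last known client address, logging the PUS service and subservice bytes. A self-contained, std-only sketch of that loop, with a loopback socket pair standing in for the ground client.

use std::net::UdpSocket;
use std::sync::mpsc;

fn send_tm_to_udp_client(
    tm_rx: &mpsc::Receiver<Vec<u8>>,
    socket: &UdpSocket,
    recv_addr: std::net::SocketAddr,
) {
    while let Ok(tm) = tm_rx.try_recv() {
        if tm.len() > 9 {
            // Bytes 7 and 8 of a PUS TM packet hold the service and subservice.
            println!("Sending PUS TM[{},{}]", tm[7], tm[8]);
        }
        if let Err(e) = socket.send_to(&tm, recv_addr) {
            eprintln!("Sending TM with UDP socket failed: {e}");
        }
    }
}

fn main() -> std::io::Result<()> {
    let server = UdpSocket::bind("127.0.0.1:0")?;
    let client = UdpSocket::bind("127.0.0.1:0")?;
    let (tm_tx, tm_rx) = mpsc::channel();
    // Dummy 15-byte packet with service 17 and subservice 2 at bytes 7 and 8.
    tm_tx
        .send(vec![0x08, 0x01, 0xC0, 0x00, 0x00, 0x0B, 0x20, 17, 2, 0, 0, 0, 0, 0, 0])
        .unwrap();
    send_tm_to_udp_client(&tm_rx, &server, client.local_addr()?);
    let mut buf = [0u8; 64];
    let (len, _) = client.recv_from(&mut buf)?;
    assert_eq!(len, 15);
    Ok(())
}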


@ -23,7 +23,8 @@ version = "1"
optional = true optional = true
[dependencies.satrs-shared] [dependencies.satrs-shared]
version = "0.1.2" path = "../satrs-shared"
version = "0.1.3"
features = ["serde"] features = ["serde"]
[dependencies.satrs-mib-codegen] [dependencies.satrs-mib-codegen]


@ -26,7 +26,10 @@ features = ["full"]
[dev-dependencies] [dev-dependencies]
trybuild = { version = "1", features = ["diff"] } trybuild = { version = "1", features = ["diff"] }
satrs-shared = "0.1.2"
[dev-dependencies.satrs-shared]
version = "0.1.3"
path = "../../satrs-shared"
[dev-dependencies.satrs-mib] [dev-dependencies.satrs-mib]
path = ".." path = ".."

satrs-minisim/Cargo.toml Normal file

@ -0,0 +1,21 @@
[package]
name = "satrs-minisim"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
log = "0.4"
thiserror = "1"
[dependencies.asynchronix]
version = "0.2.1"
[dependencies.satrs]
path = "../satrs"
[dev-dependencies]
delegate = "0.12"

satrs-minisim/src/acs.rs Normal file

@ -0,0 +1,338 @@
use std::{f32::consts::PI, sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
acs::{MgmReply, MgmSensorValues, MgtDipole, MgtHkSet, MgtReply, MGT_GEN_MAGNETIC_FIELD},
SimReply,
};
use crate::time::current_millis;
// Earth's magnetic field varies between -30 uT and 30 uT
const AMPLITUDE_MGM: f32 = 0.03;
// Lets start with a simple frequency here.
const FREQUENCY_MGM: f32 = 1.0;
const PHASE_X: f32 = 0.0;
// Different phases to have different values on the other axes.
const PHASE_Y: f32 = 0.1;
const PHASE_Z: f32 = 0.2;
/// Simple model for a magnetometer where the measured magnetic fields are modeled with sine waves.
///
/// Please note that a more realistic MGM model would include the following components
/// which are not included here to simplify the model:
///
/// 1. It would probably generate signed [i16] values which need to be converted to SI units
/// because it is a digital sensor
/// 2. It would sample the magnetic field at a high fixed rate. This might not be possible for
/// a general purpose OS, but self-sampling at a relatively high rate (20-40 ms) might
/// still be possible.
pub struct MagnetometerModel {
pub switch_state: SwitchStateBinary,
pub periodicity: Duration,
pub external_mag_field: Option<MgmSensorValues>,
pub reply_sender: mpsc::Sender<SimReply>,
}
impl MagnetometerModel {
pub fn new(periodicity: Duration, reply_sender: mpsc::Sender<SimReply>) -> Self {
Self {
switch_state: SwitchStateBinary::Off,
periodicity,
external_mag_field: None,
reply_sender,
}
}
pub async fn switch_device(&mut self, switch_state: SwitchStateBinary) {
self.switch_state = switch_state;
}
pub async fn send_sensor_values(&mut self, _: (), scheduler: &Scheduler<Self>) {
self.reply_sender
.send(SimReply::new(MgmReply {
switch_state: self.switch_state,
sensor_values: self.calculate_current_mgm_tuple(current_millis(scheduler.time())),
}))
.expect("sending MGM sensor values failed");
}
// Devices like magnetorquers generate a strong magnetic field which overrides the default
// model for the measured magnetic field.
pub async fn apply_external_magnetic_field(&mut self, field: MgmSensorValues) {
self.external_mag_field = Some(field);
}
fn calculate_current_mgm_tuple(&self, time_ms: u64) -> MgmSensorValues {
if SwitchStateBinary::On == self.switch_state {
if let Some(ext_field) = self.external_mag_field {
return ext_field;
}
let base_sin_val = 2.0 * PI * FREQUENCY_MGM * (time_ms as f32 / 1000.0);
return MgmSensorValues {
x: AMPLITUDE_MGM * (base_sin_val + PHASE_X).sin(),
y: AMPLITUDE_MGM * (base_sin_val + PHASE_Y).sin(),
z: AMPLITUDE_MGM * (base_sin_val + PHASE_Z).sin(),
};
}
MgmSensorValues {
x: 0.0,
y: 0.0,
z: 0.0,
}
}
}
impl Model for MagnetometerModel {}
pub struct MagnetorquerModel {
switch_state: SwitchStateBinary,
torquing: bool,
torque_dipole: MgtDipole,
pub gen_magnetic_field: Output<MgmSensorValues>,
reply_sender: mpsc::Sender<SimReply>,
}
impl MagnetorquerModel {
pub fn new(reply_sender: mpsc::Sender<SimReply>) -> Self {
Self {
switch_state: SwitchStateBinary::Off,
torquing: false,
torque_dipole: MgtDipole::default(),
gen_magnetic_field: Output::new(),
reply_sender,
}
}
pub async fn apply_torque(
&mut self,
duration_and_dipole: (Duration, MgtDipole),
scheduler: &Scheduler<Self>,
) {
self.torque_dipole = duration_and_dipole.1;
self.torquing = true;
if scheduler
.schedule_event(duration_and_dipole.0, Self::clear_torque, ())
.is_err()
{
log::warn!("torque clearing can only be set for a future time.");
}
self.generate_magnetic_field(()).await;
}
pub async fn clear_torque(&mut self, _: ()) {
self.torque_dipole = MgtDipole::default();
self.torquing = false;
self.generate_magnetic_field(()).await;
}
pub async fn switch_device(&mut self, switch_state: SwitchStateBinary) {
self.switch_state = switch_state;
self.generate_magnetic_field(()).await;
}
pub async fn request_housekeeping_data(&mut self, _: (), scheduler: &Scheduler<Self>) {
if self.switch_state != SwitchStateBinary::On {
return;
}
scheduler
.schedule_event(Duration::from_millis(15), Self::send_housekeeping_data, ())
.expect("requesting housekeeping data failed")
}
pub fn send_housekeeping_data(&mut self) {
self.reply_sender
.send(SimReply::new(MgtReply::Hk(MgtHkSet {
dipole: self.torque_dipole,
torquing: self.torquing,
})))
.unwrap();
}
fn calc_magnetic_field(&self, _: MgtDipole) -> MgmSensorValues {
// Simplified model: Just returns some fixed magnetic field for now.
// Later, we could make this more fancy by incorporating the commanded dipole.
MGT_GEN_MAGNETIC_FIELD
}
/// A torquing magnetorquer generates a magnetic field. This function can be used to apply
/// the magnetic field.
async fn generate_magnetic_field(&mut self, _: ()) {
if self.switch_state != SwitchStateBinary::On || !self.torquing {
return;
}
self.gen_magnetic_field
.send(self.calc_magnetic_field(self.torque_dipole))
.await;
}
}
impl Model for MagnetorquerModel {}
#[cfg(test)]
pub mod tests {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
acs::{MgmReply, MgmRequest, MgtDipole, MgtHkSet, MgtReply, MgtRequest},
eps::PcduSwitch,
SerializableSimMsgPayload, SimMessageProvider, SimRequest, SimTarget,
};
use crate::{eps::tests::switch_device_on, test_helpers::SimTestbench};
#[test]
fn test_basic_mgm_request() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Mgm);
let reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
assert_eq!(reply.switch_state, SwitchStateBinary::Off);
assert_eq!(reply.sensor_values.x, 0.0);
assert_eq!(reply.sensor_values.y, 0.0);
assert_eq!(reply.sensor_values.z, 0.0);
}
#[test]
fn test_basic_mgm_request_switched_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgm);
let mut request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let mut sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let mut sim_reply = sim_reply_res.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Mgm);
let first_reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
sim_testbench.step_by(Duration::from_millis(50));
request = SimRequest::new(MgmRequest::RequestSensorData);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
sim_reply = sim_reply_res.unwrap();
let second_reply = MgmReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
// Check that the values are changing.
assert!(first_reply != second_reply);
}
#[test]
fn test_basic_mgt_request_is_off() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_none());
}
#[test]
fn test_basic_mgt_request_is_on() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
let mgt_reply = MgtReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
match mgt_reply {
MgtReply::Hk(hk) => {
assert_eq!(hk.dipole, MgtDipole::default());
assert!(!hk.torquing);
}
_ => panic!("unexpected reply"),
}
}
fn check_mgt_hk(sim_testbench: &mut SimTestbench, expected_hk_set: MgtHkSet) {
let request = SimRequest::new(MgtRequest::RequestHk);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply_res = sim_testbench.try_receive_next_reply();
assert!(sim_reply_res.is_some());
let sim_reply = sim_reply_res.unwrap();
let mgt_reply = MgtReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
match mgt_reply {
MgtReply::Hk(hk) => {
assert_eq!(hk, expected_hk_set);
}
_ => panic!("unexpected reply"),
}
}
#[test]
fn test_basic_mgt_request_is_on_and_torquing() {
let mut sim_testbench = SimTestbench::new();
switch_device_on(&mut sim_testbench, PcduSwitch::Mgt);
let commanded_dipole = MgtDipole {
x: -200,
y: 200,
z: 1000,
};
let request = SimRequest::new(MgtRequest::ApplyTorque {
duration: Duration::from_millis(100),
dipole: commanded_dipole,
});
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step_by(Duration::from_millis(5));
check_mgt_hk(
&mut sim_testbench,
MgtHkSet {
dipole: commanded_dipole,
torquing: true,
},
);
sim_testbench.step_by(Duration::from_millis(100));
check_mgt_hk(
&mut sim_testbench,
MgtHkSet {
dipole: MgtDipole::default(),
torquing: false,
},
);
}
}
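
For reference, the magnetometer model above derives its three axis values from phase-shifted sine waves over the simulation time and returns zeros while the device is switched off. The same computation isolated as a plain function, with the constants copied from the model.

use std::f32::consts::PI;

const AMPLITUDE_MGM: f32 = 0.03; // roughly 30 uT peak value
const FREQUENCY_MGM: f32 = 1.0;
const PHASE_X: f32 = 0.0;
const PHASE_Y: f32 = 0.1;
const PHASE_Z: f32 = 0.2;

fn mgm_tuple(time_ms: u64, switched_on: bool) -> (f32, f32, f32) {
    if !switched_on {
        return (0.0, 0.0, 0.0);
    }
    let base_sin_val = 2.0 * PI * FREQUENCY_MGM * (time_ms as f32 / 1000.0);
    (
        AMPLITUDE_MGM * (base_sin_val + PHASE_X).sin(),
        AMPLITUDE_MGM * (base_sin_val + PHASE_Y).sin(),
        AMPLITUDE_MGM * (base_sin_val + PHASE_Z).sin(),
    )
}

fn main() {
    assert_eq!(mgm_tuple(250, false), (0.0, 0.0, 0.0));
    let (x, y, z) = mgm_tuple(250, true);
    // A quarter period into the sine, the X axis is close to its maximum.
    assert!((x - AMPLITUDE_MGM).abs() < 1e-3);
    println!("MGM values at t=250 ms: x={x}, y={y}, z={z}");
}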


@ -0,0 +1,189 @@
use std::{sync::mpsc, time::Duration};
use asynchronix::{
simulation::{Address, Simulation},
time::{Clock, MonotonicTime, SystemClock},
};
use satrs_minisim::{
acs::{MgmRequest, MgtRequest},
eps::PcduRequest,
SerializableSimMsgPayload, SimCtrlReply, SimCtrlRequest, SimMessageProvider, SimReply,
SimRequest, SimRequestError, SimTarget,
};
use crate::{
acs::{MagnetometerModel, MagnetorquerModel},
eps::PcduModel,
};
// The simulation controller processes requests and drives the simulation.
pub struct SimController {
pub sys_clock: SystemClock,
pub request_receiver: mpsc::Receiver<SimRequest>,
pub reply_sender: mpsc::Sender<SimReply>,
pub simulation: Simulation,
pub mgm_addr: Address<MagnetometerModel>,
pub pcdu_addr: Address<PcduModel>,
pub mgt_addr: Address<MagnetorquerModel>,
}
impl SimController {
pub fn new(
sys_clock: SystemClock,
request_receiver: mpsc::Receiver<SimRequest>,
reply_sender: mpsc::Sender<SimReply>,
simulation: Simulation,
mgm_addr: Address<MagnetometerModel>,
pcdu_addr: Address<PcduModel>,
mgt_addr: Address<MagnetorquerModel>,
) -> Self {
Self {
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
}
}
pub fn run(&mut self, start_time: MonotonicTime, udp_polling_interval_ms: u64) {
let mut t = start_time + Duration::from_millis(udp_polling_interval_ms);
self.sys_clock.synchronize(t);
loop {
// Check for UDP requests every millisecond. Shift the simulator ahead here to prevent
// replies from lying in the past.
t += Duration::from_millis(udp_polling_interval_ms);
self.simulation
.step_until(t)
.expect("simulation step failed");
self.handle_sim_requests();
self.sys_clock.synchronize(t);
}
}
pub fn handle_sim_requests(&mut self) {
loop {
match self.request_receiver.try_recv() {
Ok(request) => {
if let Err(e) = match request.target() {
SimTarget::SimCtrl => self.handle_ctrl_request(&request),
SimTarget::Mgm => self.handle_mgm_request(&request),
SimTarget::Mgt => self.handle_mgt_request(&request),
SimTarget::Pcdu => self.handle_pcdu_request(&request),
} {
self.handle_invalid_request_with_valid_target(e, &request)
}
}
Err(e) => match e {
mpsc::TryRecvError::Empty => break,
mpsc::TryRecvError::Disconnected => {
panic!("all request sender disconnected")
}
},
}
}
}
fn handle_ctrl_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let sim_ctrl_request = SimCtrlRequest::from_sim_message(request)?;
match sim_ctrl_request {
SimCtrlRequest::Ping => {
self.reply_sender
.send(SimReply::new(SimCtrlReply::Pong))
.expect("sending reply from sim controller failed");
}
}
Ok(())
}
fn handle_mgm_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgm_request = MgmRequest::from_sim_message(request)?;
match mgm_request {
MgmRequest::RequestSensorData => {
self.simulation.send_event(
MagnetometerModel::send_sensor_values,
(),
&self.mgm_addr,
);
}
}
Ok(())
}
fn handle_pcdu_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let pcdu_request = PcduRequest::from_sim_message(request)?;
match pcdu_request {
PcduRequest::RequestSwitchInfo => {
self.simulation
.send_event(PcduModel::request_switch_info, (), &self.pcdu_addr);
}
PcduRequest::SwitchDevice { switch, state } => {
self.simulation.send_event(
PcduModel::switch_device,
(switch, state),
&self.pcdu_addr,
);
}
}
Ok(())
}
fn handle_mgt_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgt_request = MgtRequest::from_sim_message(request)?;
match mgt_request {
MgtRequest::ApplyTorque { duration, dipole } => self.simulation.send_event(
MagnetorquerModel::apply_torque,
(duration, dipole),
&self.mgt_addr,
),
MgtRequest::RequestHk => self.simulation.send_event(
MagnetorquerModel::request_housekeeping_data,
(),
&self.mgt_addr,
),
}
Ok(())
}
fn handle_invalid_request_with_valid_target(
&self,
error: SimRequestError,
request: &SimRequest,
) {
log::warn!(
"received invalid {:?} request: {:?}",
request.target(),
error
);
self.reply_sender
.send(SimReply::new(SimCtrlReply::from(error)))
.expect("sending reply from sim controller failed");
}
}
#[cfg(test)]
mod tests {
use crate::test_helpers::SimTestbench;
use super::*;
#[test]
fn test_basic_ping() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(SimCtrlRequest::Ping);
sim_testbench
.send_request(request)
.expect("sending sim ctrl request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::SimCtrl);
let reply = SimCtrlReply::from_sim_message(&sim_reply)
.expect("failed to deserialize MGM sensor values");
assert_eq!(reply, SimCtrlReply::Pong);
}
}
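
The simulation controller drains its request channel with try_recv until it is empty, dispatches each request by target, and treats a disconnected channel as fatal. A reduced, std-only sketch of that drain-and-dispatch loop; Target and Request are stand-ins for the SimRequest machinery.

use std::sync::mpsc;

#[derive(Debug)]
enum Target {
    SimCtrl,
    Mgm,
}

struct Request {
    target: Target,
}

fn handle_sim_requests(request_receiver: &mpsc::Receiver<Request>) {
    loop {
        match request_receiver.try_recv() {
            Ok(request) => match request.target {
                Target::SimCtrl => println!("handling sim controller request"),
                Target::Mgm => println!("handling MGM request"),
            },
            Err(mpsc::TryRecvError::Empty) => break,
            Err(mpsc::TryRecvError::Disconnected) => {
                panic!("all request senders disconnected")
            }
        }
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    tx.send(Request { target: Target::Mgm }).unwrap();
    tx.send(Request { target: Target::SimCtrl }).unwrap();
    handle_sim_requests(&rx);
}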

satrs-minisim/src/eps.rs Normal file

@ -0,0 +1,185 @@
use std::{collections::HashMap, sync::mpsc, time::Duration};
use asynchronix::{
model::{Model, Output},
time::Scheduler,
};
use satrs::power::SwitchStateBinary;
use satrs_minisim::{
eps::{PcduReply, PcduSwitch, SwitchMap},
SimReply,
};
pub const SWITCH_INFO_DELAY_MS: u64 = 10;
pub struct PcduModel {
pub switcher_map: SwitchMap,
pub mgm_switch: Output<SwitchStateBinary>,
pub mgt_switch: Output<SwitchStateBinary>,
pub reply_sender: mpsc::Sender<SimReply>,
}
impl PcduModel {
pub fn new(reply_sender: mpsc::Sender<SimReply>) -> Self {
let mut switcher_map = HashMap::new();
switcher_map.insert(PcduSwitch::Mgm, SwitchStateBinary::Off);
switcher_map.insert(PcduSwitch::Mgt, SwitchStateBinary::Off);
Self {
switcher_map,
mgm_switch: Output::new(),
mgt_switch: Output::new(),
reply_sender,
}
}
pub async fn request_switch_info(&mut self, _: (), scheduler: &Scheduler<Self>) {
scheduler
.schedule_event(
Duration::from_millis(SWITCH_INFO_DELAY_MS),
Self::send_switch_info,
(),
)
.expect("requesting switch info failed");
}
pub fn send_switch_info(&mut self) {
let reply = SimReply::new(PcduReply::SwitchInfo(self.switcher_map.clone()));
self.reply_sender.send(reply).unwrap();
}
pub async fn switch_device(
&mut self,
switch_and_target_state: (PcduSwitch, SwitchStateBinary),
) {
let val = self
.switcher_map
.get_mut(&switch_and_target_state.0)
.unwrap_or_else(|| panic!("switch {:?} not found", switch_and_target_state.0));
*val = switch_and_target_state.1;
match switch_and_target_state.0 {
PcduSwitch::Mgm => {
self.mgm_switch.send(switch_and_target_state.1).await;
}
PcduSwitch::Mgt => {
self.mgt_switch.send(switch_and_target_state.1).await;
}
}
}
}
impl Model for PcduModel {}
#[cfg(test)]
pub(crate) mod tests {
use super::*;
use std::time::Duration;
use satrs_minisim::{
eps::PcduRequest, SerializableSimMsgPayload, SimMessageProvider, SimRequest, SimTarget,
};
use crate::test_helpers::SimTestbench;
fn switch_device(
sim_testbench: &mut SimTestbench,
switch: PcduSwitch,
target: SwitchStateBinary,
) {
let request = SimRequest::new(PcduRequest::SwitchDevice {
switch,
state: target,
});
sim_testbench
.send_request(request)
.expect("sending MGM switch request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
}
#[allow(dead_code)]
pub(crate) fn switch_device_off(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
switch_device(sim_testbench, switch, SwitchStateBinary::Off);
}
pub(crate) fn switch_device_on(sim_testbench: &mut SimTestbench, switch: PcduSwitch) {
switch_device(sim_testbench, switch, SwitchStateBinary::On);
}
pub(crate) fn get_all_off_switch_map() -> SwitchMap {
let mut switcher_map = SwitchMap::new();
switcher_map.insert(super::PcduSwitch::Mgm, super::SwitchStateBinary::Off);
switcher_map.insert(super::PcduSwitch::Mgt, super::SwitchStateBinary::Off);
switcher_map
}
fn check_switch_state(sim_testbench: &mut SimTestbench, expected_switch_map: &SwitchMap) {
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step();
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Pcdu);
let pcdu_reply = PcduReply::from_sim_message(&sim_reply)
.expect("failed to deserialize PCDU switch info");
match pcdu_reply {
PcduReply::SwitchInfo(switch_map) => {
assert_eq!(switch_map, *expected_switch_map);
}
}
}
fn test_pcdu_switching_single_switch(switch: PcduSwitch, target: SwitchStateBinary) {
let mut sim_testbench = SimTestbench::new();
switch_device(&mut sim_testbench, switch, target);
let mut switcher_map = get_all_off_switch_map();
*switcher_map.get_mut(&switch).unwrap() = target;
check_switch_state(&mut sim_testbench, &switcher_map);
}
#[test]
fn test_pcdu_switcher_request() {
let mut sim_testbench = SimTestbench::new();
let request = SimRequest::new(PcduRequest::RequestSwitchInfo);
sim_testbench
.send_request(request)
.expect("sending MGM request failed");
sim_testbench.handle_sim_requests();
sim_testbench.step_by(Duration::from_millis(1));
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_none());
// The reply is only sent after SWITCH_INFO_DELAY_MS, so step ahead far enough to receive it.
sim_testbench.step_by(Duration::from_millis(25));
let sim_reply = sim_testbench.try_receive_next_reply();
assert!(sim_reply.is_some());
let sim_reply = sim_reply.unwrap();
assert_eq!(sim_reply.target(), SimTarget::Pcdu);
let pcdu_reply = PcduReply::from_sim_message(&sim_reply)
.expect("failed to deserialize PCDU switch info");
match pcdu_reply {
PcduReply::SwitchInfo(switch_map) => {
assert_eq!(switch_map, get_all_off_switch_map());
}
}
}
#[test]
fn test_pcdu_switching_mgm_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgm, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_on() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
}
#[test]
fn test_pcdu_switching_mgt_off() {
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::On);
test_pcdu_switching_single_switch(PcduSwitch::Mgt, SwitchStateBinary::Off);
}
}
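
The PCDU model above boils down to a map from switch identifiers to binary switch states, updated on SwitchDevice requests and reported back on RequestSwitchInfo. The core bookkeeping reduced to std types (illustrative; the real model additionally drives the per-device switch outputs and delays its reply).

use std::collections::HashMap;

#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum PcduSwitch {
    Mgm,
    Mgt,
}

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum SwitchState {
    Off,
    On,
}

type SwitchMap = HashMap<PcduSwitch, SwitchState>;

fn switch_device(map: &mut SwitchMap, switch: PcduSwitch, state: SwitchState) {
    let entry = map
        .get_mut(&switch)
        .unwrap_or_else(|| panic!("switch {:?} not found", switch));
    *entry = state;
}

fn main() {
    let mut switcher_map = SwitchMap::new();
    switcher_map.insert(PcduSwitch::Mgm, SwitchState::Off);
    switcher_map.insert(PcduSwitch::Mgt, SwitchState::Off);
    switch_device(&mut switcher_map, PcduSwitch::Mgt, SwitchState::On);
    assert_eq!(switcher_map[&PcduSwitch::Mgt], SwitchState::On);
    assert_eq!(switcher_map[&PcduSwitch::Mgm], SwitchState::Off);
}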

satrs-minisim/src/lib.rs Normal file

@ -0,0 +1,383 @@
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimTarget {
SimCtrl,
Mgm,
Mgt,
Pcdu,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimMessage {
pub target: SimTarget,
pub payload: String,
}
/// A generic simulation request type. Right now, the payload data is expected to be
/// JSON, which might be changed in the future.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimRequest {
inner: SimMessage,
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimMessageType {
Request,
Reply,
}
/// Generic trait implemented by simulation request or reply payloads. It ties the request or
/// reply to a specific target and provides an API which does boilerplate tasks like checking the
/// validity of the target.
pub trait SerializableSimMsgPayload<P: SimMessageProvider>:
Serialize + DeserializeOwned + Sized
{
const TARGET: SimTarget;
fn from_sim_message(sim_message: &P) -> Result<Self, SimMessageError<P>> {
if sim_message.target() == Self::TARGET {
return Ok(serde_json::from_str(sim_message.payload())?);
}
Err(SimMessageError::TargetRequestMissmatch(sim_message.clone()))
}
}
pub trait SimMessageProvider: Serialize + DeserializeOwned + Clone + Sized {
fn msg_type(&self) -> SimMessageType;
fn target(&self) -> SimTarget;
fn payload(&self) -> &String;
fn from_raw_data(data: &[u8]) -> serde_json::Result<Self> {
serde_json::from_slice(data)
}
}
impl SimRequest {
pub fn new<T: SerializableSimMsgPayload<SimRequest>>(serializable_request: T) -> Self {
Self {
inner: SimMessage {
target: T::TARGET,
payload: serde_json::to_string(&serializable_request).unwrap(),
},
}
}
}
impl SimMessageProvider for SimRequest {
fn target(&self) -> SimTarget {
self.inner.target
}
fn payload(&self) -> &String {
&self.inner.payload
}
fn msg_type(&self) -> SimMessageType {
SimMessageType::Request
}
}
/// A generic simulation reply type. Right now, the payload data is expected to be
/// JSON, which might be changed in the future.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct SimReply {
inner: SimMessage,
}
impl SimReply {
pub fn new<T: SerializableSimMsgPayload<SimReply>>(serializable_reply: T) -> Self {
Self {
inner: SimMessage {
target: T::TARGET,
payload: serde_json::to_string(&serializable_reply).unwrap(),
},
}
}
}
impl SimMessageProvider for SimReply {
fn target(&self) -> SimTarget {
self.inner.target
}
fn payload(&self) -> &String {
&self.inner.payload
}
fn msg_type(&self) -> SimMessageType {
SimMessageType::Reply
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimCtrlRequest {
Ping,
}
impl SerializableSimMsgPayload<SimRequest> for SimCtrlRequest {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
pub type SimReplyError = SimMessageError<SimReply>;
pub type SimRequestError = SimMessageError<SimRequest>;
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimMessageError<P> {
SerdeJson(String),
TargetRequestMissmatch(P),
}
impl<P> From<serde_json::Error> for SimMessageError<P> {
fn from(error: serde_json::Error) -> SimMessageError<P> {
SimMessageError::SerdeJson(error.to_string())
}
}
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum SimCtrlReply {
Pong,
InvalidRequest(SimRequestError),
}
impl SerializableSimMsgPayload<SimReply> for SimCtrlReply {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
impl From<SimRequestError> for SimCtrlReply {
fn from(error: SimRequestError) -> Self {
SimCtrlReply::InvalidRequest(error)
}
}
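// Illustrative sketch (not part of the original module): the target check built into
// `from_sim_message` rejects payload types which do not belong to the message target.
#[cfg(test)]
mod target_mismatch_example {
    use super::*;

    #[test]
    fn wrong_payload_type_is_rejected() {
        let reply = SimReply::new(SimCtrlReply::Pong);
        // `MgmReply` is tied to `SimTarget::Mgm`, so extraction from a `SimCtrl` reply fails.
        let result = acs::MgmReply::from_sim_message(&reply);
        assert!(matches!(
            result,
            Err(SimMessageError::TargetRequestMissmatch(_))
        ));
    }
}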
pub mod eps {
use super::*;
use std::collections::HashMap;
use satrs::power::SwitchStateBinary;
pub type SwitchMap = HashMap<PcduSwitch, SwitchStateBinary>;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub enum PcduSwitch {
Mgm = 0,
Mgt = 1,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum PcduRequest {
SwitchDevice {
switch: PcduSwitch,
state: SwitchStateBinary,
},
RequestSwitchInfo,
}
impl SerializableSimMsgPayload<SimRequest> for PcduRequest {
const TARGET: SimTarget = SimTarget::Pcdu;
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum PcduReply {
SwitchInfo(SwitchMap),
}
impl SerializableSimMsgPayload<SimReply> for PcduReply {
const TARGET: SimTarget = SimTarget::Pcdu;
}
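// Illustrative sketch (not part of the original module): building a PCDU request which
// switches the MGM power switch on. Assumes the `On` variant of `satrs::power::SwitchStateBinary`.
#[cfg(test)]
mod pcdu_request_example {
    use super::{PcduRequest, PcduSwitch};
    use crate::{SerializableSimMsgPayload, SimRequest};
    use satrs::power::SwitchStateBinary;

    #[test]
    fn switch_mgm_on_request() {
        let request = SimRequest::new(PcduRequest::SwitchDevice {
            switch: PcduSwitch::Mgm,
            state: SwitchStateBinary::On,
        });
        let recovered =
            PcduRequest::from_sim_message(&request).expect("deserialization failed");
        assert!(matches!(recovered, PcduRequest::SwitchDevice { .. }));
    }
}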
}
pub mod acs {
use std::time::Duration;
use satrs::power::SwitchStateBinary;
use super::*;
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgmRequest {
RequestSensorData,
}
impl SerializableSimMsgPayload<SimRequest> for MgmRequest {
const TARGET: SimTarget = SimTarget::Mgm;
}
// Normally, small magnetometers generate their output as signed 16-bit raw values which need
// to be converted to signed float values with physical units. We simplify this here and
// generate the signed float values directly.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MgmSensorValues {
pub x: f32,
pub y: f32,
pub z: f32,
}
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct MgmReply {
pub switch_state: SwitchStateBinary,
pub sensor_values: MgmSensorValues,
}
impl SerializableSimMsgPayload<SimReply> for MgmReply {
const TARGET: SimTarget = SimTarget::Mgm;
}
pub const MGT_GEN_MAGNETIC_FIELD: MgmSensorValues = MgmSensorValues {
x: 0.03,
y: -0.03,
z: 0.03,
};
// Simple model using i16 values.
#[derive(Default, Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MgtDipole {
pub x: i16,
pub y: i16,
pub z: i16,
}
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub enum MgtRequestType {
ApplyTorque,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgtRequest {
ApplyTorque {
duration: Duration,
dipole: MgtDipole,
},
RequestHk,
}
impl SerializableSimMsgPayload<SimRequest> for MgtRequest {
const TARGET: SimTarget = SimTarget::Mgt;
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub struct MgtHkSet {
pub dipole: MgtDipole,
pub torquing: bool,
}
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub enum MgtReply {
Ack(MgtRequestType),
Nak(MgtRequestType),
Hk(MgtHkSet),
}
impl SerializableSimMsgPayload<SimReply> for MgtReply {
const TARGET: SimTarget = SimTarget::Mgt;
}
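// Illustrative sketch (not part of the original module): building an MGT torque request with a
// hypothetical dipole and duration and recovering it from the generic message.
#[cfg(test)]
mod mgt_request_example {
    use super::{MgtDipole, MgtRequest};
    use crate::{SerializableSimMsgPayload, SimRequest};
    use std::time::Duration;

    #[test]
    fn apply_torque_request() {
        let request = SimRequest::new(MgtRequest::ApplyTorque {
            duration: Duration::from_millis(100),
            dipole: MgtDipole { x: 200, y: 0, z: -200 },
        });
        let recovered =
            MgtRequest::from_sim_message(&request).expect("deserialization failed");
        assert!(matches!(recovered, MgtRequest::ApplyTorque { .. }));
    }
}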
}
pub mod udp {
use std::{
net::{SocketAddr, UdpSocket},
time::Duration,
};
use thiserror::Error;
use crate::{SimReply, SimRequest};
#[derive(Error, Debug)]
pub enum ReceptionError {
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
#[error("Serde JSON error: {0}")]
SerdeJson(#[from] serde_json::Error),
}
pub struct SimUdpClient {
socket: UdpSocket,
pub reply_buf: [u8; 4096],
}
impl SimUdpClient {
pub fn new(
server_addr: &SocketAddr,
non_blocking: bool,
read_timeout_ms: Option<u64>,
) -> std::io::Result<Self> {
let socket = UdpSocket::bind("127.0.0.1:0")?;
socket.set_nonblocking(non_blocking)?;
socket
.connect(server_addr)
.expect("could not connect to server addr");
if let Some(read_timeout) = read_timeout_ms {
// Set a read timeout so the test does not hang on failures.
socket.set_read_timeout(Some(Duration::from_millis(read_timeout)))?;
}
Ok(Self {
socket,
reply_buf: [0; 4096],
})
}
pub fn set_nonblocking(&self, non_blocking: bool) -> std::io::Result<()> {
self.socket.set_nonblocking(non_blocking)
}
pub fn set_read_timeout(&self, read_timeout_ms: u64) -> std::io::Result<()> {
self.socket
.set_read_timeout(Some(Duration::from_millis(read_timeout_ms)))
}
pub fn send_request(&self, sim_request: &SimRequest) -> std::io::Result<usize> {
self.socket.send(
&serde_json::to_vec(sim_request).expect("conversion of request to vector failed"),
)
}
pub fn recv_raw(&mut self) -> std::io::Result<usize> {
self.socket.recv(&mut self.reply_buf)
}
pub fn recv_sim_reply(&mut self) -> Result<SimReply, ReceptionError> {
let read_len = self.recv_raw()?;
Ok(serde_json::from_slice(&self.reply_buf[0..read_len])?)
}
}
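// Illustrative sketch (not part of the original module): a one-shot ping helper built on top of
// the client. The concrete server address and the 200 ms read timeout are hypothetical values.
pub fn ping_simulator(server_addr: &SocketAddr) -> Result<SimReply, ReceptionError> {
    let mut client = SimUdpClient::new(server_addr, false, Some(200))?;
    client.send_request(&SimRequest::new(crate::SimCtrlRequest::Ping))?;
    client.recv_sim_reply()
}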
}
#[cfg(test)]
pub mod tests {
use super::*;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DummyRequest {
Ping,
}
impl SerializableSimMsgPayload<SimRequest> for DummyRequest {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
pub enum DummyReply {
Pong,
}
impl SerializableSimMsgPayload<SimReply> for DummyReply {
const TARGET: SimTarget = SimTarget::SimCtrl;
}
#[test]
fn test_basic_request() {
let sim_request = SimRequest::new(DummyRequest::Ping);
assert_eq!(sim_request.target(), SimTarget::SimCtrl);
assert_eq!(sim_request.msg_type(), SimMessageType::Request);
let dummy_request =
DummyRequest::from_sim_message(&sim_request).expect("deserialization failed");
assert_eq!(dummy_request, DummyRequest::Ping);
}
#[test]
fn test_basic_reply() {
let sim_reply = SimReply::new(DummyReply::Pong);
assert_eq!(sim_reply.target(), SimTarget::SimCtrl);
assert_eq!(sim_reply.msg_type(), SimMessageType::Reply);
let dummy_request =
DummyReply::from_sim_message(&sim_reply).expect("deserialization failed");
assert_eq!(dummy_request, DummyReply::Pong);
}
}

103
satrs-minisim/src/main.rs Normal file
View File

@ -0,0 +1,103 @@
use acs::{MagnetometerModel, MagnetorquerModel};
use asynchronix::simulation::{Mailbox, SimInit};
use asynchronix::time::{MonotonicTime, SystemClock};
use controller::SimController;
use eps::PcduModel;
use satrs_minisim::{SimReply, SimRequest};
use std::sync::mpsc;
use std::thread;
use std::time::{Duration, SystemTime};
use udp::SimUdpServer;
mod acs;
mod controller;
mod eps;
#[cfg(test)]
mod test_helpers;
mod time;
mod udp;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum ThreadingModel {
Default = 0,
Single = 1,
}
fn create_sim_controller(
threading_model: ThreadingModel,
start_time: MonotonicTime,
reply_sender: mpsc::Sender<SimReply>,
request_receiver: mpsc::Receiver<SimRequest>,
) -> SimController {
// Instantiate models and their mailboxes.
let mgm_model = MagnetometerModel::new(Duration::from_millis(50), reply_sender.clone());
let mgm_mailbox = Mailbox::new();
let mgm_addr = mgm_mailbox.address();
let pcdu_mailbox = Mailbox::new();
let pcdu_addr = pcdu_mailbox.address();
let mgt_mailbox = Mailbox::new();
let mgt_addr = mgt_mailbox.address();
let mut pcdu_model = PcduModel::new(reply_sender.clone());
pcdu_model
.mgm_switch
.connect(MagnetometerModel::switch_device, &mgm_addr);
let mut mgt_model = MagnetorquerModel::new(reply_sender.clone());
// Input connections.
pcdu_model
.mgt_switch
.connect(MagnetorquerModel::switch_device, &mgt_addr);
// Output connections.
mgt_model
.gen_magnetic_field
.connect(MagnetometerModel::apply_external_magnetic_field, &mgm_addr);
// Instantiate the simulator
let sys_clock = SystemClock::from_system_time(start_time, SystemTime::now());
let sim_init = if threading_model == ThreadingModel::Single {
SimInit::with_num_threads(1)
} else {
SimInit::new()
};
let simulation = sim_init
.add_model(mgm_model, mgm_mailbox)
.add_model(pcdu_model, pcdu_mailbox)
.add_model(mgt_model, mgt_mailbox)
.init(start_time);
SimController::new(
sys_clock,
request_receiver,
reply_sender,
simulation,
mgm_addr,
pcdu_addr,
mgt_addr,
)
}
fn main() {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let t0 = MonotonicTime::EPOCH;
let mut sim_ctrl =
create_sim_controller(ThreadingModel::Default, t0, reply_sender, request_receiver);
// This thread schedules the simulator.
let sim_thread = thread::spawn(move || {
sim_ctrl.run(t0, 1);
});
let mut udp_server = SimUdpServer::new(0, request_sender, reply_receiver, 200, None)
.expect("could not create UDP request server");
// This thread manages the simulator UDP server.
let udp_tc_thread = thread::spawn(move || {
udp_server.run();
});
sim_thread.join().expect("joining simulation thread failed");
udp_tc_thread
.join()
.expect("joining UDP server thread failed");
}

View File

@ -0,0 +1,56 @@
use delegate::delegate;
use std::{sync::mpsc, time::Duration};
use asynchronix::time::MonotonicTime;
use satrs_minisim::{SimReply, SimRequest};
use crate::{controller::SimController, create_sim_controller, ThreadingModel};
pub struct SimTestbench {
pub sim_controller: SimController,
pub reply_receiver: mpsc::Receiver<SimReply>,
pub request_sender: mpsc::Sender<SimRequest>,
}
impl SimTestbench {
pub fn new() -> Self {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let t0 = MonotonicTime::EPOCH;
let sim_ctrl =
create_sim_controller(ThreadingModel::Single, t0, reply_sender, request_receiver);
Self {
sim_controller: sim_ctrl,
reply_receiver,
request_sender,
}
}
delegate! {
to self.sim_controller {
pub fn handle_sim_requests(&mut self);
}
to self.sim_controller.simulation {
pub fn step(&mut self);
pub fn step_by(&mut self, duration: Duration);
}
}
pub fn send_request(&self, request: SimRequest) -> Result<(), mpsc::SendError<SimRequest>> {
self.request_sender.send(request)
}
pub fn try_receive_next_reply(&self) -> Option<SimReply> {
match self.reply_receiver.try_recv() {
Ok(reply) => Some(reply),
Err(e) => {
if e == mpsc::TryRecvError::Empty {
None
} else {
panic!("reply_receiver disconnected");
}
}
}
}
}
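// Illustrative sketch (not part of the original file): a typical test flow with the testbench.
// Whether a Pong reply is already available after one request handling pass and one simulation
// step depends on the controller implementation, so that part is only checked opportunistically.
#[cfg(test)]
mod testbench_usage_example {
    use super::SimTestbench;
    use satrs_minisim::{SerializableSimMsgPayload, SimCtrlReply, SimCtrlRequest, SimRequest};

    #[test]
    fn ping_through_testbench() {
        let mut tb = SimTestbench::new();
        tb.send_request(SimRequest::new(SimCtrlRequest::Ping))
            .expect("sending request failed");
        // Let the controller pick up the request and advance the simulation once.
        tb.handle_sim_requests();
        tb.step();
        if let Some(reply) = tb.try_receive_next_reply() {
            let pong = SimCtrlReply::from_sim_message(&reply).expect("invalid reply");
            assert_eq!(pong, SimCtrlReply::Pong);
        }
    }
}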

View File

@ -0,0 +1,5 @@
use asynchronix::time::MonotonicTime;
pub fn current_millis(time: MonotonicTime) -> u64 {
(time.as_secs() as u64 * 1000) + (time.subsec_nanos() as u64 / 1_000_000)
}

390
satrs-minisim/src/udp.rs Normal file
View File

@ -0,0 +1,390 @@
use std::{
collections::VecDeque,
io::ErrorKind,
net::{SocketAddr, UdpSocket},
sync::{atomic::AtomicBool, mpsc, Arc},
time::Duration,
};
use satrs_minisim::{SimMessageProvider, SimReply, SimRequest};
// A UDP server which handles all telecommands (TCs) received from a client application.
pub struct SimUdpServer {
socket: UdpSocket,
request_sender: mpsc::Sender<SimRequest>,
// shared_last_sender: SharedSocketAddr,
reply_receiver: mpsc::Receiver<SimReply>,
reply_queue: VecDeque<SimReply>,
max_num_replies: usize,
// Stop signal to stop the server. Required for unit tests and useful to allow a clean
// shutdown of the application.
stop_signal: Option<Arc<AtomicBool>>,
idle_sleep_period_ms: u64,
req_buf: [u8; 4096],
sender_addr: Option<SocketAddr>,
}
impl SimUdpServer {
pub fn new(
local_port: u16,
request_sender: mpsc::Sender<SimRequest>,
reply_receiver: mpsc::Receiver<SimReply>,
max_num_replies: usize,
stop_signal: Option<Arc<AtomicBool>>,
) -> std::io::Result<Self> {
let socket = UdpSocket::bind(SocketAddr::from(([0, 0, 0, 0], local_port)))?;
socket.set_nonblocking(true)?;
Ok(Self {
socket,
request_sender,
reply_receiver,
reply_queue: VecDeque::new(),
max_num_replies,
stop_signal,
idle_sleep_period_ms: 3,
req_buf: [0; 4096],
sender_addr: None,
})
}
#[allow(dead_code)]
pub fn server_addr(&self) -> std::io::Result<SocketAddr> {
self.socket.local_addr()
}
pub fn run(&mut self) {
loop {
if let Some(stop_signal) = &self.stop_signal {
if stop_signal.load(std::sync::atomic::Ordering::Relaxed) {
break;
}
}
let processed_requests = self.process_requests();
let processed_replies = self.process_replies();
let sent_replies = self.send_replies();
// Sleep for a bit if there is nothing to do to prevent burning CPU cycles. Delay
// should be kept short to ensure responsiveness of the system.
if !processed_requests && !processed_replies && !sent_replies {
std::thread::sleep(Duration::from_millis(self.idle_sleep_period_ms));
}
}
}
fn process_requests(&mut self) -> bool {
let mut processed_requests = false;
loop {
// The socket is non-blocking, so this returns immediately with `WouldBlock` if no data is
// available, which allows doing periodic work like checking the stop signal.
let (bytes_read, src) = match self.socket.recv_from(&mut self.req_buf) {
Ok((bytes_read, src)) => (bytes_read, src),
Err(e) if e.kind() == ErrorKind::WouldBlock => {
// Continue to perform regular checks like the stop signal.
break;
}
Err(e) => {
// Handle unexpected errors (e.g., socket closed) here.
log::error!("unexpected request server error: {e}");
break;
}
};
self.sender_addr = Some(src);
let sim_req = SimRequest::from_raw_data(&self.req_buf[..bytes_read]);
if sim_req.is_err() {
log::warn!(
"received UDP request with invalid format: {}",
sim_req.unwrap_err()
);
return processed_requests;
}
self.request_sender.send(sim_req.unwrap()).unwrap();
processed_requests = true;
}
processed_requests
}
fn process_replies(&mut self) -> bool {
let mut processed_replies = false;
loop {
match self.reply_receiver.try_recv() {
Ok(reply) => {
if self.reply_queue.len() >= self.max_num_replies {
self.reply_queue.pop_front();
}
self.reply_queue.push_back(reply);
processed_replies = true;
}
Err(e) => match e {
mpsc::TryRecvError::Empty => return processed_replies,
mpsc::TryRecvError::Disconnected => {
log::error!("all UDP reply senders disconnected")
}
},
}
}
}
fn send_replies(&mut self) -> bool {
if self.sender_addr.is_none() {
return false;
}
let mut sent_replies = false;
while !self.reply_queue.is_empty() {
let next_reply_to_send = self.reply_queue.pop_front().unwrap();
self.socket
.send_to(
serde_json::to_string(&next_reply_to_send)
.unwrap()
.as_bytes(),
self.sender_addr.unwrap(),
)
.expect("sending reply failed");
sent_replies = true;
}
sent_replies
}
}
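// Illustrative sketch (not part of the original file): wiring the server into a thread with a
// stop signal, mirroring what `main.rs` and the tests below do. The reply cache size of 128 is
// a hypothetical example value.
#[allow(dead_code)]
fn spawn_server_example() -> std::io::Result<()> {
    use std::sync::atomic::Ordering;

    let (request_sender, _request_receiver) = mpsc::channel();
    let (_reply_sender, reply_receiver) = mpsc::channel();
    let stop_signal = Arc::new(AtomicBool::new(false));
    // Bind to an ephemeral port; `server_addr()` can be used to retrieve the actual address.
    let mut server =
        SimUdpServer::new(0, request_sender, reply_receiver, 128, Some(stop_signal.clone()))?;
    let handle = std::thread::spawn(move || server.run());
    // ... drive the simulation, then request a clean shutdown.
    stop_signal.store(true, Ordering::Relaxed);
    handle.join().expect("joining UDP server thread failed");
    Ok(())
}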
#[cfg(test)]
mod tests {
use std::{
io::ErrorKind,
sync::{
atomic::{AtomicBool, Ordering},
mpsc, Arc,
},
time::Duration,
};
use satrs_minisim::{
eps::{PcduReply, PcduRequest},
udp::{ReceptionError, SimUdpClient},
SimCtrlReply, SimCtrlRequest, SimReply, SimRequest,
};
use crate::eps::tests::get_all_off_switch_map;
use delegate::delegate;
use super::SimUdpServer;
// Wait time to ensure even possibly laggy systems like CI servers can run the tests.
const SERVER_WAIT_TIME_MS: u64 = 50;
struct UdpTestbench {
client: SimUdpClient,
stop_signal: Arc<AtomicBool>,
request_receiver: mpsc::Receiver<SimRequest>,
reply_sender: mpsc::Sender<SimReply>,
}
impl UdpTestbench {
pub fn new(
client_non_blocking: bool,
client_read_timeout_ms: Option<u64>,
max_num_replies: usize,
) -> std::io::Result<(Self, SimUdpServer)> {
let (request_sender, request_receiver) = mpsc::channel();
let (reply_sender, reply_receiver) = mpsc::channel();
let stop_signal = Arc::new(AtomicBool::new(false));
let server = SimUdpServer::new(
0,
request_sender,
reply_receiver,
max_num_replies,
Some(stop_signal.clone()),
)?;
let server_addr = server.server_addr()?;
Ok((
Self {
client: SimUdpClient::new(
&server_addr,
client_non_blocking,
client_read_timeout_ms,
)?,
stop_signal,
request_receiver,
reply_sender,
},
server,
))
}
pub fn try_recv_request(&self) -> Result<SimRequest, mpsc::TryRecvError> {
self.request_receiver.try_recv()
}
pub fn stop(&self) {
self.stop_signal.store(true, Ordering::Relaxed);
}
pub fn send_reply(&self, sim_reply: &SimReply) {
self.reply_sender
.send(sim_reply.clone())
.expect("sending sim reply failed");
}
delegate! {
to self.client {
pub fn send_request(&self, sim_request: &SimRequest) -> std::io::Result<usize>;
pub fn recv_sim_reply(&mut self) -> Result<SimReply, ReceptionError>;
}
}
pub fn check_no_sim_reply_available(&mut self) {
if let Err(ReceptionError::Io(ref io_error)) = self.recv_sim_reply() {
if io_error.kind() == ErrorKind::WouldBlock {
// No reply available, which is the expected outcome here.
return;
} else {
// Handle unexpected errors (e.g., socket closed) here.
panic!("unexpected request server error: {io_error}");
}
}
panic!("unexpected reply available");
}
pub fn check_next_sim_reply(&mut self, expected_reply: &SimReply) {
match self.recv_sim_reply() {
Ok(received_sim_reply) => assert_eq!(expected_reply, &received_sim_reply),
Err(e) => match e {
ReceptionError::Io(ref io_error) => {
if io_error.kind() == ErrorKind::WouldBlock {
// A WouldBlock error means that no reply arrived in time.
panic!("no simulation reply received");
} else {
// Handle unexpected errors (e.g., socket closed) here.
panic!("unexpected request server error: {e}");
}
}
ReceptionError::SerdeJson(json_error) => {
panic!("unexpected JSON error: {json_error}");
}
},
}
}
}
#[test]
fn test_basic_udp_request_reception() {
let (udp_testbench, mut udp_server) =
UdpTestbench::new(true, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
let sim_request = SimRequest::new(PcduRequest::RequestSwitchInfo);
udp_testbench
.send_request(&sim_request)
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
// Check that the sim request has arrived and was forwarded.
let received_sim_request = udp_testbench
.try_recv_request()
.expect("did not receive request");
assert_eq!(sim_request, received_sim_request);
// Stop the server.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_reply_server() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(false, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
udp_testbench.check_next_sim_reply(&sim_reply);
// Stop the server.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_req_server_and_reply_sender() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(false, Some(SERVER_WAIT_TIME_MS), 10)
.expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// Send a ping so that the server knows the address of the client.
// Do not check that the request arrives on the receiver side; that is covered by another test.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
// Send a reply to the server, ensure it gets forwarded to the client.
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
// Now we check that the reply server can send back replies to the client.
udp_testbench.check_next_sim_reply(&sim_reply);
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_replies_client_unconnected() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(true, None, 10).expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// Send a reply to the server. The client is not connected, so it won't get forwarded.
let sim_reply = SimReply::new(PcduReply::SwitchInfo(get_all_off_switch_map()));
udp_testbench.send_reply(&sim_reply);
std::thread::sleep(Duration::from_millis(10));
udp_testbench.check_no_sim_reply_available();
// Connect by sending a ping.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
udp_testbench.check_next_sim_reply(&sim_reply);
// Now we check that the reply server can send back replies to the client.
udp_testbench.stop();
server_thread.join().unwrap();
}
#[test]
fn test_udp_reply_server_old_replies_overwritten() {
let (mut udp_testbench, mut udp_server) =
UdpTestbench::new(true, None, 3).expect("could not create testbench");
let server_thread = std::thread::spawn(move || udp_server.run());
// The server only caches up to 3 replies.
let sim_reply = SimReply::new(SimCtrlReply::Pong);
for _ in 0..4 {
udp_testbench.send_reply(&sim_reply);
}
std::thread::sleep(Duration::from_millis(20));
udp_testbench.check_no_sim_reply_available();
// Connect by sending a ping.
udp_testbench
.send_request(&SimRequest::new(SimCtrlRequest::Ping))
.expect("sending request failed");
std::thread::sleep(Duration::from_millis(SERVER_WAIT_TIME_MS));
for _ in 0..3 {
udp_testbench.check_next_sim_reply(&sim_reply);
}
udp_testbench.check_no_sim_reply_available();
udp_testbench.stop();
server_thread.join().unwrap();
}
}

View File

@@ -1,7 +1,7 @@
 [package]
 name = "satrs-shared"
 description = "Components shared by multiple sat-rs crates"
-version = "0.1.2"
+version = "0.1.3"
 edition = "2021"
 authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
 homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -18,7 +18,9 @@ default-features = false
 optional = true

 [dependencies.spacepackets]
-version = "0.10"
+git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
+version = "0.11.0-rc.2"
+branch = "main"
 default-features = false

 [features]

View File

@ -8,7 +8,71 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.0] - `spacepackets` v0.11.0
## Added
- Added `params::WritableToBeBytes::to_vec`.
- New `ComponentId` (`u64` typedef for now) which replaces former `TargetId` as a generic
way to identify components.
- Various abstraction and objects for targeted requests. This includes mode request/reply
types for actions, HK and modes.
- `VerificationReportingProvider::owner_id` method.
- Introduced generic `EventMessage` which is generic over the event type and the additional
parameter type. This message also contains the sender ID which can be useful for debugging
or application layer / FDIR logic.
## Changed
- `encoding::ccsds::PacketIdValidator` renamed to `ValidatorU16Id`, which lives in the crate root.
It can be used for both CCSDS packet ID and CCSDS APID validation.
- `EventManager::try_event_handling` now expects a mutable error handling closure instead of
returning the occurred errors.
- Renamed `EventManagerBase` to `EventReportCreator`
- Renamed `VerificationReporterCore` to `VerificationReportCreator`.
- Removed `VerificationReporterCore`. The high-level API exposed by `VerificationReporter` and
the low level API exposed by `VerificationReportCreator` should be sufficient for all use-cases.
- Refactored `EventManager` to heavily use generics instead of trait objects.
- `SendEventProvider` -> `EventSendProvider`. `id` trait method renamed to `channel_id`.
- `ListenerTable` -> `ListenerMapProvider`
- `SenderTable` -> `SenderMapProvider`
- There is an `EventManagerWithMpsc` and a `EventManagerWithBoundedMpsc` helper type now.
- Refactored ECSS TM sender abstractions to be generic over different message queue backends.
- Refactored Verification Reporter abstractions and implementation to be generic over the sender
instead of using trait objects.
- Renamed `WritableToBeBytes::raw_len` to `WritableToBeBytes::written_len` for consistency.
- `PusServiceProvider` renamed to `PusServiceDistributor` to make the purpose of the object
clearer.
- `PusServiceProvider::handle_pus_tc_packet` renamed to `PusServiceDistributor::distribute_packet`.
- `PusServiceDistributor` and `CcsdsDistributor` now use generics instead of trait objects.
This makes accessing the concrete trait implementations easier as well.
- Major overhaul of the PUS handling module.
- Replace `TargetId` by `ComponentId`.
- Replace most usages of `ChannelId` by `ComponentId`. A dedicated channel ID has limited usage
due to the nature of typed channels in Rust.
- `CheckTimer` renamed to `CountdownProvider`.
- Renamed `TargetId` to `ComponentId`.
- Replaced most `ChannelId` occurrences with `ComponentId`. For typed channels, there is generally
no need for dedicated channel IDs.
- Changed `params::WritableToBeBytes::raw_len` to `written_len` for consistency.
- `EventReporter` caches component ID.
- Renamed `PusService11SchedHandler` to `PusSchedServiceHandler`.
- Fixed general naming of PUS handlers from `handle_one_tc` to `poll_and_handle_next_tc`.
- Reworked verification module: The sender (`impl EcssTmSenderCore`)
now needs to be passed explicitly to the `VerificationReportingProvider` abstraction. This
allows easier sharing of the TM sender component.
## Fixed
- Update deprecated API for `PusScheduler::insert_wrapped_tc_cds_short`
and `PusScheduler::insert_wrapped_tc_cds_long`.
- `EventReporter` uses interior mutability pattern to allow non-mutable API.
## Removed
- Remove `objects` module.
# [v0.2.0-rc.0] 2024-02-21
## Added

View File

@@ -17,7 +17,10 @@ delegate = ">0.7, <=0.10"
 paste = "1"
 smallvec = "1"
 crc = "3"
-satrs-shared = "0.1.2"
+
+[dependencies.satrs-shared]
+version = "0.1.3"
+path = "../satrs-shared"

 [dependencies.num_enum]
 version = ">0.5, <=0.7"
@@ -68,7 +71,9 @@ features = ["all"]
 optional = true

 [dependencies.spacepackets]
-version = "0.10"
+git = "https://egit.irs.uni-stuttgart.de/rust/spacepackets.git"
+version = "0.11.0-rc.2"
+branch = "main"
 default-features = false

 [dependencies.cobs]
@@ -112,6 +117,7 @@ alloc = [
 serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
 crossbeam = ["crossbeam-channel"]
 heapless = ["dep:heapless"]
+test_util = []
 doc-images = []

 [package.metadata.docs.rs]

View File

@@ -4,11 +4,11 @@ Checklist for new releases
 # Pre-Release

 1. Make sure any new modules are documented sufficiently enough and check docs with
-   `cargo +nightly doc --all-features --config 'rustdocflags=["--cfg", "doc_cfg"]' --open`.
+   `cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]' --open`.
 2. Bump version specifier in `Cargo.toml`.
 3. Update `CHANGELOG.md`: Convert `unreleased` section into version section with date and add new
    `unreleased` section.
-4. Run `cargo test --all-features`.
+4. Run `cargo test --all-features` or `cargo nextest r --all-features` and `cargo test --doc`.
 5. Run `cargo fmt` and `cargo clippy`. Check `cargo msrv` against MSRV in `Cargo.toml`.
 6. Wait for CI/CD results for EGit and Github. These also check cross-compilation for bare-metal
    targets.

View File

@ -1,42 +1,68 @@
use crate::{pool::StoreAddr, TargetId}; use crate::{params::Params, pool::StoreAddr};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
pub type ActionId = u32; pub type ActionId = u32;
#[non_exhaustive] #[derive(Debug, Eq, PartialEq, Clone)]
#[derive(Clone, Eq, PartialEq, Debug)] pub struct ActionRequest {
pub enum ActionRequest { pub action_id: ActionId,
UnsignedIdAndStoreData { pub variant: ActionRequestVariant,
action_id: ActionId,
data_addr: StoreAddr,
},
#[cfg(feature = "alloc")]
UnsignedIdAndVecData {
action_id: ActionId,
data: alloc::vec::Vec<u8>,
},
#[cfg(feature = "alloc")]
StringIdAndVecData {
action_id: alloc::string::String,
data: alloc::vec::Vec<u8>,
},
#[cfg(feature = "alloc")]
StringIdAndStoreData {
action_id: alloc::string::String,
data: StoreAddr,
},
} }
#[derive(Debug, Clone, PartialEq, Eq)] impl ActionRequest {
pub struct TargetedActionRequest { pub fn new(action_id: ActionId, variant: ActionRequestVariant) -> Self {
target: TargetId, Self { action_id, variant }
action_request: ActionRequest,
}
impl TargetedActionRequest {
pub fn new(target: TargetId, action_request: ActionRequest) -> Self {
Self {
target,
action_request,
}
} }
} }
#[non_exhaustive]
#[derive(Clone, Eq, PartialEq, Debug)]
pub enum ActionRequestVariant {
NoData,
StoreData(StoreAddr),
#[cfg(feature = "alloc")]
VecData(alloc::vec::Vec<u8>),
}
#[derive(Debug, PartialEq, Clone)]
pub struct ActionReply {
pub action_id: ActionId,
pub variant: ActionReplyVariant,
}
/// A reply to an action request.
#[non_exhaustive]
#[derive(Clone, Debug, PartialEq)]
pub enum ActionReplyVariant {
CompletionFailed(Params),
StepFailed { step: u32, reason: Params },
Completed,
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
#[derive(Debug, Eq, PartialEq, Clone)]
pub struct ActionRequestStringId {
pub action_id: alloc::string::String,
pub variant: ActionRequestVariant,
}
impl ActionRequestStringId {
pub fn new(action_id: alloc::string::String, variant: ActionRequestVariant) -> Self {
Self { action_id, variant }
}
}
#[derive(Debug, PartialEq, Clone)]
pub struct ActionReplyStringId {
pub action_id: alloc::string::String,
pub variant: ActionReplyVariant,
}
}
#[cfg(test)]
mod tests {}
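For orientation, a minimal sketch of how the refactored request/reply pair fits together
(assuming the `satrs::action` module path shown in this diff; the action ID is a hypothetical
example value):

use satrs::action::{ActionReply, ActionReplyVariant, ActionRequest, ActionRequestVariant};

fn action_request_sketch() {
    // A request without additional data.
    let request = ActionRequest::new(5, ActionRequestVariant::NoData);
    // A handler would answer with a reply referencing the same action ID.
    let reply = ActionReply {
        action_id: request.action_id,
        variant: ActionReplyVariant::Completed,
    };
    assert_eq!(reply.action_id, 5);
}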

View File

@ -5,7 +5,7 @@ use std::path::{Path, PathBuf};
use super::{ use super::{
filestore::{FilestoreError, VirtualFilestore}, filestore::{FilestoreError, VirtualFilestore},
user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams}, user::{CfdpUser, FileSegmentRecvdParams, MetadataReceivedParams},
CheckTimer, CheckTimerCreator, EntityType, LocalEntityConfig, PacketInfo, PacketTarget, CheckTimerCreator, CountdownProvider, EntityType, LocalEntityConfig, PacketInfo, PacketTarget,
RemoteEntityConfig, RemoteEntityConfigProvider, State, TimerContext, TransactionId, RemoteEntityConfig, RemoteEntityConfigProvider, State, TimerContext, TransactionId,
TransactionStep, TransactionStep,
}; };
@ -54,7 +54,7 @@ struct TransferState {
completion_disposition: CompletionDisposition, completion_disposition: CompletionDisposition,
checksum: u32, checksum: u32,
current_check_count: u32, current_check_count: u32,
current_check_timer: Option<Box<dyn CheckTimer>>, current_check_timer: Option<Box<dyn CountdownProvider>>,
} }
impl Default for TransferState { impl Default for TransferState {
@ -799,9 +799,9 @@ mod tests {
}; };
use crate::cfdp::{ use crate::cfdp::{
filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimer, CheckTimerCreator, filestore::NativeFilestore, user::OwnedMetadataRecvdParams, CheckTimerCreator,
DefaultFaultHandler, IndicationConfig, RemoteEntityConfig, StdRemoteEntityConfigProvider, CountdownProvider, DefaultFaultHandler, IndicationConfig, RemoteEntityConfig,
UserFaultHandler, CRC_32, StdRemoteEntityConfigProvider, UserFaultHandler, CRC_32,
}; };
use super::*; use super::*;
@ -1057,7 +1057,7 @@ mod tests {
expired: Arc<AtomicBool>, expired: Arc<AtomicBool>,
} }
impl CheckTimer for TestCheckTimer { impl CountdownProvider for TestCheckTimer {
fn has_expired(&self) -> bool { fn has_expired(&self) -> bool {
self.expired.load(core::sync::atomic::Ordering::Relaxed) self.expired.load(core::sync::atomic::Ordering::Relaxed)
} }
@ -1088,7 +1088,10 @@ mod tests {
} }
impl CheckTimerCreator for TestCheckTimerCreator { impl CheckTimerCreator for TestCheckTimerCreator {
fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer> { fn get_check_timer_provider(
&self,
timer_context: TimerContext,
) -> Box<dyn CountdownProvider> {
match timer_context { match timer_context {
TimerContext::CheckLimit { .. } => { TimerContext::CheckLimit { .. } => {
Box::new(TestCheckTimer::new(self.check_limit_expired_flag.clone())) Box::new(TestCheckTimer::new(self.check_limit_expired_flag.clone()))

View File

@ -7,7 +7,7 @@ use spacepackets::ByteConversionError;
use std::error::Error; use std::error::Error;
use std::path::Path; use std::path::Path;
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub use stdmod::*; pub use std_mod::*;
pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM); pub const CRC_32: Crc<u32> = Crc::<u32>::new(&CRC_32_CKSUM);
@ -148,12 +148,11 @@ pub trait VirtualFilestore {
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub mod stdmod { pub mod std_mod {
use super::*; use super::*;
use std::{ use std::{
fs::{self, File, OpenOptions}, fs::{self, File, OpenOptions},
io::{BufReader, Read, Seek, SeekFrom, Write}, io::{BufReader, Read, Seek, SeekFrom, Write},
path::Path,
}; };
#[derive(Default)] #[derive(Default)]

View File

@ -17,6 +17,8 @@ use alloc::boxed::Box;
#[cfg(feature = "serde")] #[cfg(feature = "serde")]
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use crate::time::CountdownProvider;
#[cfg(feature = "std")] #[cfg(feature = "std")]
pub mod dest; pub mod dest;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
@ -45,7 +47,15 @@ pub enum TimerContext {
}, },
} }
/// Generic abstraction for a check timer which is used by 3 mechanisms of the CFDP protocol. /// A generic trait which allows CFDP entities to create check timers which are required to
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
/// and 4.6.3.3.
///
/// This trait also allows the creation of different check timers depending on context and purpose
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or
/// other factors.
///
/// The countdown timer is used by 3 mechanisms of the CFDP protocol.
/// ///
/// ## 1. Check limit handling /// ## 1. Check limit handling
/// ///
@ -74,22 +84,9 @@ pub enum TimerContext {
/// The timer will be used to perform the Positive Acknowledgement Procedures as specified in /// The timer will be used to perform the Positive Acknowledgement Procedures as specified in
/// 4.7. 1of the CFDP standard. The expiration period will be provided by the Positive ACK timer /// 4.7. 1of the CFDP standard. The expiration period will be provided by the Positive ACK timer
/// interval of the remote entity configuration. /// interval of the remote entity configuration.
pub trait CheckTimer: Debug {
fn has_expired(&self) -> bool;
fn reset(&mut self);
}
/// A generic trait which allows CFDP entities to create check timers which are required to
/// implement special procedures in unacknowledged transmission mode, as specified in 4.6.3.2
/// and 4.6.3.3. The [CheckTimer] documentation provides more information about the purpose of the
/// check timer in the context of CFDP.
///
/// This trait also allows the creation of different check timers depending on context and purpose
/// of the timer, the runtime environment (e.g. standard clock timer vs. timer using a RTC) or
/// other factors.
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub trait CheckTimerCreator { pub trait CheckTimerCreator {
fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CheckTimer>; fn get_check_timer_provider(&self, timer_context: TimerContext) -> Box<dyn CountdownProvider>;
} }
/// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime. /// Simple implementation of the [CheckTimerCreator] trait assuming a standard runtime.
@ -112,7 +109,7 @@ impl StdCheckTimer {
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
impl CheckTimer for StdCheckTimer { impl CountdownProvider for StdCheckTimer {
fn has_expired(&self) -> bool { fn has_expired(&self) -> bool {
let elapsed_time = self.start_time.elapsed(); let elapsed_time = self.start_time.elapsed();
if elapsed_time.as_secs() > self.expiry_time_seconds { if elapsed_time.as_secs() > self.expiry_time_seconds {
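For orientation, a minimal sketch of a custom countdown provider for the standard runtime
(assuming `satrs::time::CountdownProvider` keeps the `has_expired`/`reset` methods of the former
`CheckTimer` trait; the type shown here is hypothetical, not part of the crate):

use std::time::{Duration, Instant};

use satrs::time::CountdownProvider;

#[derive(Debug)]
pub struct InstantCountdown {
    start: Instant,
    period: Duration,
}

impl InstantCountdown {
    pub fn new(period: Duration) -> Self {
        Self { start: Instant::now(), period }
    }
}

impl CountdownProvider for InstantCountdown {
    fn has_expired(&self) -> bool {
        self.start.elapsed() >= self.period
    }

    fn reset(&mut self) {
        self.start = Instant::now();
    }
}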

View File

@ -1,65 +1,4 @@
#[cfg(feature = "alloc")] use crate::{tmtc::ReceivesTcCore, ValidatorU16Id};
use alloc::vec::Vec;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
use spacepackets::PacketId;
use crate::tmtc::ReceivesTcCore;
pub trait PacketIdLookup {
fn validate(&self, packet_id: u16) -> bool;
}
#[cfg(feature = "alloc")]
impl PacketIdLookup for Vec<u16> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&packet_id)
}
}
#[cfg(feature = "alloc")]
impl PacketIdLookup for HashSet<u16> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&packet_id)
}
}
impl PacketIdLookup for [u16] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&packet_id).is_ok()
}
}
impl PacketIdLookup for &[u16] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&packet_id).is_ok()
}
}
#[cfg(feature = "alloc")]
impl PacketIdLookup for Vec<PacketId> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&PacketId::from(packet_id))
}
}
#[cfg(feature = "alloc")]
impl PacketIdLookup for HashSet<PacketId> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&PacketId::from(packet_id))
}
}
impl PacketIdLookup for [PacketId] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&PacketId::from(packet_id)).is_ok()
}
}
impl PacketIdLookup for &[PacketId] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&PacketId::from(packet_id)).is_ok()
}
}
/// This function parses a given buffer for tightly packed CCSDS space packets. It uses the /// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
/// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then /// [PacketId] field of the CCSDS packets to detect the start of a CCSDS space packet and then
@ -75,7 +14,7 @@ impl PacketIdLookup for &[PacketId] {
/// error will be returned. /// error will be returned.
pub fn parse_buffer_for_ccsds_space_packets<E>( pub fn parse_buffer_for_ccsds_space_packets<E>(
buf: &mut [u8], buf: &mut [u8],
packet_id_lookup: &(impl PacketIdLookup + ?Sized), packet_id_validator: &(impl ValidatorU16Id + ?Sized),
tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized), tc_receiver: &mut (impl ReceivesTcCore<Error = E> + ?Sized),
next_write_idx: &mut usize, next_write_idx: &mut usize,
) -> Result<u32, E> { ) -> Result<u32, E> {
@ -88,7 +27,7 @@ pub fn parse_buffer_for_ccsds_space_packets<E>(
break; break;
} }
let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap()); let packet_id = u16::from_be_bytes(buf[current_idx..current_idx + 2].try_into().unwrap());
if packet_id_lookup.validate(packet_id) { if packet_id_validator.validate(packet_id) {
let length_field = let length_field =
u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap()); u16::from_be_bytes(buf[current_idx + 4..current_idx + 6].try_into().unwrap());
let packet_size = length_field + 7; let packet_size = length_field + 7;
@ -123,13 +62,13 @@ mod tests {
const TEST_APID_0: u16 = 0x02; const TEST_APID_0: u16 = 0x02;
const TEST_APID_1: u16 = 0x10; const TEST_APID_1: u16 = 0x10;
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0); const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1); const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
#[test] #[test]
fn test_basic() { fn test_basic() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len = ping_tc let packet_len = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)
@ -155,9 +94,9 @@ mod tests {
#[test] #[test]
fn test_multi_packet() { fn test_multi_packet() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)
@ -190,10 +129,10 @@ mod tests {
#[test] #[test]
fn test_multi_apid() { fn test_multi_apid() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); let sph = SpHeader::new_from_apid(TEST_APID_1);
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)
@ -226,10 +165,10 @@ mod tests {
#[test] #[test]
fn test_split_packet_multi() { fn test_split_packet_multi() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); let action_tc =
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)
@ -257,8 +196,8 @@ mod tests {
#[test] #[test]
fn test_one_split_packet() { fn test_one_split_packet() {
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let mut buffer: [u8; 32] = [0; 32]; let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer) .write_to_bytes(&mut buffer)

File diff suppressed because it is too large Load Diff

View File

@@ -80,7 +80,7 @@ impl HasSeverity for SeverityHigh {
     const SEVERITY: Severity = Severity::HIGH;
 }

-pub trait GenericEvent: EcssEnumeration {
+pub trait GenericEvent: EcssEnumeration + Copy + Clone {
     type Raw;
     type GroupId;
     type UniqueId;

View File

@@ -1,4 +1,3 @@
 //! # Hardware Abstraction Layer module
 #[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
 pub mod std;

View File

@@ -107,7 +107,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
 ///
 /// ## Example
 ///
-/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs)
+/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
 /// test also serves as the example application for this module.
 pub struct TcpTmtcInCobsServer<
     TmError,

View File

@ -4,11 +4,10 @@ use std::{
net::{SocketAddr, TcpListener, TcpStream}, net::{SocketAddr, TcpListener, TcpStream},
}; };
use alloc::boxed::Box;
use crate::{ use crate::{
encoding::{ccsds::PacketIdLookup, parse_buffer_for_ccsds_space_packets}, encoding::parse_buffer_for_ccsds_space_packets,
tmtc::{ReceivesTc, TmPacketSource}, tmtc::{ReceivesTc, TmPacketSource},
ValidatorU16Id,
}; };
use super::tcp_server::{ use super::tcp_server::{
@ -16,17 +15,19 @@ use super::tcp_server::{
}; };
/// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer]. /// Concrete [TcpTcParser] implementation for the [TcpSpacepacketsServer].
pub struct SpacepacketsTcParser { pub struct SpacepacketsTcParser<PacketIdChecker: ValidatorU16Id> {
packet_id_lookup: Box<dyn PacketIdLookup + Send>, packet_id_lookup: PacketIdChecker,
} }
impl SpacepacketsTcParser { impl<PacketIdChecker: ValidatorU16Id> SpacepacketsTcParser<PacketIdChecker> {
pub fn new(packet_id_lookup: Box<dyn PacketIdLookup + Send>) -> Self { pub fn new(packet_id_lookup: PacketIdChecker) -> Self {
Self { packet_id_lookup } Self { packet_id_lookup }
} }
} }
impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTcParser { impl<TmError, TcError: 'static, PacketIdChecker: ValidatorU16Id> TcpTcParser<TmError, TcError>
for SpacepacketsTcParser<PacketIdChecker>
{
fn handle_tc_parsing( fn handle_tc_parsing(
&mut self, &mut self,
tc_buffer: &mut [u8], tc_buffer: &mut [u8],
@ -38,7 +39,7 @@ impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for SpacepacketsTc
// Reader vec full, need to parse for packets. // Reader vec full, need to parse for packets.
conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets( conn_result.num_received_tcs += parse_buffer_for_ccsds_space_packets(
&mut tc_buffer[..current_write_idx], &mut tc_buffer[..current_write_idx],
self.packet_id_lookup.as_ref(), &self.packet_id_lookup,
tc_receiver.upcast_mut(), tc_receiver.upcast_mut(),
next_write_idx, next_write_idx,
) )
@ -88,13 +89,14 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
/// [spacepackets::PacketId]s as part of the server configuration for that purpose. /// [spacepackets::PacketId]s as part of the server configuration for that purpose.
/// ///
/// ## Example /// ## Example
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-core/tests/tcp_servers.rs) /// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// also serves as the example application for this module. /// also serves as the example application for this module.
pub struct TcpSpacepacketsServer< pub struct TcpSpacepacketsServer<
TmError, TmError,
TcError: 'static, TcError: 'static,
TmSource: TmPacketSource<Error = TmError>, TmSource: TmPacketSource<Error = TmError>,
TcReceiver: ReceivesTc<Error = TcError>, TcReceiver: ReceivesTc<Error = TcError>,
PacketIdChecker: ValidatorU16Id,
> { > {
generic_server: TcpTmtcGenericServer< generic_server: TcpTmtcGenericServer<
TmError, TmError,
@ -102,7 +104,7 @@ pub struct TcpSpacepacketsServer<
TmSource, TmSource,
TcReceiver, TcReceiver,
SpacepacketsTmSender, SpacepacketsTmSender,
SpacepacketsTcParser, SpacepacketsTcParser<PacketIdChecker>,
>, >,
} }
@ -111,7 +113,8 @@ impl<
TcError: 'static, TcError: 'static,
TmSource: TmPacketSource<Error = TmError>, TmSource: TmPacketSource<Error = TmError>,
TcReceiver: ReceivesTc<Error = TcError>, TcReceiver: ReceivesTc<Error = TcError>,
> TcpSpacepacketsServer<TmError, TcError, TmSource, TcReceiver> PacketIdChecker: ValidatorU16Id,
> TcpSpacepacketsServer<TmError, TcError, TmSource, TcReceiver, PacketIdChecker>
{ {
/// ///
/// ## Parameter /// ## Parameter
@ -127,12 +130,12 @@ impl<
cfg: ServerConfig, cfg: ServerConfig,
tm_source: TmSource, tm_source: TmSource,
tc_receiver: TcReceiver, tc_receiver: TcReceiver,
packet_id_lookup: Box<dyn PacketIdLookup + Send>, packet_id_checker: PacketIdChecker,
) -> Result<Self, std::io::Error> { ) -> Result<Self, std::io::Error> {
Ok(Self { Ok(Self {
generic_server: TcpTmtcGenericServer::new( generic_server: TcpTmtcGenericServer::new(
cfg, cfg,
SpacepacketsTcParser::new(packet_id_lookup), SpacepacketsTcParser::new(packet_id_checker),
SpacepacketsTmSender::default(), SpacepacketsTmSender::default(),
tm_source, tm_source,
tc_receiver, tc_receiver,
@ -170,7 +173,7 @@ mod tests {
thread, thread,
}; };
use alloc::{boxed::Box, sync::Arc}; use alloc::sync::Arc;
use hashbrown::HashSet; use hashbrown::HashSet;
use spacepackets::{ use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket}, ecss::{tc::PusTcCreator, WritablePusPacket},
@ -185,21 +188,21 @@ mod tests {
use super::TcpSpacepacketsServer; use super::TcpSpacepacketsServer;
const TEST_APID_0: u16 = 0x02; const TEST_APID_0: u16 = 0x02;
const TEST_PACKET_ID_0: PacketId = PacketId::const_tc(true, TEST_APID_0); const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_APID_1: u16 = 0x10; const TEST_APID_1: u16 = 0x10;
const TEST_PACKET_ID_1: PacketId = PacketId::const_tc(true, TEST_APID_1); const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
fn generic_tmtc_server( fn generic_tmtc_server(
addr: &SocketAddr, addr: &SocketAddr,
tc_receiver: SyncTcCacher, tc_receiver: SyncTcCacher,
tm_source: SyncTmSource, tm_source: SyncTmSource,
packet_id_lookup: HashSet<PacketId>, packet_id_lookup: HashSet<PacketId>,
) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher> { ) -> TcpSpacepacketsServer<(), (), SyncTmSource, SyncTcCacher, HashSet<PacketId>> {
TcpSpacepacketsServer::new( TcpSpacepacketsServer::new(
ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024), ServerConfig::new(*addr, Duration::from_millis(2), 1024, 1024),
tm_source, tm_source,
tc_receiver, tc_receiver,
Box::new(packet_id_lookup), packet_id_lookup,
) )
.expect("TCP server generation failed") .expect("TCP server generation failed")
} }
@ -233,8 +236,8 @@ mod tests {
assert_eq!(conn_result.num_sent_tms, 0); assert_eq!(conn_result.num_sent_tms, 0);
set_if_done.store(true, Ordering::Relaxed); set_if_done.store(true, Ordering::Relaxed);
}); });
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let tc_0 = ping_tc.to_vec().expect("packet generation failed"); let tc_0 = ping_tc.to_vec().expect("packet generation failed");
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed"); let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
stream stream
@ -265,13 +268,13 @@ mod tests {
// Add telemetry // Add telemetry
let mut total_tm_len = 0; let mut total_tm_len = 0;
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let verif_tm =
let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true);
let tm_0 = verif_tm.to_vec().expect("writing packet failed"); let tm_0 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_0.len(); total_tm_len += tm_0.len();
tm_source.add_tm(&tm_0); tm_source.add_tm(&tm_0);
let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); let verif_tm =
let verif_tm = PusTcCreator::new_simple(&mut sph, 1, 3, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true);
let tm_1 = verif_tm.to_vec().expect("writing packet failed"); let tm_1 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_1.len(); total_tm_len += tm_1.len();
tm_source.add_tm(&tm_1); tm_source.add_tm(&tm_1);
@ -312,14 +315,14 @@ mod tests {
.expect("setting reas timeout failed"); .expect("setting reas timeout failed");
// Send telecommands // Send telecommands
let mut sph = SpHeader::tc_unseg(TEST_APID_0, 0, 0).unwrap(); let ping_tc =
let ping_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let tc_0 = ping_tc.to_vec().expect("ping tc creation failed"); let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
stream stream
.write_all(&tc_0) .write_all(&tc_0)
.expect("writing to TCP server failed"); .expect("writing to TCP server failed");
let mut sph = SpHeader::tc_unseg(TEST_APID_1, 0, 0).unwrap(); let action_tc =
let action_tc = PusTcCreator::new_simple(&mut sph, 8, 0, None, true); PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let tc_1 = action_tc.to_vec().expect("action tc creation failed"); let tc_1 = action_tc.to_vec().expect("action tc creation failed");
stream stream
.write_all(&tc_1) .write_all(&tc_1)

View File

@ -40,8 +40,8 @@ use std::vec::Vec;
/// let ping_receiver = PingReceiver::default(); /// let ping_receiver = PingReceiver::default();
/// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver)) /// let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
/// .expect("Creating UDP TMTC server failed"); /// .expect("Creating UDP TMTC server failed");
/// let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap(); /// let sph = SpHeader::new_from_apid(0x02);
/// let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); /// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
/// let len = pus_tc /// let len = pus_tc
/// .write_to_bytes(&mut buf) /// .write_to_bytes(&mut buf)
/// .expect("Error writing PUS TC packet"); /// .expect("Error writing PUS TC packet");
@ -178,8 +178,8 @@ mod tests {
let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver)) let mut udp_tc_server = UdpTcServer::new(dest_addr, 2048, Box::new(ping_receiver))
.expect("Creating UDP TMTC server failed"); .expect("Creating UDP TMTC server failed");
is_send(&udp_tc_server); is_send(&udp_tc_server);
let mut sph = SpHeader::tc_unseg(0x02, 0, 0).unwrap(); let sph = SpHeader::new_from_apid(0x02);
let pus_tc = PusTcCreator::new_simple(&mut sph, 17, 1, None, true); let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let len = pus_tc let len = pus_tc
.write_to_bytes(&mut buf) .write_to_bytes(&mut buf)
.expect("Error writing PUS TC packet"); .expect("Error writing PUS TC packet");

View File

@ -1,40 +1,40 @@
use crate::{ use crate::ComponentId;
pus::verification::{TcStateAccepted, VerificationToken},
TargetId,
};
pub type CollectionIntervalFactor = u32; pub type CollectionIntervalFactor = u32;
/// Unique Identifier for a certain housekeeping dataset.
pub type UniqueId = u32; pub type UniqueId = u32;
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum HkRequest { pub struct HkRequest {
OneShot(UniqueId), pub unique_id: UniqueId,
Enable(UniqueId), pub variant: HkRequestVariant,
Disable(UniqueId), }
ModifyCollectionInterval(UniqueId, CollectionIntervalFactor),
impl HkRequest {
pub fn new(unique_id: UniqueId, variant: HkRequestVariant) -> Self {
Self { unique_id, variant }
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum HkRequestVariant {
OneShot,
EnablePeriodic,
DisablePeriodic,
ModifyCollectionInterval(CollectionIntervalFactor),
} }
#[derive(Debug, Copy, Clone, PartialEq, Eq)] #[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub struct TargetedHkRequest { pub struct TargetedHkRequest {
pub target_id: TargetId, pub target_id: ComponentId,
pub hk_request: HkRequest, pub hk_request: HkRequestVariant,
} }
impl TargetedHkRequest { impl TargetedHkRequest {
pub fn new(target_id: TargetId, hk_request: HkRequest) -> Self { pub fn new(target_id: ComponentId, hk_request: HkRequestVariant) -> Self {
Self { Self {
target_id, target_id,
hk_request, hk_request,
} }
} }
} }
pub trait PusHkRequestRouter {
type Error;
fn route(
&self,
target_id: TargetId,
hk_request: HkRequest,
token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error>;
}
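For orientation, a minimal sketch of how the new request variants are constructed (assuming these
types live in the `hk` module of the `satrs` crate; the IDs are hypothetical example values):

use satrs::hk::{HkRequest, HkRequestVariant, TargetedHkRequest, UniqueId};
use satrs::ComponentId;

fn hk_request_sketch() {
    const EXAMPLE_SET_ID: UniqueId = 1;
    const EXAMPLE_TARGET: ComponentId = 42;
    // Enable periodic generation of one housekeeping set.
    let enable = HkRequest::new(EXAMPLE_SET_ID, HkRequestVariant::EnablePeriodic);
    // Or address a one-shot request to a specific component.
    let targeted = TargetedHkRequest::new(EXAMPLE_TARGET, HkRequestVariant::OneShot);
    assert_eq!(enable.unique_id, EXAMPLE_SET_ID);
    assert_eq!(targeted.target_id, EXAMPLE_TARGET);
}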

View File

@@ -14,7 +14,7 @@
//! - The [pus] module which provides special support for projects using
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
-#![cfg_attr(doc_cfg, feature(doc_cfg))]
+#![cfg_attr(docs_rs, feature(doc_auto_cfg))]
#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
@@ -23,18 +23,15 @@ extern crate downcast_rs
extern crate std;
#[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod cfdp;
pub mod encoding;
-#[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod event_man;
pub mod events;
#[cfg(feature = "std")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod executable;
pub mod hal;
-pub mod objects;
+#[cfg(feature = "std")]
+pub mod mode_tree;
pub mod pool;
pub mod power;
pub mod pus;
@@ -42,6 +39,7 @@ pub mod queue;
pub mod request;
pub mod res_code;
pub mod seq_count;
+pub mod time;
pub mod tmtc;
pub mod action;
@@ -51,8 +49,70 @@ pub mod params;
pub use spacepackets;
+use spacepackets::PacketId;
-/// Generic channel ID type.
-pub type ChannelId = u32;
-/// Generic target ID type.
-pub type TargetId = u64;
+/// Generic component ID type.
+pub type ComponentId = u64;
pub trait ValidatorU16Id {
fn validate(&self, id: u16) -> bool;
}
#[cfg(feature = "alloc")]
impl ValidatorU16Id for alloc::vec::Vec<u16> {
fn validate(&self, id: u16) -> bool {
self.contains(&id)
}
}
#[cfg(feature = "alloc")]
impl ValidatorU16Id for hashbrown::HashSet<u16> {
fn validate(&self, id: u16) -> bool {
self.contains(&id)
}
}
impl ValidatorU16Id for [u16] {
fn validate(&self, id: u16) -> bool {
self.binary_search(&id).is_ok()
}
}
impl ValidatorU16Id for &[u16] {
fn validate(&self, id: u16) -> bool {
self.binary_search(&id).is_ok()
}
}
#[cfg(feature = "alloc")]
impl ValidatorU16Id for alloc::vec::Vec<spacepackets::PacketId> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&PacketId::from(packet_id))
}
}
#[cfg(feature = "alloc")]
impl ValidatorU16Id for hashbrown::HashSet<spacepackets::PacketId> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&PacketId::from(packet_id))
}
}
#[cfg(feature = "std")]
impl ValidatorU16Id for std::collections::HashSet<PacketId> {
fn validate(&self, packet_id: u16) -> bool {
self.contains(&PacketId::from(packet_id))
}
}
impl ValidatorU16Id for [PacketId] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&PacketId::from(packet_id)).is_ok()
}
}
impl ValidatorU16Id for &[PacketId] {
fn validate(&self, packet_id: u16) -> bool {
self.binary_search(&PacketId::from(packet_id)).is_ok()
}
}
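A brief sketch of how the new ValidatorU16Id trait might be used to filter telecommands by APID. The helper function name and the APID values are made up; only the trait and its slice/Vec implementations come from the diff above.

use satrs::ValidatorU16Id;

// Generic filter: anything implementing the trait can back it.
fn is_tc_apid_valid(validator: &(impl ValidatorU16Id + ?Sized), apid: u16) -> bool {
    validator.validate(apid)
}

fn main() {
    // The slice implementation relies on binary_search, so keep the IDs sorted.
    let allowed_apids: [u16; 3] = [0x02, 0x10, 0x42];
    assert!(is_tc_apid_valid(&allowed_apids[..], 0x10));
    assert!(!is_tc_apid_valid(&allowed_apids[..], 0x99));

    // The Vec implementation (alloc feature) uses a simple contains check instead.
    let allowed = vec![0x02_u16, 0x10, 0x42];
    assert!(allowed.validate(0x42));
}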


@@ -1,67 +1,95 @@
use core::mem::size_of;
+use satrs_shared::res_code::ResultU16;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
use spacepackets::ByteConversionError;
-use crate::TargetId;
+#[cfg(feature = "alloc")]
+pub use alloc_mod::*;
+#[cfg(feature = "std")]
+pub use std_mod::*;
+use crate::{
+queue::GenericTargetedMessagingError,
+request::{GenericMessage, MessageMetadata, MessageReceiver, MessageReceiverWithId, RequestId},
+ComponentId,
+};
+pub type Mode = u32;
+pub type Submode = u16;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct ModeAndSubmode {
-mode: u32,
-submode: u16,
+mode: Mode,
+submode: Submode,
}
impl ModeAndSubmode {
+pub const RAW_LEN: usize = size_of::<Mode>() + size_of::<Submode>();
-pub const fn new_mode_only(mode: u32) -> Self {
+pub const fn new_mode_only(mode: Mode) -> Self {
Self { mode, submode: 0 }
}
-pub const fn new(mode: u32, submode: u16) -> Self {
+pub const fn new(mode: Mode, submode: Submode) -> Self {
Self { mode, submode }
}
-pub fn raw_len() -> usize {
-size_of::<u32>() + size_of::<u16>()
-}
pub fn from_be_bytes(buf: &[u8]) -> Result<Self, ByteConversionError> {
if buf.len() < 6 {
return Err(ByteConversionError::FromSliceTooSmall {
-expected: 6,
+expected: Self::RAW_LEN,
found: buf.len(),
});
}
Ok(Self {
-mode: u32::from_be_bytes(buf[0..4].try_into().unwrap()),
-submode: u16::from_be_bytes(buf[4..6].try_into().unwrap()),
+mode: Mode::from_be_bytes(buf[0..size_of::<Mode>()].try_into().unwrap()),
+submode: Submode::from_be_bytes(
+buf[size_of::<Mode>()..size_of::<Mode>() + size_of::<Submode>()]
+.try_into()
+.unwrap(),
+),
})
}
+pub fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
+if buf.len() < Self::RAW_LEN {
+return Err(ByteConversionError::ToSliceTooSmall {
+expected: Self::RAW_LEN,
+found: buf.len(),
+});
+}
+buf[0..size_of::<Mode>()].copy_from_slice(&self.mode.to_be_bytes());
+buf[size_of::<Mode>()..Self::RAW_LEN].copy_from_slice(&self.submode.to_be_bytes());
+Ok(Self::RAW_LEN)
+}
-pub fn mode(&self) -> u32 {
+pub fn mode(&self) -> Mode {
self.mode
}
-pub fn submode(&self) -> u16 {
+pub fn submode(&self) -> Submode {
self.submode
}
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TargetedModeCommand {
-pub address: TargetId,
+pub address: ComponentId,
pub mode_submode: ModeAndSubmode,
}
impl TargetedModeCommand {
-pub const fn new(address: TargetId, mode_submode: ModeAndSubmode) -> Self {
+pub const fn new(address: ComponentId, mode_submode: ModeAndSubmode) -> Self {
Self {
address,
mode_submode,
}
}
-pub fn address(&self) -> TargetId {
+pub fn address(&self) -> ComponentId {
self.address
}
@@ -81,6 +109,8 @@ impl TargetedModeCommand {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModeRequest {
+/// Mode information. Can be used to notify other components of changed modes.
+ModeInfo(ModeAndSubmode),
SetMode(ModeAndSubmode),
ReadMode,
AnnounceMode,
@@ -90,6 +120,479 @@ pub enum ModeRequest {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub struct TargetedModeRequest {
-target_id: TargetId,
+target_id: ComponentId,
mode_request: ModeRequest,
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ModeReply {
/// Reply to a mode request to confirm the commanded mode was reached.
ModeReply(ModeAndSubmode),
/// Cannot reach the commanded mode. Contains a reason as a [ResultU16].
CantReachMode(ResultU16),
/// We are in the wrong mode for unknown reasons. Contains the expected and reached mode.
WrongMode {
expected: ModeAndSubmode,
reached: ModeAndSubmode,
},
}
pub type GenericModeReply = GenericMessage<ModeReply>;
pub trait ModeRequestSender {
fn local_channel_id(&self) -> ComponentId;
fn send_mode_request(
&self,
request_id: RequestId,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), GenericTargetedMessagingError>;
}
pub trait ModeRequestReceiver {
fn try_recv_mode_request(
&self,
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError>;
}
impl<R: MessageReceiver<ModeRequest>> ModeRequestReceiver
for MessageReceiverWithId<ModeRequest, R>
{
fn try_recv_mode_request(
&self,
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
self.try_recv_message()
}
}
#[derive(Debug, Clone)]
pub enum ModeError {
Messaging(GenericTargetedMessagingError),
}
impl From<GenericTargetedMessagingError> for ModeError {
fn from(value: GenericTargetedMessagingError) -> Self {
Self::Messaging(value)
}
}
pub trait ModeProvider {
fn mode_and_submode(&self) -> ModeAndSubmode;
fn mode(&self) -> Mode {
self.mode_and_submode().mode()
}
fn submode(&self) -> Submode {
self.mode_and_submode().submode()
}
}
pub trait ModeRequestHandler: ModeProvider {
type Error;
fn start_transition(
&mut self,
requestor: MessageMetadata,
mode_and_submode: ModeAndSubmode,
) -> Result<(), Self::Error>;
fn announce_mode(&self, requestor_info: Option<MessageMetadata>, recursive: bool);
fn handle_mode_reached(
&mut self,
requestor_info: Option<MessageMetadata>,
) -> Result<(), Self::Error>;
fn handle_mode_info(
&mut self,
requestor_info: MessageMetadata,
info: ModeAndSubmode,
) -> Result<(), Self::Error>;
fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error>;
fn handle_mode_request(
&mut self,
request: GenericMessage<ModeRequest>,
) -> Result<(), Self::Error> {
match request.message {
ModeRequest::SetMode(mode_and_submode) => {
self.start_transition(request.requestor_info, mode_and_submode)
}
ModeRequest::ReadMode => self.send_mode_reply(
request.requestor_info,
ModeReply::ModeReply(self.mode_and_submode()),
),
ModeRequest::AnnounceMode => {
self.announce_mode(Some(request.requestor_info), false);
Ok(())
}
ModeRequest::AnnounceModeRecursive => {
self.announce_mode(Some(request.requestor_info), true);
Ok(())
}
ModeRequest::ModeInfo(info) => self.handle_mode_info(request.requestor_info, info),
}
}
}
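To illustrate the intended call flow of the handler trait above, here is a minimal, hypothetical implementor that only tracks its current mode and discards replies; a real component would forward replies through a [ModeReplySender]. This is a sketch against the shown trait signatures (assuming the module paths satrs::mode and satrs::request), not code from this commit.

use satrs::mode::{ModeAndSubmode, ModeError, ModeProvider, ModeReply, ModeRequestHandler};
use satrs::request::MessageMetadata;

struct DummyDevice {
    mode_and_submode: ModeAndSubmode,
}

impl ModeProvider for DummyDevice {
    fn mode_and_submode(&self) -> ModeAndSubmode {
        self.mode_and_submode
    }
}

impl ModeRequestHandler for DummyDevice {
    type Error = ModeError;

    fn start_transition(
        &mut self,
        requestor: MessageMetadata,
        mode_and_submode: ModeAndSubmode,
    ) -> Result<(), Self::Error> {
        // A real device would kick off its transition here; this dummy switches immediately.
        self.mode_and_submode = mode_and_submode;
        self.handle_mode_reached(Some(requestor))
    }

    fn announce_mode(&self, _requestor_info: Option<MessageMetadata>, _recursive: bool) {
        // A real component could emit a mode info message or an event here.
    }

    fn handle_mode_reached(
        &mut self,
        requestor_info: Option<MessageMetadata>,
    ) -> Result<(), Self::Error> {
        if let Some(requestor) = requestor_info {
            self.send_mode_reply(requestor, ModeReply::ModeReply(self.mode_and_submode()))?;
        }
        Ok(())
    }

    fn handle_mode_info(
        &mut self,
        _requestor_info: MessageMetadata,
        _info: ModeAndSubmode,
    ) -> Result<(), Self::Error> {
        // This dummy ignores mode information coming from other components.
        Ok(())
    }

    fn send_mode_reply(
        &self,
        _requestor_info: MessageMetadata,
        _reply: ModeReply,
    ) -> Result<(), Self::Error> {
        // Discarded in this sketch; normally routed back via a mode reply sender.
        Ok(())
    }
}

With these five methods in place, the provided handle_mode_request default implementation dispatches SetMode, ReadMode, AnnounceMode, AnnounceModeRecursive and ModeInfo messages automatically.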
pub trait ModeReplyReceiver {
fn try_recv_mode_reply(
&self,
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError>;
}
impl<R: MessageReceiver<ModeReply>> ModeReplyReceiver for MessageReceiverWithId<ModeReply, R> {
fn try_recv_mode_reply(
&self,
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
self.try_recv_message()
}
}
pub trait ModeReplySender {
fn local_channel_id(&self) -> ComponentId;
/// The requestor is assumed to be the target of the reply.
fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
reply: ModeReply,
) -> Result<(), GenericTargetedMessagingError>;
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use crate::{
mode::ModeRequest,
queue::GenericTargetedMessagingError,
request::{
MessageMetadata, MessageSender, MessageSenderAndReceiver, MessageSenderMap,
RequestAndReplySenderAndReceiver, RequestId,
},
ComponentId,
};
use super::*;
impl<S: MessageSender<ModeReply>> MessageSenderMap<ModeReply, S> {
pub fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
target_id: ComponentId,
request: ModeReply,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(requestor_info, target_id, request)
}
pub fn add_reply_target(&mut self, target_id: ComponentId, request_sender: S) {
self.add_message_target(target_id, request_sender)
}
}
impl<FROM, S: MessageSender<ModeReply>, R: MessageReceiver<FROM>> ModeReplySender
for MessageSenderAndReceiver<ModeReply, FROM, S, R>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
}
fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
request: ModeReply,
) -> Result<(), GenericTargetedMessagingError> {
self.message_sender_map.send_mode_reply(
MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()),
requestor_info.sender_id(),
request,
)
}
}
impl<TO, S: MessageSender<TO>, R: MessageReceiver<ModeReply>> ModeReplyReceiver
for MessageSenderAndReceiver<TO, ModeReply, S, R>
{
fn try_recv_mode_reply(
&self,
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
self.message_receiver.try_recv_message()
}
}
impl<
REQUEST,
S0: MessageSender<REQUEST>,
R0: MessageReceiver<ModeReply>,
S1: MessageSender<ModeReply>,
R1: MessageReceiver<REQUEST>,
> RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
{
pub fn add_reply_target(&mut self, target_id: ComponentId, reply_sender: S1) {
self.reply_sender_map
.add_message_target(target_id, reply_sender)
}
}
impl<
REQUEST,
S0: MessageSender<REQUEST>,
R0: MessageReceiver<ModeReply>,
S1: MessageSender<ModeReply>,
R1: MessageReceiver<REQUEST>,
> ModeReplySender for RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
}
fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
request: ModeReply,
) -> Result<(), GenericTargetedMessagingError> {
self.reply_sender_map.send_mode_reply(
MessageMetadata::new(requestor_info.request_id(), self.local_channel_id()),
requestor_info.sender_id(),
request,
)
}
}
impl<
REQUEST,
S0: MessageSender<REQUEST>,
R0: MessageReceiver<ModeReply>,
S1: MessageSender<ModeReply>,
R1: MessageReceiver<REQUEST>,
> ModeReplyReceiver
for RequestAndReplySenderAndReceiver<REQUEST, ModeReply, S0, R0, S1, R1>
{
fn try_recv_mode_reply(
&self,
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
self.reply_receiver.try_recv_message()
}
}
/// Helper type definition for a mode handler which can handle mode requests.
pub type ModeRequestHandlerInterface<S, R> =
MessageSenderAndReceiver<ModeReply, ModeRequest, S, R>;
impl<S: MessageSender<ModeReply>, R: MessageReceiver<ModeRequest>>
ModeRequestHandlerInterface<S, R>
{
pub fn try_recv_mode_request(
&self,
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
self.try_recv_message()
}
pub fn send_mode_reply(
&self,
requestor_info: MessageMetadata,
reply: ModeReply,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(
requestor_info.request_id(),
requestor_info.sender_id(),
reply,
)
}
}
/// Helper type definition for a mode requestor object which can send mode requests and receive
/// mode replies.
pub type ModeRequestorInterface<S, R> = MessageSenderAndReceiver<ModeRequest, ModeReply, S, R>;
impl<S: MessageSender<ModeRequest>, R: MessageReceiver<ModeReply>> ModeRequestorInterface<S, R> {
pub fn try_recv_mode_reply(
&self,
) -> Result<Option<GenericMessage<ModeReply>>, GenericTargetedMessagingError> {
self.try_recv_message()
}
pub fn send_mode_request(
&self,
request_id: RequestId,
target_id: ComponentId,
reply: ModeRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(request_id, target_id, reply)
}
}
/// Helper type definition for a mode handler object which can both send mode requests and
/// process mode requests.
pub type ModeInterface<S0, R0, S1, R1> =
RequestAndReplySenderAndReceiver<ModeRequest, ModeReply, S0, R0, S1, R1>;
impl<S: MessageSender<ModeRequest>> MessageSenderMap<ModeRequest, S> {
pub fn send_mode_request(
&self,
requestor_info: MessageMetadata,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(requestor_info, target_id, request)
}
pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S) {
self.add_message_target(target_id, request_sender)
}
}
/*
impl<S: MessageSender<ModeRequest>> ModeRequestSender for MessageSenderMapWithId<ModeRequest, S> {
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id
}
fn send_mode_request(
&self,
request_id: RequestId,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(request_id, target_id, request)
}
}
*/
impl<TO, S: MessageSender<TO>, R: MessageReceiver<ModeRequest>> ModeRequestReceiver
for MessageSenderAndReceiver<TO, ModeRequest, S, R>
{
fn try_recv_mode_request(
&self,
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
self.message_receiver.try_recv_message()
}
}
impl<FROM, S: MessageSender<ModeRequest>, R: MessageReceiver<FROM>> ModeRequestSender
for MessageSenderAndReceiver<ModeRequest, FROM, S, R>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
}
fn send_mode_request(
&self,
request_id: RequestId,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.message_sender_map.send_mode_request(
MessageMetadata::new(request_id, self.local_channel_id()),
target_id,
request,
)
}
}
impl<
REPLY,
S0: MessageSender<ModeRequest>,
R0: MessageReceiver<REPLY>,
S1: MessageSender<REPLY>,
R1: MessageReceiver<ModeRequest>,
> RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
{
pub fn add_request_target(&mut self, target_id: ComponentId, request_sender: S0) {
self.request_sender_map
.add_message_target(target_id, request_sender)
}
}
impl<
REPLY,
S0: MessageSender<ModeRequest>,
R0: MessageReceiver<REPLY>,
S1: MessageSender<REPLY>,
R1: MessageReceiver<ModeRequest>,
> ModeRequestSender
for RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
}
fn send_mode_request(
&self,
request_id: RequestId,
target_id: ComponentId,
request: ModeRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.request_sender_map.send_mode_request(
MessageMetadata::new(request_id, self.local_channel_id()),
target_id,
request,
)
}
}
impl<
REPLY,
S0: MessageSender<ModeRequest>,
R0: MessageReceiver<REPLY>,
S1: MessageSender<REPLY>,
R1: MessageReceiver<ModeRequest>,
> ModeRequestReceiver
for RequestAndReplySenderAndReceiver<ModeRequest, REPLY, S0, R0, S1, R1>
{
fn try_recv_mode_request(
&self,
) -> Result<Option<GenericMessage<ModeRequest>>, GenericTargetedMessagingError> {
self.request_receiver.try_recv_message()
}
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use std::sync::mpsc;
use crate::request::GenericMessage;
use super::*;
pub type ModeRequestHandlerMpsc = ModeRequestHandlerInterface<
mpsc::Sender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestHandlerMpscBounded = ModeRequestHandlerInterface<
mpsc::SyncSender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestorMpsc = ModeRequestorInterface<
mpsc::Sender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
>;
pub type ModeRequestorBoundedMpsc = ModeRequestorInterface<
mpsc::SyncSender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
>;
pub type ModeRequestorAndHandlerMpsc = ModeInterface<
mpsc::Sender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::Sender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestorAndHandlerMpscBounded = ModeInterface<
mpsc::SyncSender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::SyncSender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
}
#[cfg(test)]
mod tests {}
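A small round-trip sketch for the new fixed-size wire format of ModeAndSubmode; the mode values are arbitrary and the module path satrs::mode is assumed.

use satrs::mode::ModeAndSubmode;

fn main() {
    let mode = ModeAndSubmode::new(2, 1);
    let mut buf = [0u8; ModeAndSubmode::RAW_LEN];
    // Serialize into a big endian byte buffer and read it back.
    let written = mode.write_to_be_bytes(&mut buf).expect("buffer too small");
    assert_eq!(written, ModeAndSubmode::RAW_LEN);
    let read_back = ModeAndSubmode::from_be_bytes(&buf).expect("deserialization failed");
    assert_eq!(read_back, mode);
}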

satrs/src/mode_tree.rs (new file, 37 lines)

@@ -0,0 +1,37 @@
use alloc::vec::Vec;
use hashbrown::HashMap;
use crate::{
mode::{Mode, ModeAndSubmode, Submode},
ComponentId,
};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum TableEntryType {
/// Target table containing information of the expected children modes for given mode.
Target,
/// Sequence table which contains information about how to reach a target table, including
/// the order of the sequences.
Sequence,
}
pub struct ModeTableEntry {
/// Name of respective table entry.
pub name: &'static str,
/// Target channel ID.
pub channel_id: ComponentId,
pub mode_submode: ModeAndSubmode,
pub allowed_submode_mask: Option<Submode>,
pub check_success: bool,
}
pub struct ModeTableMapValue {
/// Name for a given mode table entry.
pub name: &'static str,
pub entries: Vec<ModeTableEntry>,
}
pub type ModeTable = HashMap<Mode, ModeTableMapValue>;
#[cfg(test)]
mod tests {}
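A hypothetical target table entry built with the new mode tree types; the component ID, mode numbers and names are placeholders, not values from the commit.

use satrs::mode::ModeAndSubmode;
use satrs::mode_tree::{ModeTable, ModeTableEntry, ModeTableMapValue};

fn main() {
    let mut table = ModeTable::new();
    // For the hypothetical ON mode (1) of a parent assembly, expect one child in mode 1/0.
    table.insert(
        1,
        ModeTableMapValue {
            name: "ON",
            entries: vec![ModeTableEntry {
                name: "device-handler-0",
                channel_id: 0x05,
                mode_submode: ModeAndSubmode::new(1, 0),
                allowed_submode_mask: None,
                check_success: true,
            }],
        },
    );
    assert!(table.contains_key(&1));
}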


@@ -1,308 +0,0 @@
//! # Module providing addressable object support and a manager for them
//!
//! Each addressable object can be identified using an [object ID][ObjectId].
//! The [system object][ManagedSystemObject] trait also allows storing these objects into the
//! [object manager][ObjectManager]. They can then be retrieved and casted back to a known type
//! using the object ID.
//!
//! # Examples
//!
//! ```rust
//! use std::any::Any;
//! use std::error::Error;
//! use satrs::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
//!
//! struct ExampleSysObj {
//! id: ObjectId,
//! dummy: u32,
//! was_initialized: bool,
//! }
//!
//! impl ExampleSysObj {
//! fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
//! ExampleSysObj {
//! id,
//! dummy,
//! was_initialized: false,
//! }
//! }
//! }
//!
//! impl SystemObject for ExampleSysObj {
//! type Error = ();
//! fn get_object_id(&self) -> &ObjectId {
//! &self.id
//! }
//!
//! fn initialize(&mut self) -> Result<(), Self::Error> {
//! self.was_initialized = true;
//! Ok(())
//! }
//! }
//!
//! impl ManagedSystemObject for ExampleSysObj {}
//!
//! let mut obj_manager = ObjectManager::default();
//! let obj_id = ObjectId { id: 0, name: "Example 0"};
//! let example_obj = ExampleSysObj::new(obj_id, 42);
//! obj_manager.insert(Box::new(example_obj));
//! let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&obj_id);
//! let example_obj = obj_back_casted.unwrap();
//! assert_eq!(example_obj.id, obj_id);
//! assert_eq!(example_obj.dummy, 42);
//! ```
#[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "alloc")]
use downcast_rs::Downcast;
#[cfg(feature = "alloc")]
use hashbrown::HashMap;
#[cfg(feature = "std")]
use std::error::Error;
use crate::TargetId;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub struct ObjectId {
pub id: TargetId,
pub name: &'static str,
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use super::*;
/// Each object which is stored inside the [object manager][ObjectManager] needs to implement
/// this trait
pub trait SystemObject: Downcast {
type Error;
fn get_object_id(&self) -> &ObjectId;
fn initialize(&mut self) -> Result<(), Self::Error>;
}
downcast_rs::impl_downcast!(SystemObject assoc Error);
pub trait ManagedSystemObject: SystemObject + Send {}
downcast_rs::impl_downcast!(ManagedSystemObject assoc Error);
/// Helper module to manage multiple [ManagedSystemObjects][ManagedSystemObject] by mapping them
/// using an [object ID][ObjectId]
#[cfg(feature = "alloc")]
pub struct ObjectManager<E> {
obj_map: HashMap<ObjectId, Box<dyn ManagedSystemObject<Error = E>>>,
}
#[cfg(feature = "alloc")]
impl<E: 'static> Default for ObjectManager<E> {
fn default() -> Self {
Self::new()
}
}
#[cfg(feature = "alloc")]
impl<E: 'static> ObjectManager<E> {
pub fn new() -> Self {
ObjectManager {
obj_map: HashMap::new(),
}
}
pub fn insert(&mut self, sys_obj: Box<dyn ManagedSystemObject<Error = E>>) -> bool {
let obj_id = sys_obj.get_object_id();
if self.obj_map.contains_key(obj_id) {
return false;
}
self.obj_map.insert(*obj_id, sys_obj).is_none()
}
/// Initializes all System Objects in the hash map and returns the number of successful
/// initializations
pub fn initialize(&mut self) -> Result<u32, Box<dyn Error>> {
let mut init_success = 0;
for val in self.obj_map.values_mut() {
if val.initialize().is_ok() {
init_success += 1
}
}
Ok(init_success)
}
/// Retrieve a reference to an object stored inside the manager. The type to retrieve needs to
/// be explicitly passed as a generic parameter or specified on the left hand side of the
/// expression.
pub fn get_ref<T: ManagedSystemObject<Error = E>>(&self, key: &ObjectId) -> Option<&T> {
self.obj_map.get(key).and_then(|o| o.downcast_ref::<T>())
}
/// Retrieve a mutable reference to an object stored inside the manager. The type to retrieve
/// needs to be explicitly passed as a generic parameter or specified on the left hand side
/// of the expression.
pub fn get_mut<T: ManagedSystemObject<Error = E>>(
&mut self,
key: &ObjectId,
) -> Option<&mut T> {
self.obj_map
.get_mut(key)
.and_then(|o| o.downcast_mut::<T>())
}
}
}
#[cfg(test)]
mod tests {
use crate::objects::{ManagedSystemObject, ObjectId, ObjectManager, SystemObject};
use std::boxed::Box;
use std::string::String;
use std::sync::{Arc, Mutex};
use std::thread;
struct ExampleSysObj {
id: ObjectId,
dummy: u32,
was_initialized: bool,
}
impl ExampleSysObj {
fn new(id: ObjectId, dummy: u32) -> ExampleSysObj {
ExampleSysObj {
id,
dummy,
was_initialized: false,
}
}
}
impl SystemObject for ExampleSysObj {
type Error = ();
fn get_object_id(&self) -> &ObjectId {
&self.id
}
fn initialize(&mut self) -> Result<(), Self::Error> {
self.was_initialized = true;
Ok(())
}
}
impl ManagedSystemObject for ExampleSysObj {}
struct OtherExampleObject {
id: ObjectId,
string: String,
was_initialized: bool,
}
impl SystemObject for OtherExampleObject {
type Error = ();
fn get_object_id(&self) -> &ObjectId {
&self.id
}
fn initialize(&mut self) -> Result<(), Self::Error> {
self.was_initialized = true;
Ok(())
}
}
impl ManagedSystemObject for OtherExampleObject {}
#[test]
fn test_obj_manager_simple() {
let mut obj_manager = ObjectManager::default();
let expl_obj_id = ObjectId {
id: 0,
name: "Example 0",
};
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
assert!(obj_manager.insert(Box::new(example_obj)));
let res = obj_manager.initialize();
assert!(res.is_ok());
assert_eq!(res.unwrap(), 1);
let obj_back_casted: Option<&ExampleSysObj> = obj_manager.get_ref(&expl_obj_id);
assert!(obj_back_casted.is_some());
let expl_obj_back_casted = obj_back_casted.unwrap();
assert_eq!(expl_obj_back_casted.dummy, 42);
assert!(expl_obj_back_casted.was_initialized);
let second_obj_id = ObjectId {
id: 12,
name: "Example 1",
};
let second_example_obj = OtherExampleObject {
id: second_obj_id,
string: String::from("Hello Test"),
was_initialized: false,
};
assert!(obj_manager.insert(Box::new(second_example_obj)));
let res = obj_manager.initialize();
assert!(res.is_ok());
assert_eq!(res.unwrap(), 2);
let obj_back_casted: Option<&OtherExampleObject> = obj_manager.get_ref(&second_obj_id);
assert!(obj_back_casted.is_some());
let expl_obj_back_casted = obj_back_casted.unwrap();
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
assert!(expl_obj_back_casted.was_initialized);
let existing_obj_id = ObjectId {
id: 12,
name: "Example 1",
};
let invalid_obj = OtherExampleObject {
id: existing_obj_id,
string: String::from("Hello Test"),
was_initialized: false,
};
assert!(!obj_manager.insert(Box::new(invalid_obj)));
}
#[test]
fn object_man_threaded() {
let obj_manager = Arc::new(Mutex::new(ObjectManager::new()));
let expl_obj_id = ObjectId {
id: 0,
name: "Example 0",
};
let example_obj = ExampleSysObj::new(expl_obj_id, 42);
let second_obj_id = ObjectId {
id: 12,
name: "Example 1",
};
let second_example_obj = OtherExampleObject {
id: second_obj_id,
string: String::from("Hello Test"),
was_initialized: false,
};
let mut obj_man_handle = obj_manager.lock().expect("Mutex lock failed");
assert!(obj_man_handle.insert(Box::new(example_obj)));
assert!(obj_man_handle.insert(Box::new(second_example_obj)));
let res = obj_man_handle.initialize();
std::mem::drop(obj_man_handle);
assert!(res.is_ok());
assert_eq!(res.unwrap(), 2);
let obj_man_0 = obj_manager.clone();
let jh0 = thread::spawn(move || {
let locked_man = obj_man_0.lock().expect("Mutex lock failed");
let obj_back_casted: Option<&ExampleSysObj> = locked_man.get_ref(&expl_obj_id);
assert!(obj_back_casted.is_some());
let expl_obj_back_casted = obj_back_casted.unwrap();
assert_eq!(expl_obj_back_casted.dummy, 42);
assert!(expl_obj_back_casted.was_initialized);
std::mem::drop(locked_man)
});
let jh1 = thread::spawn(move || {
let locked_man = obj_manager.lock().expect("Mutex lock failed");
let obj_back_casted: Option<&OtherExampleObject> = locked_man.get_ref(&second_obj_id);
assert!(obj_back_casted.is_some());
let expl_obj_back_casted = obj_back_casted.unwrap();
assert_eq!(expl_obj_back_casted.string, String::from("Hello Test"));
assert!(expl_obj_back_casted.was_initialized);
std::mem::drop(locked_man)
});
jh0.join().expect("Joining thread 0 failed");
jh1.join().expect("Joining thread 1 failed");
}
}


@@ -43,41 +43,45 @@
//! This includes the [ParamsHeapless] enumeration for contained values which do not require heap
//! allocation, and the [Params] which enumerates [ParamsHeapless] and some additional types which
//! require [alloc] support but allow for more flexibility.
-#[cfg(feature = "alloc")]
use crate::pool::StoreAddr;
-#[cfg(feature = "alloc")]
-use alloc::string::{String, ToString};
-#[cfg(feature = "alloc")]
-use alloc::vec::Vec;
use core::fmt::Debug;
use core::mem::size_of;
use paste::paste;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
+pub use spacepackets::util::ToBeBytes;
use spacepackets::util::UnsignedEnum;
use spacepackets::ByteConversionError;
#[cfg(feature = "alloc")]
-pub use alloc_mod::*;
-pub use spacepackets::util::ToBeBytes;
+use alloc::string::{String, ToString};
+#[cfg(feature = "alloc")]
+use alloc::vec::Vec;
/// Generic trait which is used for objects which can be converted into a raw network (big) endian
/// byte format.
pub trait WritableToBeBytes {
-fn raw_len(&self) -> usize;
+fn written_len(&self) -> usize;
/// Writes the object to a raw buffer in network endianness (big)
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
+#[cfg(feature = "alloc")]
+fn to_vec(&self) -> Result<Vec<u8>, ByteConversionError> {
+let mut vec = alloc::vec![0; self.written_len()];
+self.write_to_be_bytes(&mut vec)?;
+Ok(vec)
+}
}
macro_rules! param_to_be_bytes_impl {
($Newtype: ident) => {
impl WritableToBeBytes for $Newtype {
#[inline]
-fn raw_len(&self) -> usize {
+fn written_len(&self) -> usize {
size_of::<<Self as ToBeBytes>::ByteArray>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
-let raw_len = self.raw_len();
+let raw_len = WritableToBeBytes::written_len(self);
if buf.len() < raw_len {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
@@ -385,32 +389,32 @@ pub enum ParamsRaw {
}
impl WritableToBeBytes for ParamsRaw {
-fn raw_len(&self) -> usize {
+fn written_len(&self) -> usize {
match self {
-ParamsRaw::U8(v) => v.raw_len(),
-ParamsRaw::U8Pair(v) => v.raw_len(),
-ParamsRaw::U8Triplet(v) => v.raw_len(),
-ParamsRaw::I8(v) => v.raw_len(),
-ParamsRaw::I8Pair(v) => v.raw_len(),
-ParamsRaw::I8Triplet(v) => v.raw_len(),
-ParamsRaw::U16(v) => v.raw_len(),
-ParamsRaw::U16Pair(v) => v.raw_len(),
-ParamsRaw::U16Triplet(v) => v.raw_len(),
-ParamsRaw::I16(v) => v.raw_len(),
-ParamsRaw::I16Pair(v) => v.raw_len(),
-ParamsRaw::I16Triplet(v) => v.raw_len(),
-ParamsRaw::U32(v) => v.raw_len(),
-ParamsRaw::U32Pair(v) => v.raw_len(),
-ParamsRaw::U32Triplet(v) => v.raw_len(),
-ParamsRaw::I32(v) => v.raw_len(),
-ParamsRaw::I32Pair(v) => v.raw_len(),
-ParamsRaw::I32Triplet(v) => v.raw_len(),
-ParamsRaw::F32(v) => v.raw_len(),
-ParamsRaw::F32Pair(v) => v.raw_len(),
-ParamsRaw::F32Triplet(v) => v.raw_len(),
-ParamsRaw::U64(v) => v.raw_len(),
-ParamsRaw::I64(v) => v.raw_len(),
-ParamsRaw::F64(v) => v.raw_len(),
+ParamsRaw::U8(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U8Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U8Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I8(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I8Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I8Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U16(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U16Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U16Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I16(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I16Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I16Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U32(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U32Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U32Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I32(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I32Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I32Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::F32(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::F32Pair(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::F32Triplet(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::U64(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::I64(v) => WritableToBeBytes::written_len(v),
+ParamsRaw::F64(v) => WritableToBeBytes::written_len(v),
}
}
@@ -463,7 +467,7 @@ params_raw_from_newtype!(
);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
-pub enum EcssEnumParams {
+pub enum ParamsEcssEnum {
U8(EcssEnumU8),
U16(EcssEnumU16),
U32(EcssEnumU32),
@@ -471,40 +475,46 @@ pub enum EcssEnumParams {
}
macro_rules! writable_as_be_bytes_ecss_enum_impl {
-($EnumIdent: ident) => {
+($EnumIdent: ident, $Ty: ident) => {
+impl From<$EnumIdent> for ParamsEcssEnum {
+fn from(e: $EnumIdent) -> Self {
+Self::$Ty(e)
+}
+}
impl WritableToBeBytes for $EnumIdent {
-fn raw_len(&self) -> usize {
+fn written_len(&self) -> usize {
self.size()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
-<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.raw_len())
+<Self as UnsignedEnum>::write_to_be_bytes(self, buf).map(|_| self.written_len())
}
}
};
}
-writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8);
-writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16);
-writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32);
-writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64);
+writable_as_be_bytes_ecss_enum_impl!(EcssEnumU8, U8);
+writable_as_be_bytes_ecss_enum_impl!(EcssEnumU16, U16);
+writable_as_be_bytes_ecss_enum_impl!(EcssEnumU32, U32);
+writable_as_be_bytes_ecss_enum_impl!(EcssEnumU64, U64);
-impl WritableToBeBytes for EcssEnumParams {
-fn raw_len(&self) -> usize {
+impl WritableToBeBytes for ParamsEcssEnum {
+fn written_len(&self) -> usize {
match self {
-EcssEnumParams::U8(e) => e.raw_len(),
-EcssEnumParams::U16(e) => e.raw_len(),
-EcssEnumParams::U32(e) => e.raw_len(),
-EcssEnumParams::U64(e) => e.raw_len(),
+ParamsEcssEnum::U8(e) => e.written_len(),
+ParamsEcssEnum::U16(e) => e.written_len(),
+ParamsEcssEnum::U32(e) => e.written_len(),
+ParamsEcssEnum::U64(e) => e.written_len(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
match self {
-EcssEnumParams::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
-EcssEnumParams::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
-EcssEnumParams::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
-EcssEnumParams::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
+ParamsEcssEnum::U8(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
+ParamsEcssEnum::U16(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
+ParamsEcssEnum::U32(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
+ParamsEcssEnum::U64(e) => WritableToBeBytes::write_to_be_bytes(e, buf),
}
}
}
@@ -513,7 +523,19 @@ impl WritableToBeBytes for EcssEnumParams {
#[derive(Debug, Copy, Clone, PartialEq)]
pub enum ParamsHeapless {
Raw(ParamsRaw),
-EcssEnum(EcssEnumParams),
+EcssEnum(ParamsEcssEnum),
+}
+impl From<ParamsRaw> for ParamsHeapless {
+fn from(v: ParamsRaw) -> Self {
+Self::Raw(v)
+}
+}
+impl From<ParamsEcssEnum> for ParamsHeapless {
+fn from(v: ParamsEcssEnum) -> Self {
+Self::EcssEnum(v)
+}
}
macro_rules! from_conversions_for_raw {
@@ -560,55 +582,113 @@ from_conversions_for_raw!(
(f64, Self::F64),
);
/// Generic enumeration for additional parameters, including parameters which rely on heap
/// allocations.
#[derive(Debug, Clone, PartialEq)]
#[non_exhaustive]
pub enum Params {
Heapless(ParamsHeapless),
Store(StoreAddr),
#[cfg(feature = "alloc")]
Vec(Vec<u8>),
#[cfg(feature = "alloc")]
String(String),
}
impl From<StoreAddr> for Params {
fn from(x: StoreAddr) -> Self {
Self::Store(x)
}
}
impl From<ParamsHeapless> for Params {
fn from(x: ParamsHeapless) -> Self {
Self::Heapless(x)
}
}
impl From<ParamsRaw> for Params {
fn from(x: ParamsRaw) -> Self {
Self::Heapless(ParamsHeapless::Raw(x))
}
}
-#[cfg(feature = "alloc")]
-mod alloc_mod {
-use super::*;
-/// Generic enumeration for additional parameters, including parameters which rely on heap
-/// allocations.
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
-#[derive(Debug, Clone)]
-pub enum Params {
-Heapless(ParamsHeapless),
-Store(StoreAddr),
-Vec(Vec<u8>),
-String(String),
-}
-impl From<StoreAddr> for Params {
-fn from(x: StoreAddr) -> Self {
-Self::Store(x)
-}
-}
-impl From<ParamsHeapless> for Params {
-fn from(x: ParamsHeapless) -> Self {
-Self::Heapless(x)
-}
-}
-impl From<Vec<u8>> for Params {
-fn from(val: Vec<u8>) -> Self {
-Self::Vec(val)
-}
-}
-/// Converts a byte slice into the [Params::Vec] variant
-impl From<&[u8]> for Params {
-fn from(val: &[u8]) -> Self {
-Self::Vec(val.to_vec())
-}
-}
-impl From<String> for Params {
-fn from(val: String) -> Self {
-Self::String(val)
-}
-}
-/// Converts a string slice into the [Params::String] variant
-impl From<&str> for Params {
-fn from(val: &str) -> Self {
-Self::String(val.to_string())
-}
-}
-}
+#[cfg(feature = "alloc")]
+impl From<Vec<u8>> for Params {
+fn from(val: Vec<u8>) -> Self {
+Self::Vec(val)
+}
+}
+/// Converts a byte slice into the [Params::Vec] variant
+#[cfg(feature = "alloc")]
+impl From<&[u8]> for Params {
+fn from(val: &[u8]) -> Self {
+Self::Vec(val.to_vec())
+}
+}
+#[cfg(feature = "alloc")]
+impl From<String> for Params {
+fn from(val: String) -> Self {
+Self::String(val)
+}
+}
+#[cfg(feature = "alloc")]
+/// Converts a string slice into the [Params::String] variant
+impl From<&str> for Params {
+fn from(val: &str) -> Self {
+Self::String(val.to_string())
+}
+}
+/// Please note while [WritableToBeBytes] is implemented for [Params], the default implementation
+/// will not be able to process the [Params::Store] parameter variant.
+impl WritableToBeBytes for Params {
+fn written_len(&self) -> usize {
+match self {
+Params::Heapless(p) => match p {
+ParamsHeapless::Raw(raw) => raw.written_len(),
+ParamsHeapless::EcssEnum(enumeration) => enumeration.written_len(),
+},
+Params::Store(_) => 0,
+#[cfg(feature = "alloc")]
+Params::Vec(vec) => vec.len(),
+#[cfg(feature = "alloc")]
+Params::String(string) => string.len(),
+}
+}
+fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
+match self {
+Params::Heapless(p) => match p {
+ParamsHeapless::Raw(raw) => raw.write_to_be_bytes(buf),
+ParamsHeapless::EcssEnum(enumeration) => enumeration.write_to_be_bytes(buf),
+},
+Params::Store(_) => Ok(0),
+#[cfg(feature = "alloc")]
+Params::Vec(vec) => {
+if buf.len() < vec.len() {
+return Err(ByteConversionError::ToSliceTooSmall {
+found: buf.len(),
+expected: vec.len(),
+});
+}
+buf[0..vec.len()].copy_from_slice(vec);
+Ok(vec.len())
+}
+#[cfg(feature = "alloc")]
+Params::String(string) => {
+if buf.len() < string.len() {
+return Err(ByteConversionError::ToSliceTooSmall {
+found: buf.len(),
+expected: string.len(),
+});
+}
+buf[0..string.len()].copy_from_slice(string.as_bytes());
+Ok(string.len())
+}
+}
+}
+}
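A sketch of how the flattened parameter hierarchy can be fed from plain Rust values and serialized, assuming the alloc feature and the module path satrs::params; the concrete values are arbitrary.

use satrs::params::{Params, ParamsRaw, U32Pair, WritableToBeBytes};

fn main() {
    // From a raw tuple newtype through ParamsHeapless into Params.
    let raw: Params = ParamsRaw::from(U32Pair(1, 2)).into();
    assert_eq!(raw.written_len(), 8);

    // String and byte slices convert directly into the owned variants.
    let text = Params::from("hello");
    let bytes = Params::from([0xAA_u8, 0xBB].as_slice());
    assert_eq!(text.to_vec().unwrap(), b"hello".to_vec());
    assert_eq!(bytes.to_vec().unwrap(), vec![0xAA_u8, 0xBB]);
}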
@@ -617,6 +697,36 @@ mod tests {
mod tests {
use super::*;
fn test_cloning_works(param_raw: &impl WritableToBeBytes) {
let _new_param = param_raw;
}
fn test_writing_fails(param_raw: &(impl WritableToBeBytes + ToBeBytes)) {
let pair_size = WritableToBeBytes::written_len(param_raw);
assert_eq!(pair_size, ToBeBytes::written_len(param_raw));
let mut vec = alloc::vec![0; pair_size - 1];
let result = param_raw.write_to_be_bytes(&mut vec);
if let Err(ByteConversionError::ToSliceTooSmall { found, expected }) = result {
assert_eq!(found, pair_size - 1);
assert_eq!(expected, pair_size);
} else {
panic!("Expected ByteConversionError::ToSliceTooSmall");
}
}
fn test_writing(params_raw: &ParamsRaw, writeable: &impl WritableToBeBytes) {
assert_eq!(params_raw.written_len(), writeable.written_len());
let mut vec = alloc::vec![0; writeable.written_len()];
writeable
.write_to_be_bytes(&mut vec)
.expect("writing parameter to buffer failed");
let mut other_vec = alloc::vec![0; writeable.written_len()];
params_raw
.write_to_be_bytes(&mut other_vec)
.expect("writing parameter to buffer failed");
assert_eq!(vec, other_vec);
}
#[test]
fn test_basic_u32_pair() {
let u32_pair = U32Pair(4, 8);
@@ -627,10 +737,32 @@ mod tests {
assert_eq!(u32_conv_back, 4);
u32_conv_back = u32::from_be_bytes(raw[4..8].try_into().unwrap());
assert_eq!(u32_conv_back, 8);
test_writing_fails(&u32_pair);
test_cloning_works(&u32_pair);
let u32_praw = ParamsRaw::from(u32_pair);
test_writing(&u32_praw, &u32_pair);
} }
#[test]
-fn basic_signed_test_pair() {
+fn test_u16_pair_writing_fails() {
let u16_pair = U16Pair(4, 8);
test_writing_fails(&u16_pair);
test_cloning_works(&u16_pair);
let u16_praw = ParamsRaw::from(u16_pair);
test_writing(&u16_praw, &u16_pair);
}
#[test]
fn test_u8_pair_writing_fails() {
let u8_pair = U8Pair(4, 8);
test_writing_fails(&u8_pair);
test_cloning_works(&u8_pair);
let u8_praw = ParamsRaw::from(u8_pair);
test_writing(&u8_praw, &u8_pair);
}
#[test]
fn basic_i8_test() {
let i8_pair = I8Pair(-3, -16);
assert_eq!(i8_pair.0, -3);
assert_eq!(i8_pair.1, -16);
@@ -639,10 +771,31 @@ mod tests {
assert_eq!(i8_conv_back, -3);
i8_conv_back = i8::from_be_bytes(raw[1..2].try_into().unwrap());
assert_eq!(i8_conv_back, -16);
test_writing_fails(&i8_pair);
test_cloning_works(&i8_pair);
let i8_praw = ParamsRaw::from(i8_pair);
test_writing(&i8_praw, &i8_pair);
}
#[test]
-fn basic_signed_test_triplet() {
+fn test_from_u32_triplet() {
let raw_params = U32Triplet::from((1, 2, 3));
assert_eq!(raw_params.0, 1);
assert_eq!(raw_params.1, 2);
assert_eq!(raw_params.2, 3);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 12);
assert_eq!(
raw_params.to_be_bytes(),
[0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3]
);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u32_triplet = ParamsRaw::from(raw_params);
test_writing(&u32_triplet, &raw_params);
}
#[test]
fn test_i8_triplet() {
let i8_triplet = I8Triplet(-3, -16, -126);
assert_eq!(i8_triplet.0, -3);
assert_eq!(i8_triplet.1, -16);
@@ -654,6 +807,10 @@ mod tests {
assert_eq!(i8_conv_back, -16);
i8_conv_back = i8::from_be_bytes(raw[2..3].try_into().unwrap());
assert_eq!(i8_conv_back, -126);
test_writing_fails(&i8_triplet);
test_cloning_works(&i8_triplet);
let i8_praw = ParamsRaw::from(i8_triplet);
test_writing(&i8_praw, &i8_triplet);
}
#[test]
@@ -676,4 +833,352 @@ mod tests {
panic!("Params type is not a vector")
}
}
#[test]
fn test_params_written_len_raw() {
let param_raw = ParamsRaw::from((500_u32, 1000_u32));
let param: Params = Params::Heapless(param_raw.into());
assert_eq!(param.written_len(), 8);
let mut buf: [u8; 8] = [0; 8];
param
.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(u32::from_be_bytes(buf[0..4].try_into().unwrap()), 500);
assert_eq!(u32::from_be_bytes(buf[4..8].try_into().unwrap()), 1000);
}
#[test]
fn test_params_written_string() {
let string = "Test String".to_string();
let param = Params::String(string.clone());
assert_eq!(param.written_len(), string.len());
let vec = param.to_vec().unwrap();
let string_conv_back = String::from_utf8(vec).expect("conversion to string failed");
assert_eq!(string_conv_back, string);
}
#[test]
fn test_params_written_vec() {
let vec: Vec<u8> = alloc::vec![1, 2, 3, 4, 5];
let param = Params::Vec(vec.clone());
assert_eq!(param.written_len(), vec.len());
assert_eq!(param.to_vec().expect("writing vec params failed"), vec);
}
#[test]
fn test_u32_single() {
let raw_params = U32::from(20);
assert_eq!(raw_params.0, 20);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
assert_eq!(raw_params.to_be_bytes(), [0, 0, 0, 20]);
let other = U32::from(20);
assert_eq!(raw_params, other);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u32_praw = ParamsRaw::from(raw_params);
test_writing(&u32_praw, &raw_params);
}
#[test]
fn test_i8_single() {
let neg_number: i8 = -5_i8;
let raw_params = I8::from(neg_number);
assert_eq!(raw_params.0, neg_number);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 1);
assert_eq!(raw_params.to_be_bytes(), neg_number.to_be_bytes());
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u8_praw = ParamsRaw::from(raw_params);
test_writing(&u8_praw, &raw_params);
}
#[test]
fn test_u8_single() {
let raw_params = U8::from(20);
assert_eq!(raw_params.0, 20);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 1);
assert_eq!(raw_params.to_be_bytes(), [20]);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u32_praw = ParamsRaw::from(raw_params);
test_writing(&u32_praw, &raw_params);
}
#[test]
fn test_u16_single() {
let raw_params = U16::from(0x123);
assert_eq!(raw_params.0, 0x123);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 2);
assert_eq!(raw_params.to_be_bytes(), [0x01, 0x23]);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u16_praw = ParamsRaw::from(raw_params);
test_writing(&u16_praw, &raw_params);
}
#[test]
fn test_u16_triplet() {
let raw_params = U16Triplet::from((1, 2, 3));
assert_eq!(raw_params.0, 1);
assert_eq!(raw_params.1, 2);
assert_eq!(raw_params.2, 3);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 6);
assert_eq!(raw_params.to_be_bytes(), [0, 1, 0, 2, 0, 3]);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u16_praw = ParamsRaw::from(raw_params);
test_writing(&u16_praw, &raw_params);
}
#[test]
fn test_u8_triplet() {
let raw_params = U8Triplet::from((1, 2, 3));
assert_eq!(raw_params.0, 1);
assert_eq!(raw_params.1, 2);
assert_eq!(raw_params.2, 3);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 3);
assert_eq!(raw_params.to_be_bytes(), [1, 2, 3]);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let u8_praw = ParamsRaw::from(raw_params);
test_writing(&u8_praw, &raw_params);
}
#[test]
fn test_i16_single() {
let value = -300_i16;
let raw_params = I16::from(value);
assert_eq!(raw_params.0, value);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 2);
assert_eq!(raw_params.to_be_bytes(), value.to_be_bytes());
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i16_praw = ParamsRaw::from(raw_params);
test_writing(&i16_praw, &raw_params);
}
#[test]
fn test_i16_pair() {
let raw_params = I16Pair::from((-300, -400));
assert_eq!(raw_params.0, -300);
assert_eq!(raw_params.1, -400);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i16_praw = ParamsRaw::from(raw_params);
test_writing(&i16_praw, &raw_params);
}
#[test]
fn test_i16_triplet() {
let raw_params = I16Triplet::from((-300, -400, -350));
assert_eq!(raw_params.0, -300);
assert_eq!(raw_params.1, -400);
assert_eq!(raw_params.2, -350);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 6);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i16_praw = ParamsRaw::from(raw_params);
test_writing(&i16_praw, &raw_params);
}
#[test]
fn test_i32_single() {
let raw_params = I32::from(-80000);
assert_eq!(raw_params.0, -80000);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 4);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i32_praw = ParamsRaw::from(raw_params);
test_writing(&i32_praw, &raw_params);
}
#[test]
fn test_i32_pair() {
let raw_params = I32Pair::from((-80000, -200));
assert_eq!(raw_params.0, -80000);
assert_eq!(raw_params.1, -200);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 8);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i32_praw = ParamsRaw::from(raw_params);
test_writing(&i32_praw, &raw_params);
}
#[test]
fn test_i32_triplet() {
let raw_params = I32Triplet::from((-80000, -5, -200));
assert_eq!(raw_params.0, -80000);
assert_eq!(raw_params.1, -5);
assert_eq!(raw_params.2, -200);
assert_eq!(WritableToBeBytes::written_len(&raw_params), 12);
test_writing_fails(&raw_params);
test_cloning_works(&raw_params);
let i32_praw = ParamsRaw::from(raw_params);
test_writing(&i32_praw, &raw_params);
}
#[test]
fn test_f32_single() {
let param = F32::from(0.1);
assert_eq!(param.0, 0.1);
assert_eq!(WritableToBeBytes::written_len(&param), 4);
let f32_pair_raw = param.to_be_bytes();
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
assert_eq!(f32_0, 0.1);
test_writing_fails(&param);
test_cloning_works(&param);
let praw = ParamsRaw::from(param);
test_writing(&praw, &param);
let p_try_from = F32::try_from(param.to_be_bytes().as_ref()).expect("try_from failed");
assert_eq!(p_try_from, param);
}
#[test]
fn test_f32_pair() {
let param = F32Pair::from((0.1, 0.2));
assert_eq!(param.0, 0.1);
assert_eq!(param.1, 0.2);
assert_eq!(WritableToBeBytes::written_len(&param), 8);
let f32_pair_raw = param.to_be_bytes();
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
assert_eq!(f32_0, 0.1);
let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap());
assert_eq!(f32_1, 0.2);
let other_pair = F32Pair::from((0.1, 0.2));
assert_eq!(param, other_pair);
test_writing_fails(&param);
test_cloning_works(&param);
let praw = ParamsRaw::from(param);
test_writing(&praw, &param);
let p_try_from = F32Pair::try_from(param.to_be_bytes().as_ref()).expect("try_from failed");
assert_eq!(p_try_from, param);
}
#[test]
fn test_f32_triplet() {
let f32 = F32Triplet::from((0.1, -0.1, -5.2));
assert_eq!(f32.0, 0.1);
assert_eq!(f32.1, -0.1);
assert_eq!(f32.2, -5.2);
assert_eq!(WritableToBeBytes::written_len(&f32), 12);
let f32_pair_raw = f32.to_be_bytes();
let f32_0 = f32::from_be_bytes(f32_pair_raw[0..4].try_into().unwrap());
assert_eq!(f32_0, 0.1);
let f32_1 = f32::from_be_bytes(f32_pair_raw[4..8].try_into().unwrap());
assert_eq!(f32_1, -0.1);
let f32_2 = f32::from_be_bytes(f32_pair_raw[8..12].try_into().unwrap());
assert_eq!(f32_2, -5.2);
test_writing_fails(&f32);
test_cloning_works(&f32);
let f32_praw = ParamsRaw::from(f32);
test_writing(&f32_praw, &f32);
let f32_try_from =
F32Triplet::try_from(f32.to_be_bytes().as_ref()).expect("try_from failed");
assert_eq!(f32_try_from, f32);
}
#[test]
fn test_u64_single() {
let u64 = U64::from(0x1010101010);
assert_eq!(u64.0, 0x1010101010);
assert_eq!(WritableToBeBytes::written_len(&u64), 8);
test_writing_fails(&u64);
test_cloning_works(&u64);
let praw = ParamsRaw::from(u64);
test_writing(&praw, &u64);
}
#[test]
fn test_i64_single() {
let i64 = I64::from(-0xfffffffff);
assert_eq!(i64.0, -0xfffffffff);
assert_eq!(WritableToBeBytes::written_len(&i64), 8);
test_writing_fails(&i64);
test_cloning_works(&i64);
let praw = ParamsRaw::from(i64);
test_writing(&praw, &i64);
}
#[test]
fn test_f64_single() {
let value = 823_823_812_832.232_3;
let f64 = F64::from(value);
assert_eq!(f64.0, value);
assert_eq!(WritableToBeBytes::written_len(&f64), 8);
test_writing_fails(&f64);
test_cloning_works(&f64);
let praw = ParamsRaw::from(f64);
test_writing(&praw, &f64);
}
#[test]
fn test_f64_triplet() {
let f64_triplet = F64Triplet::from((0.1, 0.2, 0.3));
assert_eq!(f64_triplet.0, 0.1);
assert_eq!(f64_triplet.1, 0.2);
assert_eq!(f64_triplet.2, 0.3);
assert_eq!(WritableToBeBytes::written_len(&f64_triplet), 24);
let f64_triplet_raw = f64_triplet.to_be_bytes();
let f64_0 = f64::from_be_bytes(f64_triplet_raw[0..8].try_into().unwrap());
assert_eq!(f64_0, 0.1);
let f64_1 = f64::from_be_bytes(f64_triplet_raw[8..16].try_into().unwrap());
assert_eq!(f64_1, 0.2);
let f64_2 = f64::from_be_bytes(f64_triplet_raw[16..24].try_into().unwrap());
assert_eq!(f64_2, 0.3);
test_writing_fails(&f64_triplet);
test_cloning_works(&f64_triplet);
}
#[test]
fn test_u8_ecss_enum() {
let value = 200;
let u8p = EcssEnumU8::new(value);
test_cloning_works(&u8p);
let praw = ParamsEcssEnum::from(u8p);
assert_eq!(praw.written_len(), 1);
let mut buf = [0; 1];
praw.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(buf[0], value);
}
#[test]
fn test_u16_ecss_enum() {
let value = 60000;
let u16p = EcssEnumU16::new(value);
test_cloning_works(&u16p);
let praw = ParamsEcssEnum::from(u16p);
assert_eq!(praw.written_len(), 2);
let mut buf = [0; 2];
praw.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(u16::from_be_bytes(buf), value);
}
#[test]
fn test_u32_ecss_enum() {
let value = 70000;
let u32p = EcssEnumU32::new(value);
test_cloning_works(&u32p);
let praw = ParamsEcssEnum::from(u32p);
assert_eq!(praw.written_len(), 4);
let mut buf = [0; 4];
praw.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(u32::from_be_bytes(buf), value);
}
#[test]
fn test_u64_ecss_enum() {
let value = 0xffffffffff;
let u64p = EcssEnumU64::new(value);
test_cloning_works(&u64p);
let praw = ParamsEcssEnum::from(u64p);
assert_eq!(praw.written_len(), 8);
let mut buf = [0; 8];
praw.write_to_be_bytes(&mut buf)
.expect("writing to buffer failed");
assert_eq!(u64::from_be_bytes(buf), value);
}
}


@@ -72,7 +72,6 @@
//! }
//! ```
#[cfg(feature = "alloc")]
-#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
use core::fmt::{Display, Formatter};
use delegate::delegate;


@@ -24,6 +24,42 @@ pub enum SwitchState {
Faulty = 3,
}
#[derive(Debug, Eq, PartialEq, Copy, Clone)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum SwitchStateBinary {
Off = 0,
On = 1,
}
impl TryFrom<SwitchState> for SwitchStateBinary {
type Error = ();
fn try_from(value: SwitchState) -> Result<Self, Self::Error> {
match value {
SwitchState::Off => Ok(SwitchStateBinary::Off),
SwitchState::On => Ok(SwitchStateBinary::On),
_ => Err(()),
}
}
}
impl<T: Into<u64>> From<T> for SwitchStateBinary {
fn from(value: T) -> Self {
if value.into() == 0 {
return SwitchStateBinary::Off;
}
SwitchStateBinary::On
}
}
impl From<SwitchStateBinary> for SwitchState {
fn from(value: SwitchStateBinary) -> Self {
match value {
SwitchStateBinary::Off => SwitchState::Off,
SwitchStateBinary::On => SwitchState::On,
}
}
}
pub type SwitchId = u16;
/// Generic trait for a device capable of turning on and off switches.
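A quick sketch of the new binary switch state and its conversions; the readout value is a placeholder and the module path satrs::power is assumed.

use satrs::power::{SwitchState, SwitchStateBinary};

fn main() {
    // Any unsigned integer readout maps to Off for 0 and On for everything else.
    let readout = SwitchStateBinary::from(1_u8);
    assert_eq!(readout, SwitchStateBinary::On);
    assert!(matches!(SwitchState::from(readout), SwitchState::On));

    // The fallible direction only accepts the Off and On states.
    assert!(SwitchStateBinary::try_from(SwitchState::Faulty).is_err());
}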


@@ -1,6 +1,10 @@
-use crate::{action::ActionRequest, TargetId};
+use crate::{
+action::{ActionId, ActionRequest},
+params::Params,
+request::{GenericMessage, MessageMetadata, RequestId},
+};
-use super::verification::{TcStateAccepted, VerificationToken};
+use satrs_shared::res_code::ResultU16;
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
@@ -8,209 +12,278 @@ pub use std_mod::*;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
+#[allow(unused_imports)]
pub use alloc_mod::*;
-/// This trait is an abstraction for the routing of PUS service 8 action requests to a dedicated
-/// recipient using the generic [TargetId].
-pub trait PusActionRequestRouter {
-type Error;
-fn route(
-&self,
-target_id: TargetId,
-hk_request: ActionRequest,
-token: VerificationToken<TcStateAccepted>,
-) -> Result<(), Self::Error>;
-}
+#[derive(Clone, Debug)]
+pub struct ActionRequestWithId {
+pub request_id: RequestId,
+pub request: ActionRequest,
+}
+/// A reply to an action request, but tailored to the PUS standard verification process.
+#[non_exhaustive]
+#[derive(Clone, PartialEq, Debug)]
+pub enum ActionReplyVariant {
+Completed,
+StepSuccess {
+step: u16,
+},
+CompletionFailed {
+error_code: ResultU16,
+params: Option<Params>,
+},
+StepFailed {
+error_code: ResultU16,
+step: u16,
+params: Option<Params>,
+},
+}
+#[derive(Debug, PartialEq, Clone)]
+pub struct PusActionReply {
+pub action_id: ActionId,
+pub variant: ActionReplyVariant,
+}
+impl PusActionReply {
+pub fn new(action_id: ActionId, variant: ActionReplyVariant) -> Self {
+Self { action_id, variant }
+}
+}
+pub type GenericActionReplyPus = GenericMessage<PusActionReply>;
+impl GenericActionReplyPus {
+pub fn new_action_reply(
+requestor_info: MessageMetadata,
+action_id: ActionId,
+reply: ActionReplyVariant,
+) -> Self {
+Self::new(requestor_info, PusActionReply::new(action_id, reply))
+}
+}
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod alloc_mod {
-use spacepackets::ecss::tc::PusTcReader;
-use crate::pus::verification::VerificationReportingProvider;
-use super::*;
-/// This trait is an abstraction for the conversion of a PUS service 8 action telecommand into
-/// an [ActionRequest].
-///
-/// Having a dedicated trait for this allows maximum flexibility and tailoring of the standard.
-/// The only requirement is that a valid [TargetId] and an [ActionRequest] are returned by the
-/// core conversion function.
-///
-/// The user should take care of performing the error handling as well. Some of the following
-/// aspects might be relevant:
-///
-/// - Checking the validity of the APID, service ID, subservice ID.
-/// - Checking the validity of the user data.
-///
-/// A [VerificationReporterWithSender] instance is passed to the user to also allow handling
-/// of the verification process as part of the PUS standard requirements.
-pub trait PusActionToRequestConverter {
-type Error;
-fn convert(
-&mut self,
-token: VerificationToken<TcStateAccepted>,
-tc: &PusTcReader,
-time_stamp: &[u8],
-verif_reporter: &impl VerificationReportingProvider,
+use crate::{
+action::ActionRequest,
+queue::GenericTargetedMessagingError,
+request::{
+GenericMessage, MessageReceiver, MessageSender, MessageSenderAndReceiver, RequestId,
+},
+ComponentId,
+};
+use super::PusActionReply;
+/// Helper type definition for an action handler which can handle action requests.
+pub type ActionRequestHandlerInterface<S, R> =
+MessageSenderAndReceiver<PusActionReply, ActionRequest, S, R>;
+impl<S: MessageSender<PusActionReply>, R: MessageReceiver<ActionRequest>>
+ActionRequestHandlerInterface<S, R>
+{
+pub fn try_recv_action_request(
+&self,
+) -> Result<Option<GenericMessage<ActionRequest>>, GenericTargetedMessagingError> {
+self.try_recv_message()
+}
+pub fn send_action_reply(
+&self,
+request_id: RequestId,
+target_id: ComponentId,
+reply: PusActionReply,
+) -> Result<(), GenericTargetedMessagingError> {
+self.send_message(request_id, target_id, reply)
+}
+}
+/// Helper type definition for an action requestor object which can send action requests and
+/// receive action replies.
+pub type ActionRequestorInterface<S, R> =
+MessageSenderAndReceiver<ActionRequest, PusActionReply, S, R>;
) -> Result<(TargetId, ActionRequest), Self::Error>;
impl<S: MessageSender<ActionRequest>, R: MessageReceiver<PusActionReply>>
ActionRequestorInterface<S, R>
{
pub fn try_recv_action_reply(
&self,
) -> Result<Option<GenericMessage<PusActionReply>>, GenericTargetedMessagingError> {
self.try_recv_message()
}
pub fn send_action_request(
&self,
request_id: RequestId,
target_id: ComponentId,
request: ActionRequest,
) -> Result<(), GenericTargetedMessagingError> {
self.send_message(request_id, target_id, request)
}
} }
} }
#[cfg(feature = "std")] #[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))] #[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod { pub mod std_mod {
use crate::pus::{ use std::sync::mpsc;
verification::VerificationReportingProvider, EcssTcInMemConverter, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceBase, use crate::{
PusServiceHelper, pus::{
verification::{self, TcStateToken},
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap,
},
ComponentId,
}; };
use super::*; use super::*;
/// This is a high-level handler for the PUS service 8 action service. #[derive(Debug, Clone, PartialEq, Eq)]
/// pub struct ActivePusActionRequestStd {
/// It performs the following handling steps: pub action_id: ActionId,
/// common: ActivePusRequestStd,
/// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter]
/// allows to configure the used telecommand memory backend.
/// 2. Convert the TC to a targeted action request using the provided
/// [PusActionToRequestConverter]. The generic error type is constrained to the
/// [PusPacketHandlingError] for the concrete implementation which offers a packet handler.
/// 3. Route the action request using the provided [PusActionRequestRouter].
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler].
pub struct PusService8ActionHandler<
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusActionToRequestConverter,
RequestRouter: PusActionRequestRouter<Error = RoutingError>,
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError = GenericRoutingError,
> {
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
pub request_converter: RequestConverter,
pub request_router: RequestRouter,
pub routing_error_handler: RoutingErrorHandler,
} }
impl< impl ActiveRequestProvider for ActivePusActionRequestStd {
TcInMemConverter: EcssTcInMemConverter, delegate::delegate! {
VerificationReporter: VerificationReportingProvider, to self.common {
RequestConverter: PusActionToRequestConverter<Error = PusPacketHandlingError>, fn target_id(&self) -> ComponentId;
RequestRouter: PusActionRequestRouter<Error = RoutingError>, fn token(&self) -> verification::TcStateToken;
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>, fn set_token(&mut self, token: verification::TcStateToken);
RoutingError: Clone, fn has_timed_out(&self) -> bool;
> fn timeout(&self) -> core::time::Duration;
PusService8ActionHandler< }
TcInMemConverter, }
VerificationReporter, }
RequestConverter,
RequestRouter, impl ActivePusActionRequestStd {
RoutingErrorHandler, pub fn new_from_common_req(action_id: ActionId, common: ActivePusRequestStd) -> Self {
RoutingError, Self { action_id, common }
> }
where
PusPacketHandlingError: From<RoutingError>,
{
pub fn new( pub fn new(
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>, action_id: ActionId,
request_converter: RequestConverter, target_id: ComponentId,
request_router: RequestRouter, token: TcStateToken,
routing_error_handler: RoutingErrorHandler, timeout: core::time::Duration,
) -> Self { ) -> Self {
Self { Self {
service_helper, action_id,
request_converter, common: ActivePusRequestStd::new(target_id, token, timeout),
request_router,
routing_error_handler,
} }
} }
/// Core function to poll the next TC packet and try to handle it.
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.service_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let mut partial_error = None;
let time_stamp =
PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let (target_id, action_request) = self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
&time_stamp,
&self.service_helper.common.verification_handler,
)?;
if let Err(e) =
self.request_router
.route(target_id, action_request, ecss_tc_and_token.token)
{
self.routing_error_handler.handle_error(
target_id,
ecss_tc_and_token.token,
&tc,
e.clone(),
&time_stamp,
&self.service_helper.common.verification_handler,
);
return Err(e.into());
}
Ok(PusPacketHandlerResult::RequestHandled)
}
} }
pub type DefaultActiveActionRequestMap = DefaultActiveRequestMap<ActivePusActionRequestStd>;
pub type ActionRequestHandlerMpsc = ActionRequestHandlerInterface<
mpsc::Sender<GenericMessage<PusActionReply>>,
mpsc::Receiver<GenericMessage<ActionRequest>>,
>;
pub type ActionRequestHandlerMpscBounded = ActionRequestHandlerInterface<
mpsc::SyncSender<GenericMessage<PusActionReply>>,
mpsc::Receiver<GenericMessage<ActionRequest>>,
>;
pub type ActionRequestorMpsc = ActionRequestorInterface<
mpsc::Sender<GenericMessage<ActionRequest>>,
mpsc::Receiver<GenericMessage<PusActionReply>>,
>;
pub type ActionRequestorBoundedMpsc = ActionRequestorInterface<
mpsc::SyncSender<GenericMessage<ActionRequest>>,
mpsc::Receiver<GenericMessage<PusActionReply>>,
>;
/*
pub type ModeRequestorAndHandlerMpsc = ModeInterface<
mpsc::Sender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::Sender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
pub type ModeRequestorAndHandlerMpscBounded = ModeInterface<
mpsc::SyncSender<GenericMessage<ModeRequest>>,
mpsc::Receiver<GenericMessage<ModeReply>>,
mpsc::SyncSender<GenericMessage<ModeReply>>,
mpsc::Receiver<GenericMessage<ModeRequest>>,
>;
*/
} }
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
/*
use core::{cell::RefCell, time::Duration};
use std::{sync::mpsc, time::SystemTimeError};
use alloc::{collections::VecDeque, vec::Vec};
use delegate::delegate; use delegate::delegate;
use spacepackets::{ use spacepackets::{
ecss::{ ecss::{
tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader}, tc::{PusTcCreator, PusTcReader},
tm::PusTmReader, tm::PusTmReader,
PusPacket, PusPacket,
}, },
CcsdsPacket, SequenceFlags, SpHeader, time::{cds, TimeWriter},
CcsdsPacket,
}; };
use crate::pus::{ use crate::{
tests::{ action::ActionRequestVariant,
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler, TestConverter, params::{self, ParamsRaw, WritableToBeBytes},
TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID, pus::{
tests::{
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler,
TestConverter, TestRouter, APP_DATA_TOO_SHORT,
},
verification::{
self,
tests::{SharedVerificationMap, TestVerificationReporter, VerificationStatus},
FailParams, TcStateAccepted, TcStateNone, TcStateStarted,
VerificationReportingProvider,
},
EcssTcInMemConverter, EcssTcInVecConverter, EcssTmtcError, GenericRoutingError,
MpscTcReceiver, PusPacketHandlerResult, PusPacketHandlingError, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TmAsVecSenderWithMpsc,
}, },
verification::{
tests::TestVerificationReporter, FailParams, RequestId, VerificationReportingProvider,
},
EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError,
}; };
use super::*; use super::*;
impl PusActionRequestRouter for TestRouter<ActionRequest> { impl<Request> PusRequestRouter<Request> for TestRouter<Request> {
type Error = GenericRoutingError; type Error = GenericRoutingError;
fn route( fn route(
&self, &self,
target_id: TargetId, target_id: TargetId,
hk_request: ActionRequest, request: Request,
_token: VerificationToken<TcStateAccepted>, _token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
self.routing_requests self.routing_requests
.borrow_mut() .borrow_mut()
.push_back((target_id, hk_request)); .push_back((target_id, request));
self.check_for_injected_error() self.check_for_injected_error()
} }
fn handle_error(
&self,
target_id: TargetId,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
error: Self::Error,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) {
self.routing_errors
.borrow_mut()
.push_back((target_id, error));
}
} }
impl PusActionToRequestConverter for TestConverter<8> { impl PusTcToRequestConverter<ActionRequest> for TestConverter<8> {
type Error = PusPacketHandlingError; type Error = PusPacketHandlingError;
fn convert( fn convert(
&mut self, &mut self,
@ -244,9 +317,9 @@ mod tests {
.expect("start success failure"); .expect("start success failure");
return Ok(( return Ok((
target_id.into(), target_id.into(),
ActionRequest::UnsignedIdAndVecData { ActionRequest {
action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()), action_id: u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()),
data: tc.user_data()[4..].to_vec(), variant: ActionRequestVariant::VecData(tc.user_data()[4..].to_vec()),
}, },
)); ));
} }
@ -256,29 +329,32 @@ mod tests {
} }
} }
struct Pus8HandlerWithVecTester { pub struct PusDynRequestHandler<const SERVICE: u8, Request> {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>, srv_helper: PusServiceHelper<
handler: PusService8ActionHandler< MpscTcReceiver,
TmAsVecSenderWithMpsc,
EcssTcInVecConverter, EcssTcInVecConverter,
TestVerificationReporter, TestVerificationReporter,
TestConverter<8>,
TestRouter<ActionRequest>,
TestRoutingErrorHandler,
>, >,
request_converter: TestConverter<SERVICE>,
request_router: TestRouter<Request>,
} }
impl Pus8HandlerWithVecTester { struct Pus8RequestTestbenchWithVec {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusDynRequestHandler<8, ActionRequest>,
}
impl Pus8RequestTestbenchWithVec {
pub fn new() -> Self { pub fn new() -> Self {
let (common, srv_handler) = let (common, srv_helper) = PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
Self { Self {
common, common,
handler: PusService8ActionHandler::new( handler: PusDynRequestHandler {
srv_handler, srv_helper,
TestConverter::default(), request_converter: TestConverter::default(),
TestRouter::default(), request_router: TestRouter::default(),
TestRoutingErrorHandler::default(), },
),
} }
} }
@ -293,13 +369,13 @@ mod tests {
} }
} }
delegate! { delegate! {
to self.handler.routing_error_handler { to self.handler.request_router {
pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError); pub fn retrieve_next_routing_error(&mut self) -> (TargetId, GenericRoutingError);
} }
} }
} }
impl PusTestHarness for Pus8HandlerWithVecTester { impl PusTestHarness for Pus8RequestTestbenchWithVec {
delegate! { delegate! {
to self.common { to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>; fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
@ -308,78 +384,421 @@ mod tests {
fn check_next_verification_tm( fn check_next_verification_tm(
&self, &self,
subservice: u8, subservice: u8,
expected_request_id: RequestId, expected_request_id: verification::RequestId,
); );
} }
} }
} }
impl SimplePusPacketHandler for Pus8HandlerWithVecTester { impl SimplePusPacketHandler for Pus8RequestTestbenchWithVec {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.handler.srv_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.handler
.srv_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let time_stamp = cds::TimeProvider::from_now_with_u16_days()
.expect("timestamp generation failed")
.to_vec()
.unwrap();
let (target_id, action_request) = self.handler.request_converter.convert(
ecss_tc_and_token.token,
&tc,
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
)?;
if let Err(e) = self.handler.request_router.route(
target_id,
action_request,
ecss_tc_and_token.token,
) {
self.handler.request_router.handle_error(
target_id,
ecss_tc_and_token.token,
&tc,
e.clone(),
&time_stamp,
&self.handler.srv_helper.common.verification_handler,
);
return Err(e.into());
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
const TIMEOUT_ERROR_CODE: ResultU16 = ResultU16::new(1, 2);
const COMPLETION_ERROR_CODE: ResultU16 = ResultU16::new(2, 0);
const COMPLETION_ERROR_CODE_STEP: ResultU16 = ResultU16::new(2, 1);
#[derive(Default)]
pub struct TestReplyHandlerHook {
pub unexpected_replies: VecDeque<GenericActionReplyPus>,
pub timeouts: RefCell<VecDeque<ActivePusActionRequest>>,
}
impl ReplyHandlerHook<ActivePusActionRequest, ActionReplyPusWithActionId> for TestReplyHandlerHook {
fn handle_unexpected_reply(&mut self, reply: &GenericActionReplyPus) {
self.unexpected_replies.push_back(reply.clone());
}
fn timeout_callback(&self, active_request: &ActivePusActionRequest) {
self.timeouts.borrow_mut().push_back(active_request.clone());
}
fn timeout_error_code(&self) -> ResultU16 {
TIMEOUT_ERROR_CODE
}
}
pub struct Pus8ReplyTestbench {
verif_reporter: TestVerificationReporter,
#[allow(dead_code)]
ecss_tm_receiver: mpsc::Receiver<Vec<u8>>,
handler: PusService8ReplyHandler<
TestVerificationReporter,
DefaultActiveActionRequestMap,
TestReplyHandlerHook,
mpsc::Sender<Vec<u8>>,
>,
}
impl Pus8ReplyTestbench {
pub fn new(normal_ctor: bool) -> Self {
let reply_handler_hook = TestReplyHandlerHook::default();
let shared_verif_map = SharedVerificationMap::default();
let test_verif_reporter = TestVerificationReporter::new(shared_verif_map.clone());
let (ecss_tm_sender, ecss_tm_receiver) = mpsc::channel();
let reply_handler = if normal_ctor {
PusService8ReplyHandler::new_from_now_with_default_map(
test_verif_reporter.clone(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
} else {
PusService8ReplyHandler::new_from_now(
test_verif_reporter.clone(),
DefaultActiveActionRequestMap::default(),
128,
reply_handler_hook,
ecss_tm_sender,
)
.expect("creating reply handler failed")
};
Self {
verif_reporter: test_verif_reporter,
ecss_tm_receiver,
handler: reply_handler,
}
}
pub fn init_handling_for_request(
&mut self,
request_id: RequestId,
_action_id: ActionId,
) -> VerificationToken<TcStateStarted> {
assert!(!self.handler.request_active(request_id));
// let action_req = ActionRequest::new(action_id, ActionRequestVariant::NoData);
let token = self.add_tc_with_req_id(request_id.into());
let token = self
.verif_reporter
.acceptance_success(token, &[])
.expect("acceptance success failure");
let token = self
.verif_reporter
.start_success(token, &[])
.expect("start success failure");
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.started.expect("request was not started"));
assert!(verif_info.accepted.expect("request was not accepted"));
token
}
pub fn next_unrequested_reply(&self) -> Option<GenericActionReplyPus> {
self.handler.user_hook.unexpected_replies.front().cloned()
}
pub fn assert_request_completion_success(&self, step: Option<u16>, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, true);
}
pub fn assert_request_completion_failure(
&self,
step: Option<u16>,
request_id: RequestId,
fail_enum: ResultU16,
fail_data: &[u8],
) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
self.assert_request_completion_common(request_id, &verif_info, step, false);
assert_eq!(verif_info.fail_enum.unwrap(), fail_enum.raw() as u64);
assert_eq!(verif_info.failure_data.unwrap(), fail_data);
}
pub fn assert_request_completion_common(
&self,
request_id: RequestId,
verif_info: &VerificationStatus,
step: Option<u16>,
completion_success: bool,
) {
if let Some(step) = step {
assert!(verif_info.step_status.is_some());
assert!(verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
assert_eq!(
verif_info.completed.expect("request is not completed"),
completion_success
);
assert!(!self.handler.request_active(request_id));
}
pub fn assert_request_step_failure(&self, step: u16, request_id: RequestId) {
let verif_info = self
.verif_reporter
.verification_info(&verification::RequestId::from(request_id))
.expect("no verification info found");
assert!(verif_info.step_status.is_some());
assert!(!verif_info.step_status.unwrap());
assert_eq!(step, verif_info.step);
}
pub fn add_routed_request(
&mut self,
request_id: verification::RequestId,
target_id: TargetId,
action_id: ActionId,
token: VerificationToken<TcStateStarted>,
timeout: Duration,
) {
if self.handler.request_active(request_id.into()) {
panic!("request already present");
}
self.handler
.add_routed_action_request(request_id, target_id, action_id, token, timeout);
if !self.handler.request_active(request_id.into()) {
panic!("request should be active now");
}
}
delegate! { delegate! {
to self.handler { to self.handler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>; pub fn request_active(&self, request_id: RequestId) -> bool;
pub fn handle_action_reply(
&mut self,
action_reply_with_ids: GenericMessage<ActionReplyPusWithActionId>,
time_stamp: &[u8]
) -> Result<(), EcssTmtcError>;
pub fn update_time_from_now(&mut self) -> Result<(), SystemTimeError>;
pub fn check_for_timeouts(&mut self, time_stamp: &[u8]) -> Result<(), EcssTmtcError>;
}
to self.verif_reporter {
fn add_tc_with_req_id(&mut self, req_id: verification::RequestId) -> VerificationToken<TcStateNone>;
} }
} }
} }
#[test] #[test]
fn basic_test() { fn test_reply_handler_completion_success() {
let mut action_handler = Pus8HandlerWithVecTester::new(); let mut reply_testbench = Pus8ReplyTestbench::new(true);
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let sender_id = 0x06;
let sec_header = PusTcSecondaryHeader::new_simple(8, 1); let request_id = 0x02;
let action_id: u32 = 1; let target_id = 0x05;
let action_id_raw = action_id.to_be_bytes(); let action_id = 0x03;
let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); let token = reply_testbench.init_handling_for_request(request_id, action_id);
action_handler.send_tc(&tc); reply_testbench.add_routed_request(
let result = action_handler.handle_one_tc(); request_id.into(),
assert!(result.is_ok()); target_id,
action_handler.check_next_conversion(&tc); action_id,
let (target_id, action_req) = action_handler.retrieve_next_request(); token,
assert_eq!(target_id, TEST_APID.into()); Duration::from_millis(1),
if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { );
assert_eq!(action_id, 1); assert!(reply_testbench.request_active(request_id));
assert_eq!(data, &[]); let action_reply = GenericMessage::new(
} request_id,
sender_id,
ActionReplyPusWithActionId {
action_id,
variant: ActionReplyPus::Completed,
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_success(None, request_id);
} }
#[test] #[test]
fn test_routing_error() { fn test_reply_handler_step_success() {
let mut action_handler = Pus8HandlerWithVecTester::new(); let mut reply_testbench = Pus8ReplyTestbench::new(false);
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap(); let request_id = 0x02;
let sec_header = PusTcSecondaryHeader::new_simple(8, 1); let target_id = 0x05;
let action_id: u32 = 1; let action_id = 0x03;
let action_id_raw = action_id.to_be_bytes(); let token = reply_testbench.init_handling_for_request(request_id, action_id);
let tc = PusTcCreator::new(&mut sp_header, sec_header, action_id_raw.as_ref(), true); reply_testbench.add_routed_request(
let error = GenericRoutingError::UnknownTargetId(25); request_id.into(),
action_handler target_id,
.handler action_id,
.request_router token,
.inject_routing_error(error); Duration::from_millis(1),
action_handler.send_tc(&tc); );
let result = action_handler.handle_one_tc(); let action_reply = GenericActionReplyPus::new_action_reply(
assert!(result.is_err()); request_id,
let check_error = |routing_error: GenericRoutingError| { action_id,
if let GenericRoutingError::UnknownTargetId(id) = routing_error { action_id,
assert_eq!(id, 25); ActionReplyPus::StepSuccess { step: 1 },
} else { );
panic!("unexpected error type"); reply_testbench
} .handle_action_reply(action_reply, &[])
}; .expect("reply handling failure");
if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() { let action_reply = GenericActionReplyPus::new_action_reply(
check_error(routing_error); request_id,
} else { action_id,
panic!("unexpected error type"); action_id,
} ActionReplyPus::Completed,
);
action_handler.check_next_conversion(&tc); reply_testbench
let (target_id, action_req) = action_handler.retrieve_next_request(); .handle_action_reply(action_reply, &[])
assert_eq!(target_id, TEST_APID.into()); .expect("reply handling failure");
if let ActionRequest::UnsignedIdAndVecData { action_id, data } = action_req { reply_testbench.assert_request_completion_success(Some(1), request_id);
assert_eq!(action_id, 1);
assert_eq!(data, &[]);
}
let (target_id, found_error) = action_handler.retrieve_next_error();
assert_eq!(target_id, TEST_APID.into());
check_error(found_error);
} }
#[test]
fn test_reply_handler_completion_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let params_raw = ParamsRaw::U32(params::U32(5));
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::CompletionFailed {
error_code: COMPLETION_ERROR_CODE,
params: params_raw.into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_completion_failure(
None,
request_id,
COMPLETION_ERROR_CODE,
&params_raw.to_vec().unwrap(),
);
}
#[test]
fn test_reply_handler_step_failure() {
let mut reply_testbench = Pus8ReplyTestbench::new(false);
let sender_id = 0x01;
let request_id = 0x02;
let target_id = 0x05;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::StepFailed {
error_code: COMPLETION_ERROR_CODE_STEP,
step: 2,
params: ParamsRaw::U32(crate::params::U32(5)).into(),
},
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
reply_testbench.assert_request_step_failure(2, request_id);
}
#[test]
fn test_reply_handler_timeout_handling() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let request_id = 0x02;
let target_id = 0x06;
let action_id = 0x03;
let token = reply_testbench.init_handling_for_request(request_id, action_id);
reply_testbench.add_routed_request(
request_id.into(),
target_id,
action_id,
token,
Duration::from_millis(1),
);
let timeout_param = Duration::from_millis(1).as_millis() as u64;
let timeout_param_raw = timeout_param.to_be_bytes();
std::thread::sleep(Duration::from_millis(2));
reply_testbench
.update_time_from_now()
.expect("time update failure");
reply_testbench.check_for_timeouts(&[]).unwrap();
reply_testbench.assert_request_completion_failure(
None,
request_id,
TIMEOUT_ERROR_CODE,
&timeout_param_raw,
);
}
#[test]
fn test_unrequested_reply() {
let mut reply_testbench = Pus8ReplyTestbench::new(true);
let sender_id = 0x01;
let request_id = 0x02;
let action_id = 0x03;
let action_reply = GenericActionReplyPus::new_action_reply(
request_id,
sender_id,
action_id,
ActionReplyPus::Completed,
);
reply_testbench
.handle_action_reply(action_reply, &[])
.expect("reply handling failure");
let reply = reply_testbench.next_unrequested_reply();
assert!(reply.is_some());
let reply = reply.unwrap();
assert_eq!(reply.message.action_id, action_id);
assert_eq!(reply.request_id, request_id);
assert_eq!(reply.message.variant, ActionReplyPus::Completed);
}
*/
} }
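For orientation, here is a short sketch of how the reply types introduced in this file are meant to be built and inspected by a device handler. PusActionReply, ActionReplyVariant and ResultU16 come directly from the diff; the satrs::pus::action module path, the integer action ID and the result code values are assumptions for illustration:

use satrs::pus::action::{ActionReplyVariant, PusActionReply};
use satrs_shared::res_code::ResultU16;

fn main() {
    // Result code layout (group, unique id) mirrors the constants used in the tests.
    let step_failed_code = ResultU16::new(2, 1);

    // A handler would send this back to the PUS service after a failed step.
    let reply = PusActionReply::new(
        0x03, // action ID, assumed to be a plain integer as in the tests
        ActionReplyVariant::StepFailed {
            error_code: step_failed_code,
            step: 2,
            params: None,
        },
    );

    // ActionReplyVariant is non_exhaustive, so downstream matches need a catch-all arm.
    match &reply.variant {
        ActionReplyVariant::Completed => println!("action {} completed", reply.action_id),
        ActionReplyVariant::StepFailed { step, error_code, .. } => println!(
            "action {} failed at step {} with result code {:#06x}",
            reply.action_id,
            step,
            error_code.raw()
        ),
        _ => println!("other reply variant"),
    }
}

The GenericActionReplyPus alias then pairs such a reply with the MessageMetadata of the original requestor, as shown in new_action_reply above.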

View File

@ -2,152 +2,130 @@ use crate::pus::{source_buffer_large_enough, EcssTmtcError};
use spacepackets::ecss::tm::PusTmCreator; use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader; use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::{EcssEnumeration, PusError}; use spacepackets::ecss::{EcssEnumeration, PusError};
use spacepackets::ByteConversionError;
use spacepackets::{SpHeader, MAX_APID}; use spacepackets::{SpHeader, MAX_APID};
use crate::pus::EcssTmSenderCore; use crate::pus::EcssTmSenderCore;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub use alloc_mod::EventReporter; pub use alloc_mod::*;
pub use spacepackets::ecss::event::*; pub use spacepackets::ecss::event::*;
pub struct EventReporterBase { pub struct EventReportCreator {
msg_count: u16,
apid: u16, apid: u16,
pub dest_id: u16, pub dest_id: u16,
} }
impl EventReporterBase { impl EventReportCreator {
pub fn new(apid: u16) -> Option<Self> { pub fn new(apid: u16, dest_id: u16) -> Option<Self> {
if apid > MAX_APID { if apid > MAX_APID {
return None; return None;
} }
Some(Self { Some(Self { dest_id, apid })
msg_count: 0,
dest_id: 0,
apid,
})
} }
pub fn event_info( pub fn event_info<'time, 'src_data>(
&mut self, &self,
buf: &mut [u8], time_stamp: &'time [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&'src_data [u8]>,
) -> Result<(), EcssTmtcError> { src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
buf,
Subservice::TmInfoReport, Subservice::TmInfoReport,
sender,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
src_data_buf,
) )
} }
pub fn event_low_severity( pub fn event_low_severity<'time, 'src_data>(
&mut self, &self,
buf: &mut [u8], time_stamp: &'time [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&'src_data [u8]>,
) -> Result<(), EcssTmtcError> { src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
buf,
Subservice::TmLowSeverityReport, Subservice::TmLowSeverityReport,
sender,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
src_data_buf,
) )
} }
pub fn event_medium_severity( pub fn event_medium_severity<'time, 'src_data>(
&mut self, &self,
buf: &mut [u8], time_stamp: &'time [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&'src_data [u8]>,
) -> Result<(), EcssTmtcError> { buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm( self.generate_and_send_generic_tm(
buf,
Subservice::TmMediumSeverityReport, Subservice::TmMediumSeverityReport,
sender,
time_stamp, time_stamp,
event_id, event_id,
aux_data, params,
)
}
pub fn event_high_severity(
&mut self,
buf: &mut [u8],
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>,
) -> Result<(), EcssTmtcError> {
self.generate_and_send_generic_tm(
buf, buf,
Subservice::TmHighSeverityReport,
sender,
time_stamp,
event_id,
aux_data,
) )
} }
fn generate_and_send_generic_tm( pub fn event_high_severity<'time, 'src_data>(
&mut self, &self,
buf: &mut [u8], time_stamp: &'time [u8],
subservice: Subservice,
sender: &mut (impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&'src_data [u8]>,
) -> Result<(), EcssTmtcError> { src_data_buf: &'src_data mut [u8],
let tm = self.generate_generic_event_tm(buf, subservice, time_stamp, event_id, aux_data)?; ) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
sender.send_tm(tm.into())?; self.generate_and_send_generic_tm(
self.msg_count += 1; Subservice::TmHighSeverityReport,
Ok(()) time_stamp,
event_id,
params,
src_data_buf,
)
} }
fn generate_generic_event_tm<'a>( fn generate_and_send_generic_tm<'time, 'src_data>(
&'a self, &self,
buf: &'a mut [u8],
subservice: Subservice, subservice: Subservice,
time_stamp: &'a [u8], time_stamp: &'time [u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&'src_data [u8]>,
) -> Result<PusTmCreator, EcssTmtcError> { src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_generic_event_tm(subservice, time_stamp, event_id, params, src_data_buf)
}
fn generate_generic_event_tm<'time, 'src_data>(
&self,
subservice: Subservice,
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
params: Option<&'src_data [u8]>,
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
let mut src_data_len = event_id.size(); let mut src_data_len = event_id.size();
if let Some(aux_data) = aux_data { if let Some(aux_data) = params {
src_data_len += aux_data.len(); src_data_len += aux_data.len();
} }
source_buffer_large_enough(buf.len(), src_data_len)?; source_buffer_large_enough(src_data_buf.len(), src_data_len)?;
let mut sp_header = SpHeader::tm_unseg(self.apid, 0, 0).unwrap(); let sec_header =
let sec_header = PusTmSecondaryHeader::new( PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, time_stamp);
5,
subservice.into(),
self.msg_count,
self.dest_id,
Some(time_stamp),
);
let mut current_idx = 0; let mut current_idx = 0;
event_id event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?;
.write_to_be_bytes(&mut buf[0..event_id.size()])
.map_err(PusError::ByteConversion)?;
current_idx += event_id.size(); current_idx += event_id.size();
if let Some(aux_data) = aux_data { if let Some(aux_data) = params {
buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data); src_data_buf[current_idx..current_idx + aux_data.len()].copy_from_slice(aux_data);
current_idx += aux_data.len(); current_idx += aux_data.len();
} }
Ok(PusTmCreator::new( Ok(PusTmCreator::new(
&mut sp_header, SpHeader::new_from_apid(self.apid),
sec_header, sec_header,
&buf[0..current_idx], &src_data_buf[0..current_idx],
true, true,
)) ))
} }
@ -156,84 +134,130 @@ impl EventReporterBase {
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
mod alloc_mod { mod alloc_mod {
use super::*; use super::*;
use crate::ComponentId;
use alloc::vec; use alloc::vec;
use alloc::vec::Vec; use alloc::vec::Vec;
use core::cell::RefCell;
pub struct EventReporter { pub trait EventTmHookProvider {
source_data_buf: Vec<u8>, fn modify_tm(&self, tm: &mut PusTmCreator);
pub reporter: EventReporterBase,
} }
impl EventReporter { #[derive(Default)]
pub fn new(apid: u16, max_event_id_and_aux_data_size: usize) -> Option<Self> { pub struct DummyEventHook {}
let reporter = EventReporterBase::new(apid)?;
impl EventTmHookProvider for DummyEventHook {
fn modify_tm(&self, _tm: &mut PusTmCreator) {}
}
pub struct EventReporter<EventTmHook: EventTmHookProvider = DummyEventHook> {
id: ComponentId,
// Use interior mutability pattern here. This is just an intermediate buffer to the PUS event packet
// generation.
source_data_buf: RefCell<Vec<u8>>,
pub report_creator: EventReportCreator,
pub tm_hook: EventTmHook,
}
impl EventReporter<DummyEventHook> {
pub fn new(
id: ComponentId,
default_apid: u16,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self { Some(Self {
source_data_buf: vec![0; max_event_id_and_aux_data_size], id,
reporter, source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook: DummyEventHook::default(),
}) })
} }
}
impl<EventTmHook: EventTmHookProvider> EventReporter<EventTmHook> {
pub fn new_with_hook(
id: ComponentId,
default_apid: u16,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
tm_hook: EventTmHook,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook,
})
}
pub fn event_info( pub fn event_info(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
self.reporter.event_info( let mut mut_buf = self.source_data_buf.borrow_mut();
self.source_data_buf.as_mut_slice(), let mut tm_creator = self
sender, .report_creator
time_stamp, .event_info(time_stamp, event_id, params, mut_buf.as_mut_slice())
event_id, .map_err(PusError::ByteConversion)?;
aux_data, self.tm_hook.modify_tm(&mut tm_creator);
) sender.send_tm(self.id, tm_creator.into())?;
Ok(())
} }
pub fn event_low_severity( pub fn event_low_severity(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
self.reporter.event_low_severity( let mut mut_buf = self.source_data_buf.borrow_mut();
self.source_data_buf.as_mut_slice(), let mut tm_creator = self
sender, .report_creator
time_stamp, .event_low_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
event_id, .map_err(PusError::ByteConversion)?;
aux_data, self.tm_hook.modify_tm(&mut tm_creator);
) sender.send_tm(self.id, tm_creator.into())?;
Ok(())
} }
pub fn event_medium_severity( pub fn event_medium_severity(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
self.reporter.event_medium_severity( let mut mut_buf = self.source_data_buf.borrow_mut();
self.source_data_buf.as_mut_slice(), let mut tm_creator = self
sender, .report_creator
time_stamp, .event_medium_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
event_id, .map_err(PusError::ByteConversion)?;
aux_data, self.tm_hook.modify_tm(&mut tm_creator);
) sender.send_tm(self.id, tm_creator.into())?;
Ok(())
} }
pub fn event_high_severity( pub fn event_high_severity(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event_id: impl EcssEnumeration, event_id: impl EcssEnumeration,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<(), EcssTmtcError> { ) -> Result<(), EcssTmtcError> {
self.reporter.event_high_severity( let mut mut_buf = self.source_data_buf.borrow_mut();
self.source_data_buf.as_mut_slice(), let mut tm_creator = self
sender, .report_creator
time_stamp, .event_high_severity(time_stamp, event_id, params, mut_buf.as_mut_slice())
event_id, .map_err(PusError::ByteConversion)?;
aux_data, self.tm_hook.modify_tm(&mut tm_creator);
) sender.send_tm(self.id, tm_creator.into())?;
Ok(())
} }
} }
} }
@ -242,9 +266,10 @@ mod alloc_mod {
mod tests { mod tests {
use super::*; use super::*;
use crate::events::{EventU32, Severity}; use crate::events::{EventU32, Severity};
use crate::pus::test_util::TEST_COMPONENT_ID_0;
use crate::pus::tests::CommonTmInfo; use crate::pus::tests::CommonTmInfo;
use crate::pus::{EcssChannel, PusTmWrapper}; use crate::pus::{ChannelWithId, PusTmVariant};
use crate::ChannelId; use crate::ComponentId;
use spacepackets::ByteConversionError; use spacepackets::ByteConversionError;
use std::cell::RefCell; use std::cell::RefCell;
use std::collections::VecDeque; use std::collections::VecDeque;
@ -258,6 +283,7 @@ mod tests {
#[derive(Debug, Eq, PartialEq, Clone)] #[derive(Debug, Eq, PartialEq, Clone)]
struct TmInfo { struct TmInfo {
pub sender_id: ComponentId,
pub common: CommonTmInfo, pub common: CommonTmInfo,
pub event: EventU32, pub event: EventU32,
pub aux_data: Vec<u8>, pub aux_data: Vec<u8>,
@ -268,19 +294,19 @@ mod tests {
pub service_queue: RefCell<VecDeque<TmInfo>>, pub service_queue: RefCell<VecDeque<TmInfo>>,
} }
impl EcssChannel for TestSender { impl ChannelWithId for TestSender {
fn id(&self) -> ChannelId { fn id(&self) -> ComponentId {
0 0
} }
} }
impl EcssTmSenderCore for TestSender { impl EcssTmSenderCore for TestSender {
fn send_tm(&self, tm: PusTmWrapper) -> Result<(), EcssTmtcError> { fn send_tm(&self, sender_id: ComponentId, tm: PusTmVariant) -> Result<(), EcssTmtcError> {
match tm { match tm {
PusTmWrapper::InStore(_) => { PusTmVariant::InStore(_) => {
panic!("TestSender: unexpected call with address"); panic!("TestSender: unexpected call with address");
} }
PusTmWrapper::Direct(tm) => { PusTmVariant::Direct(tm) => {
assert!(!tm.source_data().is_empty()); assert!(!tm.source_data().is_empty());
let src_data = tm.source_data(); let src_data = tm.source_data();
assert!(src_data.len() >= 4); assert!(src_data.len() >= 4);
@ -291,6 +317,7 @@ mod tests {
aux_data.extend_from_slice(&src_data[4..]); aux_data.extend_from_slice(&src_data[4..]);
} }
self.service_queue.borrow_mut().push_back(TmInfo { self.service_queue.borrow_mut().push_back(TmInfo {
sender_id,
common: CommonTmInfo::new_from_tm(&tm), common: CommonTmInfo::new_from_tm(&tm),
event, event,
aux_data, aux_data,
@ -348,7 +375,12 @@ mod tests {
error_data: Option<&[u8]>, error_data: Option<&[u8]>,
) { ) {
let mut sender = TestSender::default(); let mut sender = TestSender::default();
let reporter = EventReporter::new(EXAMPLE_APID, max_event_aux_data_buf); let reporter = EventReporter::new(
TEST_COMPONENT_ID_0.id(),
EXAMPLE_APID,
0,
max_event_aux_data_buf,
);
assert!(reporter.is_some()); assert!(reporter.is_some());
let mut reporter = reporter.unwrap(); let mut reporter = reporter.unwrap();
let time_stamp_empty: [u8; 7] = [0; 7]; let time_stamp_empty: [u8; 7] = [0; 7];
@ -378,6 +410,7 @@ mod tests {
assert_eq!(tm_info.common.msg_counter, 0); assert_eq!(tm_info.common.msg_counter, 0);
assert_eq!(tm_info.common.apid, EXAMPLE_APID); assert_eq!(tm_info.common.apid, EXAMPLE_APID);
assert_eq!(tm_info.event, event); assert_eq!(tm_info.event, event);
assert_eq!(tm_info.sender_id, TEST_COMPONENT_ID_0.id());
assert_eq!(tm_info.aux_data, error_copy); assert_eq!(tm_info.aux_data, error_copy);
} }
@ -440,7 +473,7 @@ mod tests {
fn insufficient_buffer() { fn insufficient_buffer() {
let mut sender = TestSender::default(); let mut sender = TestSender::default();
for i in 0..3 { for i in 0..3 {
let reporter = EventReporter::new(EXAMPLE_APID, i); let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
assert!(reporter.is_some()); assert!(reporter.is_some());
let mut reporter = reporter.unwrap(); let mut reporter = reporter.unwrap();
check_buf_too_small(&mut reporter, &mut sender, i); check_buf_too_small(&mut reporter, &mut sender, i);
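To make the refactored reporter API easier to follow, here is a small usage sketch. It assumes the module paths satrs::pus::event and satrs::pus::PusTmAsVec and that, as in the event manager tests further below, an mpsc::Sender<PusTmAsVec> can be used directly as the EcssTmSenderCore sender; the IDs and buffer size are illustrative:

use std::sync::mpsc;

use satrs::events::{EventU32, Severity};
use satrs::pus::event::EventReporter;
use satrs::pus::PusTmAsVec;

fn main() {
    // The channel sender doubles as the TM sink.
    let (tm_tx, tm_rx) = mpsc::channel::<PusTmAsVec>();

    // (component ID, default APID, default destination ID, buffer size)
    let reporter = EventReporter::new(0x01, 0x02, 0, 128).expect("invalid APID");

    let event = EventU32::const_new(Severity::INFO, 1, 0);
    let timestamp = [0u8; 7];
    reporter
        .event_info(&tm_tx, &timestamp, event, None)
        .expect("generating event TM failed");

    // One PUS service 5 info report should now be waiting in the channel.
    assert!(tm_rx.try_recv().is_ok());
}

A custom EventTmHookProvider passed via new_with_hook can still patch the generated packet, for example the message counter or destination ID, before it is handed to the sender.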

View File

@ -2,8 +2,6 @@ use crate::events::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity}; use crate::events::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use alloc::boxed::Box;
#[cfg(feature = "alloc")]
use core::hash::Hash; use core::hash::Hash;
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
use hashbrown::HashSet; use hashbrown::HashSet;
@ -32,19 +30,19 @@ pub use heapless_mod::*;
/// structure to track disabled events. A more primitive and embedded friendly /// structure to track disabled events. A more primitive and embedded friendly
/// solution could track this information in a static or pre-allocated list which contains /// solution could track this information in a static or pre-allocated list which contains
/// the disabled events. /// the disabled events.
pub trait PusEventMgmtBackendProvider<Provider: GenericEvent> { pub trait PusEventMgmtBackendProvider<Event: GenericEvent> {
type Error; type Error;
fn event_enabled(&self, event: &Provider) -> bool; fn event_enabled(&self, event: &Event) -> bool;
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>; fn enable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error>; fn disable_event_reporting(&mut self, event: &Event) -> Result<bool, Self::Error>;
} }
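The doc comment above suggests that, instead of the hash set used by the default backend, a primitive pre-allocated list is sufficient on embedded targets; the heapless module below follows the same idea. A minimal self-contained sketch of such a backend, assuming the trait is reachable under satrs::pus::event_man and using an arbitrary capacity of 8:

use satrs::events::EventU32;
use satrs::pus::event_man::PusEventMgmtBackendProvider;

pub struct FixedSizePusMgmtBackend {
    disabled: [Option<EventU32>; 8],
}

impl FixedSizePusMgmtBackend {
    pub fn new() -> Self {
        Self { disabled: [None; 8] }
    }
}

impl PusEventMgmtBackendProvider<EventU32> for FixedSizePusMgmtBackend {
    // Returned when all slots are occupied and another event should be disabled.
    type Error = ();

    fn event_enabled(&self, event: &EventU32) -> bool {
        !self.disabled.iter().flatten().any(|e| e == event)
    }

    fn enable_event_reporting(&mut self, event: &EventU32) -> Result<bool, Self::Error> {
        for slot in self.disabled.iter_mut() {
            if slot.as_ref() == Some(event) {
                *slot = None;
                return Ok(true);
            }
        }
        Ok(false)
    }

    fn disable_event_reporting(&mut self, event: &EventU32) -> Result<bool, Self::Error> {
        if !self.event_enabled(event) {
            return Ok(false);
        }
        for slot in self.disabled.iter_mut() {
            if slot.is_none() {
                *slot = Some(*event);
                return Ok(true);
            }
        }
        Err(())
    }
}

Such a backend can then be passed to PusEventDispatcher::new in place of DefaultPusEventMgmtBackend.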
#[cfg(feature = "heapless")] #[cfg(feature = "heapless")]
pub mod heapless_mod { pub mod heapless_mod {
use super::*; use super::*;
use crate::events::{GenericEvent, LargestEventRaw}; use crate::events::LargestEventRaw;
use std::marker::PhantomData; use core::marker::PhantomData;
#[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))] #[cfg_attr(doc_cfg, doc(cfg(feature = "heapless")))]
// TODO: After a new version of heapless is released which uses hash32 version 0.3, try using // TODO: After a new version of heapless is released which uses hash32 version 0.3, try using
@ -108,6 +106,10 @@ impl From<EcssTmtcError> for EventManError {
#[cfg(feature = "alloc")] #[cfg(feature = "alloc")]
pub mod alloc_mod { pub mod alloc_mod {
use core::marker::PhantomData;
use crate::events::EventU16;
use super::*; use super::*;
/// Default backend provider which uses a hash set as the event reporting status container /// Default backend provider which uses a hash set as the event reporting status container
@ -115,14 +117,11 @@ pub mod alloc_mod {
/// ///
/// This provider is a good option for host systems or larger embedded systems where /// This provider is a good option for host systems or larger embedded systems where
/// the expected occasional memory allocation performed by the [HashSet] is not an issue. /// the expected occasional memory allocation performed by the [HashSet] is not an issue.
pub struct DefaultPusMgmtBackendProvider<Event: GenericEvent = EventU32> { pub struct DefaultPusEventMgmtBackend<Event: GenericEvent = EventU32> {
disabled: HashSet<Event>, disabled: HashSet<Event>,
} }
/// Safety: All contained field are [Send] as well impl<Event: GenericEvent> Default for DefaultPusEventMgmtBackend<Event> {
unsafe impl<Event: GenericEvent + Send> Send for DefaultPusMgmtBackendProvider<Event> {}
impl<Event: GenericEvent> Default for DefaultPusMgmtBackendProvider<Event> {
fn default() -> Self { fn default() -> Self {
Self { Self {
disabled: HashSet::default(), disabled: HashSet::default(),
@ -130,55 +129,59 @@ pub mod alloc_mod {
} }
} }
impl<Provider: GenericEvent + PartialEq + Eq + Hash + Copy + Clone> impl<EV: GenericEvent + PartialEq + Eq + Hash + Copy + Clone> PusEventMgmtBackendProvider<EV>
PusEventMgmtBackendProvider<Provider> for DefaultPusMgmtBackendProvider<Provider> for DefaultPusEventMgmtBackend<EV>
{ {
type Error = (); type Error = ();
fn event_enabled(&self, event: &Provider) -> bool {
fn event_enabled(&self, event: &EV) -> bool {
!self.disabled.contains(event) !self.disabled.contains(event)
} }
fn enable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> { fn enable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
Ok(self.disabled.remove(event)) Ok(self.disabled.remove(event))
} }
fn disable_event_reporting(&mut self, event: &Provider) -> Result<bool, Self::Error> { fn disable_event_reporting(&mut self, event: &EV) -> Result<bool, Self::Error> {
Ok(self.disabled.insert(*event)) Ok(self.disabled.insert(*event))
} }
} }
pub struct PusEventDispatcher<BackendError, Provider: GenericEvent> { pub struct PusEventDispatcher<
B: PusEventMgmtBackendProvider<EV, Error = E>,
EV: GenericEvent,
E,
> {
reporter: EventReporter, reporter: EventReporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>, backend: B,
phantom: PhantomData<(E, EV)>,
} }
/// Safety: All contained fields are send as well. impl<B: PusEventMgmtBackendProvider<Event, Error = E>, Event: GenericEvent, E>
unsafe impl<E: Send, Event: GenericEvent + Send> Send for PusEventDispatcher<E, Event> {} PusEventDispatcher<B, Event, E>
{
impl<BackendError, Provider: GenericEvent> PusEventDispatcher<BackendError, Provider> { pub fn new(reporter: EventReporter, backend: B) -> Self {
pub fn new( Self {
reporter: EventReporter, reporter,
backend: Box<dyn PusEventMgmtBackendProvider<Provider, Error = BackendError>>, backend,
) -> Self { phantom: PhantomData,
Self { reporter, backend } }
} }
}
impl<BackendError, Event: GenericEvent> PusEventDispatcher<BackendError, Event> { pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, E> {
pub fn enable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> {
self.backend.enable_event_reporting(event) self.backend.enable_event_reporting(event)
} }
pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, BackendError> { pub fn disable_tm_for_event(&mut self, event: &Event) -> Result<bool, E> {
self.backend.disable_event_reporting(event) self.backend.disable_event_reporting(event)
} }
pub fn generate_pus_event_tm_generic( pub fn generate_pus_event_tm_generic(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event: Event, event: Event,
aux_data: Option<&[u8]>, params: Option<&[u8]>,
) -> Result<bool, EventManError> { ) -> Result<bool, EventManError> {
if !self.backend.event_enabled(&event) { if !self.backend.event_enabled(&event) {
return Ok(false); return Ok(false);
@ -186,46 +189,58 @@ pub mod alloc_mod {
match event.severity() { match event.severity() {
Severity::INFO => self Severity::INFO => self
.reporter .reporter
.event_info(sender, time_stamp, event, aux_data) .event_info(sender, time_stamp, event, params)
.map(|_| true) .map(|_| true)
.map_err(|e| e.into()), .map_err(|e| e.into()),
Severity::LOW => self Severity::LOW => self
.reporter .reporter
.event_low_severity(sender, time_stamp, event, aux_data) .event_low_severity(sender, time_stamp, event, params)
.map(|_| true) .map(|_| true)
.map_err(|e| e.into()), .map_err(|e| e.into()),
Severity::MEDIUM => self Severity::MEDIUM => self
.reporter .reporter
.event_medium_severity(sender, time_stamp, event, aux_data) .event_medium_severity(sender, time_stamp, event, params)
.map(|_| true) .map(|_| true)
.map_err(|e| e.into()), .map_err(|e| e.into()),
Severity::HIGH => self Severity::HIGH => self
.reporter .reporter
.event_high_severity(sender, time_stamp, event, aux_data) .event_high_severity(sender, time_stamp, event, params)
.map(|_| true) .map(|_| true)
.map_err(|e| e.into()), .map_err(|e| e.into()),
} }
} }
} }
impl<BackendError> PusEventDispatcher<BackendError, EventU32> { impl<EV: GenericEvent + Copy + PartialEq + Eq + Hash>
PusEventDispatcher<DefaultPusEventMgmtBackend<EV>, EV, ()>
{
pub fn new_with_default_backend(reporter: EventReporter) -> Self {
Self {
reporter,
backend: DefaultPusEventMgmtBackend::default(),
phantom: PhantomData,
}
}
}
impl<B: PusEventMgmtBackendProvider<EventU32, Error = E>, E> PusEventDispatcher<B, EventU32, E> {
pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>( pub fn enable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self, &mut self,
event: &EventU32TypedSev<Severity>, event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> { ) -> Result<bool, E> {
self.backend.enable_event_reporting(event.as_ref()) self.backend.enable_event_reporting(event.as_ref())
} }
pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>( pub fn disable_tm_for_event_with_sev<Severity: HasSeverity>(
&mut self, &mut self,
event: &EventU32TypedSev<Severity>, event: &EventU32TypedSev<Severity>,
) -> Result<bool, BackendError> { ) -> Result<bool, E> {
self.backend.disable_event_reporting(event.as_ref()) self.backend.disable_event_reporting(event.as_ref())
} }
pub fn generate_pus_event_tm<Severity: HasSeverity>( pub fn generate_pus_event_tm<Severity: HasSeverity>(
&mut self, &self,
sender: &mut (impl EcssTmSenderCore + ?Sized), sender: &(impl EcssTmSenderCore + ?Sized),
time_stamp: &[u8], time_stamp: &[u8],
event: EventU32TypedSev<Severity>, event: EventU32TypedSev<Severity>,
     aux_data: Option<&[u8]>,
@@ -233,32 +248,45 @@ pub mod alloc_mod {
             self.generate_pus_event_tm_generic(sender, time_stamp, event.into(), aux_data)
         }
     }
+
+    pub type DefaultPusEventU16Dispatcher<E> =
+        PusEventDispatcher<DefaultPusEventMgmtBackend<EventU16>, EventU16, E>;
+    pub type DefaultPusEventU32Dispatcher<E> =
+        PusEventDispatcher<DefaultPusEventMgmtBackend<EventU32>, EventU32, E>;
 }

 #[cfg(test)]
 mod tests {
     use super::*;
     use crate::events::SeverityInfo;
-    use crate::pus::MpscTmAsVecSender;
-    use std::sync::mpsc::{channel, TryRecvError};
+    use crate::pus::PusTmAsVec;
+    use crate::request::UniqueApidTargetId;
+    use std::sync::mpsc::{self, TryRecvError};

     const INFO_EVENT: EventU32TypedSev<SeverityInfo> =
         EventU32TypedSev::<SeverityInfo>::const_new(1, 0);
     const LOW_SEV_EVENT: EventU32 = EventU32::const_new(Severity::LOW, 1, 5);
     const EMPTY_STAMP: [u8; 7] = [0; 7];
+    const TEST_APID: u16 = 0x02;
+    const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);

-    fn create_basic_man() -> PusEventDispatcher<(), EventU32> {
-        let reporter = EventReporter::new(0x02, 128).expect("Creating event repoter failed");
-        let backend = DefaultPusMgmtBackendProvider::<EventU32>::default();
-        PusEventDispatcher::new(reporter, Box::new(backend))
+    fn create_basic_man_1() -> DefaultPusEventU32Dispatcher<()> {
+        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
+            .expect("Creating event repoter failed");
+        PusEventDispatcher::new_with_default_backend(reporter)
+    }
+
+    fn create_basic_man_2() -> DefaultPusEventU32Dispatcher<()> {
+        let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
+            .expect("Creating event repoter failed");
+        let backend = DefaultPusEventMgmtBackend::default();
+        PusEventDispatcher::new(reporter, backend)
     }

     #[test]
     fn test_basic() {
-        let mut event_man = create_basic_man();
-        let (event_tx, event_rx) = channel();
-        let mut sender = MpscTmAsVecSender::new(0, "test_sender", event_tx);
+        let event_man = create_basic_man_1();
+        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
         let event_sent = event_man
-            .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
+            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
            .expect("Sending info event failed");
         assert!(event_sent);
@@ -268,14 +296,14 @@ mod tests {

     #[test]
     fn test_disable_event() {
-        let mut event_man = create_basic_man();
-        let (event_tx, event_rx) = channel();
-        let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
+        let mut event_man = create_basic_man_2();
+        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
+        // let mut sender = TmAsVecSenderWithMpsc::new(0, "test", event_tx);
         let res = event_man.disable_tm_for_event(&LOW_SEV_EVENT);
         assert!(res.is_ok());
         assert!(res.unwrap());
         let mut event_sent = event_man
-            .generate_pus_event_tm_generic(&mut sender, &EMPTY_STAMP, LOW_SEV_EVENT, None)
+            .generate_pus_event_tm_generic(&event_tx, &EMPTY_STAMP, LOW_SEV_EVENT, None)
             .expect("Sending low severity event failed");
         assert!(!event_sent);
         let res = event_rx.try_recv();
@@ -283,7 +311,7 @@ mod tests {
         assert!(matches!(res.unwrap_err(), TryRecvError::Empty));
         // Check that only the low severity event was disabled
         event_sent = event_man
-            .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
+            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
             .expect("Sending info event failed");
         assert!(event_sent);
         event_rx.try_recv().expect("No info event received");
@@ -291,9 +319,8 @@ mod tests {

     #[test]
     fn test_reenable_event() {
-        let mut event_man = create_basic_man();
-        let (event_tx, event_rx) = channel();
-        let mut sender = MpscTmAsVecSender::new(0, "test", event_tx);
+        let mut event_man = create_basic_man_1();
+        let (event_tx, event_rx) = mpsc::channel::<PusTmAsVec>();
         let mut res = event_man.disable_tm_for_event_with_sev(&INFO_EVENT);
         assert!(res.is_ok());
         assert!(res.unwrap());
@@ -301,7 +328,7 @@ mod tests {
         assert!(res.is_ok());
         assert!(res.unwrap());
         let event_sent = event_man
-            .generate_pus_event_tm(&mut sender, &EMPTY_STAMP, INFO_EVENT, None)
+            .generate_pus_event_tm(&event_tx, &EMPTY_STAMP, INFO_EVENT, None)
             .expect("Sending info event failed");
         assert!(event_sent);
         event_rx.try_recv().expect("No info event received");
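To make the API change concrete, a minimal usage sketch of the refactored dispatcher follows, modeled on the test code in this hunk. The module paths in the use statements, the chosen IDs and the event definition are illustrative assumptions; only the constructor and generation calls mirror the diff.

use std::sync::mpsc;

// Module paths below are assumptions; the crate-internal tests above pull the same items in
// via `crate::...` and `super::*`.
use satrs::events::{EventU32TypedSev, SeverityInfo};
use satrs::pus::event_man::{DefaultPusEventU32Dispatcher, PusEventDispatcher};
use satrs::pus::{EventReporter, PusTmAsVec};
use satrs::request::UniqueApidTargetId;

const APID: u16 = 0x02;
const EVENT_HANDLER_ID: UniqueApidTargetId = UniqueApidTargetId::new(APID, 0x05);
const SOME_INFO_EVENT: EventU32TypedSev<SeverityInfo> =
    EventU32TypedSev::<SeverityInfo>::const_new(1, 0);

fn publish_one_event() {
    // Arguments mirror EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128) from the tests above.
    let reporter = EventReporter::new(EVENT_HANDLER_ID.raw(), APID, 0, 128)
        .expect("creating event reporter failed");
    let event_man: DefaultPusEventU32Dispatcher<()> =
        PusEventDispatcher::new_with_default_backend(reporter);
    // Any ECSS TM sender works as the sink; the tests use an mpsc channel carrying PusTmAsVec.
    let (tm_tx, _tm_rx) = mpsc::channel::<PusTmAsVec>();
    let sent = event_man
        .generate_pus_event_tm(&tm_tx, &[0; 7], SOME_INFO_EVENT, None)
        .expect("generating event TM failed");
    assert!(sent);
}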


@@ -2,46 +2,63 @@ use crate::events::EventU32;
 use crate::pus::event_man::{EventRequest, EventRequestWithToken};
 use crate::pus::verification::TcStateToken;
 use crate::pus::{PartialPusHandlingError, PusPacketHandlerResult, PusPacketHandlingError};
+use crate::queue::GenericSendError;
 use spacepackets::ecss::event::Subservice;
 use spacepackets::ecss::PusPacket;
 use std::sync::mpsc::Sender;

 use super::verification::VerificationReportingProvider;
-use super::{EcssTcInMemConverter, PusServiceBase, PusServiceHelper};
+use super::{
+    EcssTcInMemConverter, EcssTcReceiverCore, EcssTmSenderCore, GenericConversionError,
+    GenericRoutingError, PusServiceHelper,
+};

-pub struct PusService5EventHandler<
+pub struct PusEventServiceHandler<
+    TcReceiver: EcssTcReceiverCore,
+    TmSender: EcssTmSenderCore,
     TcInMemConverter: EcssTcInMemConverter,
     VerificationReporter: VerificationReportingProvider,
 > {
-    pub service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
+    pub service_helper:
+        PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
     event_request_tx: Sender<EventRequestWithToken>,
 }

 impl<
+        TcReceiver: EcssTcReceiverCore,
+        TmSender: EcssTmSenderCore,
         TcInMemConverter: EcssTcInMemConverter,
         VerificationReporter: VerificationReportingProvider,
-    > PusService5EventHandler<TcInMemConverter, VerificationReporter>
+    > PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
 {
     pub fn new(
-        service_handler: PusServiceHelper<TcInMemConverter, VerificationReporter>,
+        service_helper: PusServiceHelper<
+            TcReceiver,
+            TmSender,
+            TcInMemConverter,
+            VerificationReporter,
+        >,
         event_request_tx: Sender<EventRequestWithToken>,
     ) -> Self {
         Self {
-            service_helper: service_handler,
+            service_helper,
             event_request_tx,
         }
     }

-    pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+    pub fn poll_and_handle_next_tc(
+        &mut self,
+        time_stamp: &[u8],
+    ) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
         let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
         if possible_packet.is_none() {
             return Ok(PusPacketHandlerResult::Empty);
         }
         let ecss_tc_and_token = possible_packet.unwrap();
-        let tc = self
-            .service_helper
-            .tc_in_mem_converter
-            .convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
+        self.service_helper
+            .tc_in_mem_converter_mut()
+            .cache(&ecss_tc_and_token.tc_in_memory)?;
+        let tc = self.service_helper.tc_in_mem_converter().convert()?;
         let subservice = tc.subservice();
         let srv = Subservice::try_from(subservice);
         if srv.is_err() {
@@ -50,65 +67,73 @@ impl<
                 ecss_tc_and_token.token,
             ));
         }
-        let handle_enable_disable_request = |enable: bool, stamp: [u8; 7]| {
-            if tc.user_data().len() < 4 {
-                return Err(PusPacketHandlingError::NotEnoughAppData {
-                    expected: 4,
-                    found: tc.user_data().len(),
-                });
-            }
-            let user_data = tc.user_data();
-            let event_u32 = EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
-            let start_token = self
-                .service_helper
-                .common
-                .verification_handler
-                .start_success(ecss_tc_and_token.token, &stamp)
-                .map_err(|_| PartialPusHandlingError::Verification);
-            let partial_error = start_token.clone().err();
-            let mut token: TcStateToken = ecss_tc_and_token.token.into();
-            if let Ok(start_token) = start_token {
-                token = start_token.into();
-            }
-            let event_req_with_token = if enable {
-                EventRequestWithToken {
-                    request: EventRequest::Enable(event_u32),
-                    token,
-                }
-            } else {
-                EventRequestWithToken {
-                    request: EventRequest::Disable(event_u32),
-                    token,
-                }
-            };
-            self.event_request_tx
-                .send(event_req_with_token)
-                .map_err(|_| {
-                    PusPacketHandlingError::Other("Forwarding event request failed".into())
-                })?;
-            if let Some(partial_error) = partial_error {
-                return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
-                    partial_error,
-                ));
-            }
-            Ok(PusPacketHandlerResult::RequestHandled)
-        };
-        let mut partial_error = None;
-        let time_stamp = PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
-            &mut partial_error,
-        );
+        let handle_enable_disable_request =
+            |enable: bool| -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+                if tc.user_data().len() < 4 {
+                    return Err(GenericConversionError::NotEnoughAppData {
+                        expected: 4,
+                        found: tc.user_data().len(),
+                    }
+                    .into());
+                }
+                let user_data = tc.user_data();
+                let event_u32 =
+                    EventU32::from(u32::from_be_bytes(user_data[0..4].try_into().unwrap()));
+                let start_token = self
+                    .service_helper
+                    .common
+                    .verif_reporter
+                    .start_success(
+                        &self.service_helper.common.tm_sender,
+                        ecss_tc_and_token.token,
+                        time_stamp,
+                    )
+                    .map_err(|_| PartialPusHandlingError::Verification);
+                let partial_error = start_token.clone().err();
+                let mut token: TcStateToken = ecss_tc_and_token.token.into();
+                if let Ok(start_token) = start_token {
+                    token = start_token.into();
+                }
+                let event_req_with_token = if enable {
+                    EventRequestWithToken {
+                        request: EventRequest::Enable(event_u32),
+                        token,
+                    }
+                } else {
+                    EventRequestWithToken {
+                        request: EventRequest::Disable(event_u32),
+                        token,
+                    }
+                };
+                self.event_request_tx
+                    .send(event_req_with_token)
+                    .map_err(|_| {
+                        PusPacketHandlingError::RequestRouting(GenericRoutingError::Send(
+                            GenericSendError::RxDisconnected,
+                        ))
+                    })?;
+                if let Some(partial_error) = partial_error {
+                    return Ok(PusPacketHandlerResult::RequestHandledPartialSuccess(
+                        partial_error,
+                    ));
+                }
+                Ok(PusPacketHandlerResult::RequestHandled)
+            };
         match srv.unwrap() {
             Subservice::TmInfoReport
             | Subservice::TmLowSeverityReport
             | Subservice::TmMediumSeverityReport
             | Subservice::TmHighSeverityReport => {
-                return Err(PusPacketHandlingError::InvalidSubservice(tc.subservice()))
+                return Err(PusPacketHandlingError::RequestConversion(
+                    GenericConversionError::WrongService(tc.subservice()),
+                ))
             }
             Subservice::TcEnableEventGeneration => {
-                handle_enable_disable_request(true, time_stamp)?;
+                handle_enable_disable_request(true)?;
             }
             Subservice::TcDisableEventGeneration => {
-                handle_enable_disable_request(false, time_stamp)?;
+                handle_enable_disable_request(false)?;
             }
             Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
                 return Ok(PusPacketHandlerResult::SubserviceNotImplemented(
@@ -126,53 +151,70 @@
 mod tests {
     use delegate::delegate;
     use spacepackets::ecss::event::Subservice;
+    use spacepackets::time::{cds, TimeWriter};
     use spacepackets::util::UnsignedEnum;
     use spacepackets::{
         ecss::{
             tc::{PusTcCreator, PusTcSecondaryHeader},
             tm::PusTmReader,
         },
-        SequenceFlags, SpHeader,
+        SpHeader,
     };
     use std::sync::mpsc::{self, Sender};

     use crate::pus::event_man::EventRequest;
-    use crate::pus::tests::SimplePusPacketHandler;
-    use crate::pus::verification::{RequestId, VerificationReporterWithSender};
+    use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID};
+    use crate::pus::verification::{
+        RequestId, VerificationReporter, VerificationReportingProvider,
+    };
+    use crate::pus::{GenericConversionError, MpscTcReceiver, MpscTmInSharedPoolSenderBounded};
     use crate::{
         events::EventU32,
         pus::{
             event_man::EventRequestWithToken,
-            tests::{PusServiceHandlerWithSharedStoreCommon, PusTestHarness, TEST_APID},
+            tests::PusServiceHandlerWithSharedStoreCommon,
             verification::{TcStateAccepted, VerificationToken},
             EcssTcInSharedStoreConverter, PusPacketHandlerResult, PusPacketHandlingError,
         },
     };

-    use super::PusService5EventHandler;
+    use super::PusEventServiceHandler;

     const TEST_EVENT_0: EventU32 = EventU32::const_new(crate::events::Severity::INFO, 5, 25);

     struct Pus5HandlerWithStoreTester {
         common: PusServiceHandlerWithSharedStoreCommon,
-        handler:
-            PusService5EventHandler<EcssTcInSharedStoreConverter, VerificationReporterWithSender>,
+        handler: PusEventServiceHandler<
+            MpscTcReceiver,
+            MpscTmInSharedPoolSenderBounded,
+            EcssTcInSharedStoreConverter,
+            VerificationReporter,
+        >,
     }

     impl Pus5HandlerWithStoreTester {
         pub fn new(event_request_tx: Sender<EventRequestWithToken>) -> Self {
-            let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new();
+            let (common, srv_handler) = PusServiceHandlerWithSharedStoreCommon::new(0);
             Self {
                 common,
-                handler: PusService5EventHandler::new(srv_handler, event_request_tx),
+                handler: PusEventServiceHandler::new(srv_handler, event_request_tx),
             }
         }
     }

     impl PusTestHarness for Pus5HandlerWithStoreTester {
+        fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
+            let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
+            self.handler
+                .service_helper
+                .verif_reporter()
+                .acceptance_success(self.handler.service_helper.tm_sender(), init_token, &[0; 7])
+                .expect("acceptance success failure")
+        }
+
         delegate! {
             to self.common {
-                fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
+                fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
                 fn read_next_tm(&mut self) -> PusTmReader<'_>;
                 fn check_no_tm_available(&self) -> bool;
                 fn check_next_verification_tm(&self, subservice: u8, expected_request_id: RequestId);
@@ -182,10 +224,9 @@ mod tests {
     }

     impl SimplePusPacketHandler for Pus5HandlerWithStoreTester {
-        delegate! {
-            to self.handler {
-                fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
-            }
+        fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
+            let time_stamp = cds::CdsTime::new_with_u16_days(0, 0).to_vec().unwrap();
+            self.handler.poll_and_handle_next_tc(&time_stamp)
         }
     }
@@ -195,15 +236,16 @@ mod tests {
         expected_event_req: EventRequest,
         event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
     ) {
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
         let mut app_data = [0; 4];
         TEST_EVENT_0
             .write_to_be_bytes(&mut app_data)
             .expect("writing test event failed");
-        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &app_data, true);
-        let token = test_harness.send_tc(&ping_tc);
-        let request_id = token.req_id();
+        let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
+        let token = test_harness.init_verification(&ping_tc);
+        test_harness.send_tc(&token, &ping_tc);
+        let request_id = token.request_id();
         test_harness.handle_one_tc().unwrap();
         test_harness.check_next_verification_tm(1, request_id);
         test_harness.check_next_verification_tm(3, request_id);
@@ -256,10 +298,11 @@ mod tests {
     fn test_sending_custom_subservice() {
         let (event_request_tx, _) = mpsc::channel();
         let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
-        let ping_tc = PusTcCreator::new_no_app_data(&mut sp_header, sec_header, true);
-        test_harness.send_tc(&ping_tc);
+        let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
+        let token = test_harness.init_verification(&ping_tc);
+        test_harness.send_tc(&token, &ping_tc);
         let result = test_harness.handle_one_tc();
         assert!(result.is_ok());
         let result = result.unwrap();
@@ -274,15 +317,19 @@ mod tests {
     fn test_sending_invalid_app_data() {
         let (event_request_tx, _) = mpsc::channel();
         let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
-        let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
+        let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
         let sec_header =
             PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
-        let ping_tc = PusTcCreator::new(&mut sp_header, sec_header, &[0, 1, 2], true);
-        test_harness.send_tc(&ping_tc);
+        let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true);
+        let token = test_harness.init_verification(&ping_tc);
+        test_harness.send_tc(&token, &ping_tc);
         let result = test_harness.handle_one_tc();
         assert!(result.is_err());
         let result = result.unwrap_err();
-        if let PusPacketHandlingError::NotEnoughAppData { expected, found } = result {
+        if let PusPacketHandlingError::RequestConversion(
+            GenericConversionError::NotEnoughAppData { expected, found },
+        ) = result
+        {
             assert_eq!(expected, 4);
             assert_eq!(found, 3);
         } else {


@@ -1,394 +0,0 @@
pub use spacepackets::ecss::hk::*;
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub use std_mod::*;
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub use alloc_mod::*;
use crate::{hk::HkRequest, TargetId};
use super::verification::{TcStateAccepted, VerificationToken};
/// This trait is an abstraction for the routing of PUS service 3 housekeeping requests to a
/// dedicated recipient using the generic [TargetId].
pub trait PusHkRequestRouter {
type Error;
fn route(
&self,
target_id: TargetId,
hk_request: HkRequest,
token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error>;
}
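This routing abstraction is dropped by this changeset, but for context, a minimal sketch of an implementation against this (old) trait follows. The channel map, the import paths and the error mapping are assumptions; the trait signature is taken from the definition above and the error variants from elsewhere in this diff.

use std::collections::HashMap;
use std::sync::mpsc;

use satrs::hk::HkRequest;                                           // path assumed
use satrs::pus::verification::{TcStateAccepted, VerificationToken}; // path assumed
use satrs::pus::GenericRoutingError;                                // path assumed
use satrs::queue::GenericSendError;                                 // path assumed
use satrs::TargetId;                                                // path assumed

pub struct MpscHkRouter {
    // One sender per known target; requests for unknown targets are reported as routing errors.
    senders: HashMap<TargetId, mpsc::Sender<(HkRequest, VerificationToken<TcStateAccepted>)>>,
}

impl PusHkRequestRouter for MpscHkRouter {
    type Error = GenericRoutingError;

    fn route(
        &self,
        target_id: TargetId,
        hk_request: HkRequest,
        token: VerificationToken<TcStateAccepted>,
    ) -> Result<(), Self::Error> {
        let sender = self
            .senders
            .get(&target_id)
            // Variant usage mirrors the test code further down; the payload type is assumed.
            .ok_or(GenericRoutingError::UnknownTargetId(target_id))?;
        sender
            .send((hk_request, token))
            .map_err(|_| GenericRoutingError::Send(GenericSendError::RxDisconnected))
    }
}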
#[cfg(feature = "alloc")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
pub mod alloc_mod {
use spacepackets::ecss::tc::PusTcReader;
use crate::pus::verification::VerificationReportingProvider;
use super::*;
/// This trait is an abstraction for the conversion of a PUS service 3 housekeeping telecommand
/// into a [HkRequest].
///
/// Having a dedicated trait for this allows maximum flexibility and tailoring of the standard.
/// The only requirement is that a valid [TargetId] and a [HkRequest] are returned by the
/// core conversion function.
///
/// The user should take care of performing the error handling as well. Some of the following
/// aspects might be relevant:
///
/// - Checking the validity of the APID, service ID, subservice ID.
/// - Checking the validity of the user data.
///
/// A [VerificationReportingProvider] instance is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements. A sketch of a minimal
/// implementation is shown after this module.
pub trait PusHkToRequestConverter {
type Error;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, HkRequest), Self::Error>;
}
}
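As a minimal, hedged illustration of the conversion abstraction above (which this changeset removes), the sketch below mirrors the TestConverter used in the tests further down: it treats the APID as the target ID and only supports one-shot housekeeping requests. The import paths are assumptions.

use satrs::hk::HkRequest;       // path assumed
use satrs::pus::verification::{
    TcStateAccepted, VerificationReportingProvider, VerificationToken,
};                              // path assumed
use satrs::pus::PusPacketHandlingError; // path assumed
use satrs::TargetId;            // path assumed
use spacepackets::ecss::hk::Subservice;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::PusPacket;
use spacepackets::CcsdsPacket;

#[derive(Default)]
pub struct OneShotHkConverter;

impl PusHkToRequestConverter for OneShotHkConverter {
    type Error = PusPacketHandlingError;

    fn convert(
        &mut self,
        token: VerificationToken<TcStateAccepted>,
        tc: &PusTcReader,
        time_stamp: &[u8],
        verif_reporter: &impl VerificationReportingProvider,
    ) -> Result<(TargetId, HkRequest), Self::Error> {
        if tc.user_data().len() < 4 {
            return Err(PusPacketHandlingError::NotEnoughAppData {
                expected: 4,
                found: tc.user_data().len(),
            });
        }
        if tc.subservice() == Subservice::TcGenerateOneShotHk as u8 {
            // Report successful start of execution as part of the conversion step.
            verif_reporter
                .start_success(token, time_stamp)
                .expect("start success failure");
            let unique_id = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
            // Using the APID as the target ID mirrors the test converter and is an assumption.
            return Ok((tc.apid().into(), HkRequest::OneShot(unique_id)));
        }
        Err(PusPacketHandlingError::InvalidAppData(
            "unexpected subservice".into(),
        ))
    }
}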
#[cfg(feature = "std")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "std")))]
pub mod std_mod {
use crate::pus::{
verification::VerificationReportingProvider, EcssTcInMemConverter, GenericRoutingError,
PusPacketHandlerResult, PusPacketHandlingError, PusRoutingErrorHandler, PusServiceBase,
PusServiceHelper,
};
use super::*;
/// This is a generic high-level handler for the PUS service 3 housekeeping service.
///
/// It performs the following handling steps:
///
/// 1. Retrieve the next TC packet from the [PusServiceHelper]. The [EcssTcInMemConverter]
/// allows configuring the used telecommand memory backend.
/// 2. Convert the TC to a targeted housekeeping request using the provided
/// [PusHkToRequestConverter]. The generic error type is constrained to
/// [PusPacketHandlingError] for the concrete implementation which offers a packet handler.
/// 3. Route the housekeeping request using the provided [PusHkRequestRouter]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
/// 4. Handle all routing errors using the provided [PusRoutingErrorHandler]. The generic error
/// type is constrained to the [GenericRoutingError] for the concrete implementation.
///
/// A sketch of the polling loop driving this handler is shown after the struct definition below.
pub struct PusService3HkHandler<
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusHkToRequestConverter,
RequestRouter: PusHkRequestRouter<Error = RoutingError>,
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError = GenericRoutingError,
> {
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
pub request_converter: RequestConverter,
pub request_router: RequestRouter,
pub routing_error_handler: RoutingErrorHandler,
}
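The polling loop referenced in the documentation above could look like the hedged sketch below. It only relies on the handle_one_tc() method and the result variants from this file; the generic bounds, the import paths and the logging are assumptions.

use satrs::pus::verification::VerificationReportingProvider; // path assumed
use satrs::pus::{
    EcssTcInMemConverter, GenericRoutingError, PusPacketHandlerResult, PusPacketHandlingError,
    PusRoutingErrorHandler,
}; // paths assumed

// Drains all currently pending PUS 3 TCs; assumes the handler was fully wired up with a
// converter, router and routing error handler as shown in the constructor above.
fn poll_hk_service<TcConv, VerifRep, Conv, Router, ErrHandler>(
    hk_handler: &mut PusService3HkHandler<TcConv, VerifRep, Conv, Router, ErrHandler>,
) where
    TcConv: EcssTcInMemConverter,
    VerifRep: VerificationReportingProvider,
    Conv: PusHkToRequestConverter<Error = PusPacketHandlingError>,
    Router: PusHkRequestRouter<Error = GenericRoutingError>,
    ErrHandler: PusRoutingErrorHandler<Error = GenericRoutingError>,
{
    loop {
        match hk_handler.handle_one_tc() {
            Ok(PusPacketHandlerResult::Empty) => break, // no TC pending
            Ok(_) => continue,                          // request converted and routed
            Err(e) => {
                // Routing errors were already reported through the routing error handler;
                // everything else is logged here and the loop gives up for this cycle.
                eprintln!("PUS 3 handler error: {e:?}");
                break;
            }
        }
    }
}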
impl<
TcInMemConverter: EcssTcInMemConverter,
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusHkToRequestConverter<Error = PusPacketHandlingError>,
RequestRouter: PusHkRequestRouter<Error = RoutingError>,
RoutingErrorHandler: PusRoutingErrorHandler<Error = RoutingError>,
RoutingError: Clone,
>
PusService3HkHandler<
TcInMemConverter,
VerificationReporter,
RequestConverter,
RequestRouter,
RoutingErrorHandler,
RoutingError,
>
where
PusPacketHandlingError: From<RoutingError>,
{
pub fn new(
service_helper: PusServiceHelper<TcInMemConverter, VerificationReporter>,
request_converter: RequestConverter,
request_router: RequestRouter,
routing_error_handler: RoutingErrorHandler,
) -> Self {
Self {
service_helper,
request_converter,
request_router,
routing_error_handler,
}
}
pub fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError> {
let possible_packet = self.service_helper.retrieve_and_accept_next_packet()?;
if possible_packet.is_none() {
return Ok(PusPacketHandlerResult::Empty);
}
let ecss_tc_and_token = possible_packet.unwrap();
let tc = self
.service_helper
.tc_in_mem_converter
.convert_ecss_tc_in_memory_to_reader(&ecss_tc_and_token.tc_in_memory)?;
let mut partial_error = None;
let time_stamp =
PusServiceBase::<VerificationReporter>::get_current_cds_short_timestamp(
&mut partial_error,
);
let (target_id, hk_request) = self.request_converter.convert(
ecss_tc_and_token.token,
&tc,
&time_stamp,
&self.service_helper.common.verification_handler,
)?;
if let Err(e) =
self.request_router
.route(target_id, hk_request, ecss_tc_and_token.token)
{
self.routing_error_handler.handle_error(
target_id,
ecss_tc_and_token.token,
&tc,
e.clone(),
&time_stamp,
&self.service_helper.common.verification_handler,
);
return Err(e.into());
}
Ok(PusPacketHandlerResult::RequestHandled)
}
}
}
#[cfg(test)]
mod tests {
use delegate::delegate;
use spacepackets::ecss::hk::Subservice;
use spacepackets::{
ecss::{
tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader},
tm::PusTmReader,
PusPacket,
},
CcsdsPacket, SequenceFlags, SpHeader,
};
use crate::{
hk::HkRequest,
pus::{
tests::{
PusServiceHandlerWithVecCommon, PusTestHarness, SimplePusPacketHandler,
TestConverter, TestRouter, TestRoutingErrorHandler, APP_DATA_TOO_SHORT, TEST_APID,
},
verification::{
tests::TestVerificationReporter, FailParams, RequestId, TcStateAccepted,
VerificationReportingProvider, VerificationToken,
},
EcssTcInVecConverter, GenericRoutingError, PusPacketHandlerResult,
PusPacketHandlingError,
},
TargetId,
};
use super::{PusHkRequestRouter, PusHkToRequestConverter, PusService3HkHandler};
impl PusHkRequestRouter for TestRouter<HkRequest> {
type Error = GenericRoutingError;
fn route(
&self,
target_id: TargetId,
hk_request: HkRequest,
_token: VerificationToken<TcStateAccepted>,
) -> Result<(), Self::Error> {
self.routing_requests
.borrow_mut()
.push_back((target_id, hk_request));
self.check_for_injected_error()
}
}
impl PusHkToRequestConverter for TestConverter<3> {
type Error = PusPacketHandlingError;
fn convert(
&mut self,
token: VerificationToken<TcStateAccepted>,
tc: &PusTcReader,
time_stamp: &[u8],
verif_reporter: &impl VerificationReportingProvider,
) -> Result<(TargetId, HkRequest), Self::Error> {
self.conversion_request.push_back(tc.raw_data().to_vec());
self.check_service(tc)?;
let target_id = tc.apid();
if tc.user_data().len() < 4 {
verif_reporter
.start_failure(
token,
FailParams::new(
time_stamp,
&APP_DATA_TOO_SHORT,
(tc.user_data().len() as u32).to_be_bytes().as_ref(),
),
)
.expect("start success failure");
return Err(PusPacketHandlingError::NotEnoughAppData {
expected: 4,
found: tc.user_data().len(),
});
}
if tc.subservice() == Subservice::TcGenerateOneShotHk as u8 {
verif_reporter
.start_success(token, time_stamp)
.expect("start success failure");
return Ok((
target_id.into(),
HkRequest::OneShot(u32::from_be_bytes(
tc.user_data()[0..4].try_into().unwrap(),
)),
));
}
Err(PusPacketHandlingError::InvalidAppData(
"unexpected app data".into(),
))
}
}
struct Pus3HandlerWithVecTester {
common: PusServiceHandlerWithVecCommon<TestVerificationReporter>,
handler: PusService3HkHandler<
EcssTcInVecConverter,
TestVerificationReporter,
TestConverter<3>,
TestRouter<HkRequest>,
TestRoutingErrorHandler,
>,
}
impl Pus3HandlerWithVecTester {
pub fn new() -> Self {
let (common, srv_handler) =
PusServiceHandlerWithVecCommon::new_with_test_verif_sender();
Self {
common,
handler: PusService3HkHandler::new(
srv_handler,
TestConverter::default(),
TestRouter::default(),
TestRoutingErrorHandler::default(),
),
}
}
delegate! {
to self.handler.request_converter {
pub fn check_next_conversion(&mut self, tc: &PusTcCreator);
}
}
delegate! {
to self.handler.request_router {
pub fn retrieve_next_request(&mut self) -> (TargetId, HkRequest);
}
}
delegate! {
to self.handler.routing_error_handler {
pub fn retrieve_next_error(&mut self) -> (TargetId, GenericRoutingError);
}
}
}
impl PusTestHarness for Pus3HandlerWithVecTester {
delegate! {
to self.common {
fn send_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
fn check_next_verification_tm(
&self,
subservice: u8,
expected_request_id: RequestId,
);
}
}
}
impl SimplePusPacketHandler for Pus3HandlerWithVecTester {
delegate! {
to self.handler {
fn handle_one_tc(&mut self) -> Result<PusPacketHandlerResult, PusPacketHandlingError>;
}
}
}
#[test]
fn basic_test() {
let mut hk_handler = Pus3HandlerWithVecTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8);
let unique_id: u32 = 1;
let unique_id_raw = unique_id.to_be_bytes();
let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true);
hk_handler.send_tc(&tc);
let result = hk_handler.handle_one_tc();
assert!(result.is_ok());
hk_handler.check_next_conversion(&tc);
let (target_id, hk_request) = hk_handler.retrieve_next_request();
assert_eq!(target_id, TEST_APID.into());
if let HkRequest::OneShot(id) = hk_request {
assert_eq!(id, unique_id);
} else {
panic!("unexpected request");
}
}
#[test]
fn test_routing_error() {
let mut hk_handler = Pus3HandlerWithVecTester::new();
let mut sp_header = SpHeader::tc(TEST_APID, SequenceFlags::Unsegmented, 0, 0).unwrap();
let sec_header = PusTcSecondaryHeader::new_simple(3, Subservice::TcGenerateOneShotHk as u8);
let unique_id: u32 = 1;
let unique_id_raw = unique_id.to_be_bytes();
let tc = PusTcCreator::new(&mut sp_header, sec_header, unique_id_raw.as_ref(), true);
let error = GenericRoutingError::UnknownTargetId(25);
hk_handler
.handler
.request_router
.inject_routing_error(error);
hk_handler.send_tc(&tc);
let result = hk_handler.handle_one_tc();
assert!(result.is_err());
let check_error = |routing_error: GenericRoutingError| {
if let GenericRoutingError::UnknownTargetId(id) = routing_error {
assert_eq!(id, 25);
} else {
panic!("unexpected error type");
}
};
if let PusPacketHandlingError::RequestRoutingError(routing_error) = result.unwrap_err() {
check_error(routing_error);
} else {
panic!("unexpected error type");
}
hk_handler.check_next_conversion(&tc);
let (target_id, hk_req) = hk_handler.retrieve_next_request();
assert_eq!(target_id, TEST_APID.into());
if let HkRequest::OneShot(unique_id) = hk_req {
assert_eq!(unique_id, 1);
}
let (target_id, found_error) = hk_handler.retrieve_next_error();
assert_eq!(target_id, TEST_APID.into());
check_error(found_error);
}
}
