Compare commits

...

61 Commits

Author SHA1 Message Date
Robin Mueller
d7b8a8c1d1 start new CCSDS scheduler 2025-10-20 12:06:18 +02:00
06e713a557 Merge pull request 'Rework Event Management Module' (#251) from rework-event-management-module into main
Reviewed-on: #251
2025-10-20 11:21:44 +02:00
Robin Mueller
778512d50e rework event management module 2025-10-20 11:16:28 +02:00
5d40638964 Merge pull request 'update spacepackets dependency' (#250) from update-dependencies into main
Reviewed-on: #250
2025-09-26 16:01:40 +02:00
Robin Mueller
1b07b845f2 update spacepackets dependency 2025-09-26 16:01:30 +02:00
2e58a311c8 Merge pull request 'CI fix and naming improvement' (#249) from example-ci-fix into main
Reviewed-on: #249
2025-09-06 19:40:17 +02:00
Robin Mueller
62d64e692a CI fix and naming improvement 2025-09-06 19:36:56 +02:00
3784f47b66 Merge pull request 'update heapless dependency' (#248) from update-deps into main
Reviewed-on: #248
2025-09-06 19:32:18 +02:00
Robin Mueller
e1911f1b6e update heapless dependency 2025-09-06 19:32:06 +02:00
2e53ce1871 Merge pull request 'update trait names' (#247) from update-trait-names into main
Reviewed-on: #247
2025-09-06 19:31:46 +02:00
Robin Mueller
a6c460129b update trait names 2025-09-06 19:26:39 +02:00
d5ecefd683 Merge pull request 'small changelog adaption' (#246) from small-changelog-adaption into main
Reviewed-on: #246
2025-08-26 14:29:41 +02:00
2f9f3b8183 small changelog adaption 2025-08-26 14:28:22 +02:00
b28cff85de Merge pull request 'prepare next MIB release' (#245) from satrs-mib-release into main
Reviewed-on: #245
2025-08-26 14:26:47 +02:00
da3de9f22c prepare next MIB release 2025-08-26 14:24:48 +02:00
aefdf0987d Merge pull request 'add matrix chat badge' (#243) from add-chat-badge into main
Reviewed-on: #243
2025-08-14 14:17:56 +02:00
Robin Mueller
7ae9e62d66 add matrix chat badge 2025-08-14 14:17:41 +02:00
fea1a9028b Merge pull request 'edition bump to 2024 ald clippy' (#244) from edition-bump-clippy-fixes into main
Reviewed-on: #244
2025-08-14 14:17:18 +02:00
Robin Mueller
abc145fb2d edition bump to 2024 ald clippy 2025-08-14 14:10:00 +02:00
490635dfcf Merge pull request 'prepare patch releases' (#242) from prep-patch-releases into main
Reviewed-on: #242
2025-07-23 14:09:59 +02:00
de3d22a022 prepare patch releases 2025-07-23 14:05:04 +02:00
cbe211fe8b Merge pull request 'CFDP reference' (#241) from cfdp-ref into main
Reviewed-on: #241
2025-07-22 10:47:08 +02:00
Robin Mueller
e379bc3fd7 CFDP 2025-07-22 10:46:56 +02:00
a61ee85796 Merge pull request 'clippy fixes' (#240) from clippy-fixes into main
Reviewed-on: #240
2025-07-22 10:44:42 +02:00
Robin Mueller
81473b30f9 clippy fixes 2025-07-22 10:44:11 +02:00
22675a73f9 Merge pull request 'changelog' (#239) from satrs-changelog into main
Reviewed-on: #239
2025-07-22 10:36:58 +02:00
Robin Mueller
c68e3d4f75 changelog 2025-07-22 10:35:38 +02:00
3deedfba17 Merge pull request 'bump all dependencies' (#238) from dependency-update into main
Reviewed-on: #238
2025-07-22 10:31:15 +02:00
Robin Mueller
533caea0fe bump all dependencies 2025-07-22 10:29:54 +02:00
848fe6f207 Merge pull request 'spacepackets update' (#237) from update-satrs-deps into main
Reviewed-on: #237
2025-07-18 19:24:55 +02:00
Robin Mueller
cf64fea7d9 bump cobs-rs dependency 2025-07-18 19:24:40 +02:00
4948db3fa5 Merge pull request 'bump cobs-rs dependency' (#236) from update-satrs-deps into main
Reviewed-on: #236
2025-07-18 19:19:58 +02:00
Robin Mueller
1920e4878c bump cobs-rs dependency 2025-07-18 19:19:24 +02:00
f46bc94b04 Merge pull request 'some test fixes' (#235) from test-fix into main
Reviewed-on: #235
2025-05-19 15:49:02 +02:00
fb45da1890 some test fixes 2025-05-19 15:46:57 +02:00
f600ee499a Merge pull request 'add first generator' (#234) from generators into main
Reviewed-on: #234
2025-05-19 15:23:37 +02:00
3f28f60c59 add first generator 2025-05-19 15:21:51 +02:00
44b1f2b037 Merge pull request 'possible fix for clippy warning' (#233) from possible-fix-for-clippy-warning into main
Reviewed-on: #233
2025-05-19 15:15:24 +02:00
4b22958b34 possible fix for clippy warning 2025-05-19 15:14:59 +02:00
a711c6acd9 Merge pull request 'bump spacepackets' (#232) from bump-spacepackets into main
Reviewed-on: #232
2025-05-19 14:19:00 +02:00
99a954a1f5 some tweaks 2025-05-19 14:18:26 +02:00
4a4fd7ac2c use git deps 2025-05-16 19:49:43 +02:00
9bf08849a2 annoying 2025-05-16 19:43:45 +02:00
b8f7fefe26 Merge pull request 'should fix tests' (#231) from test-fix into main
Reviewed-on: #231
2025-05-10 16:31:14 +02:00
a501832698 should fix tests 2025-05-10 16:30:37 +02:00
c7284d3f1c Merge pull request 'minor wording improvements' (#230) from some-wording-improvements into main
Reviewed-on: #230
2025-05-10 16:27:37 +02:00
18263d4568 Merge branch 'main' into some-wording-improvements 2025-05-10 16:27:29 +02:00
95519c1363 minor wording improvements 2025-05-10 16:27:04 +02:00
2ec32717d0 Merge pull request 'use released dependencies' (#229) from use-relesed-dependencies into main
Reviewed-on: #229
2025-05-10 16:24:37 +02:00
bfdd777685 use released dependencies 2025-05-10 16:24:06 +02:00
b54c2b7863 Merge pull request 'spacepackets update' (#227) from spacepackets-update into main
Reviewed-on: #227
2025-05-10 16:19:07 +02:00
19f3355283 Merge pull request 'update SIM suite name' (#228) from update-sim-suite-name into main
Reviewed-on: #228
2025-05-10 16:18:16 +02:00
4aeb28d2f1 update SIM suite name 2025-05-10 16:14:48 +02:00
ddc4544456 spacepackets update, clippy fixes 2025-05-10 16:13:45 +02:00
9ab36c0362 Merge pull request 'update docs' (#226) from update-docs into main
Reviewed-on: #226
2025-05-06 16:21:17 +02:00
b95769c177 Merge branch 'main' into update-docs 2025-05-06 16:21:10 +02:00
bb20533ae1 Merge pull request 'remove token API for verification creator core' (#224) from simplify-verification-reporter-core into main
Reviewed-on: #224
2025-05-05 19:03:34 +02:00
27cacd0f43 remove token API for verification creator core 2025-05-05 19:02:37 +02:00
bd6488e87b update docs 2025-04-01 20:58:18 +02:00
52ec0d44aa Merge pull request 'bump README links' (#223) from update-readme-links into main
Reviewed-on: #223
2025-04-01 15:15:14 +02:00
4fa9f8d685 bump README links 2025-04-01 15:05:28 +02:00
83 changed files with 4483 additions and 2821 deletions

View File

@@ -47,6 +47,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- run: cargo fmt --all -- --check
docs:
@@ -55,7 +57,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'
- run: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc -p satrs --all-features
clippy:
name: Clippy
@@ -63,4 +65,6 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy
- run: cargo clippy -- -D warnings

View File

@@ -1,9 +1,10 @@
<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
[![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
[![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)
[![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://robamu.github.io/sat-rs/book/)
[![Crates.io](https://img.shields.io/crates/v/satrs)](https://crates.io/crates/satrs)
[![docs.rs](https://img.shields.io/docsrs/satrs)](https://docs.rs/satrs)
[![matrix chat](https://img.shields.io/matrix/sat-rs%3Amatrix.org)](https://matrix.to/#/#sat-rs:matrix.org)
sat-rs
=========
@@ -11,7 +12,7 @@ sat-rs
This is the repository of the sat-rs library. Its primary goal is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements for these systems. You can find an overview of the project and the
link to the [more high-level sat-rs book](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
link to the [more high-level sat-rs book](https://robamu.github.io/sat-rs/book/)
at the [IRS software projects website](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
This is early-stage software. Important features are missing. New releases
@@ -38,7 +39,7 @@ This project currently contains following crates:
Example of a simple example on-board software using various sat-rs components which can be run
on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-minisim`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-minisim):
Mini-Simulator based on [asynchronix](https://github.com/asynchronics/asynchronix) which
Mini-Simulator based on [nexosim](https://github.com/asynchronics/nexosim) which
simulates some physical devices for the `satrs-example` application device handlers.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
Components to build a mission information base from the on-board software directly.
@@ -61,6 +62,8 @@ Each project has its own `CHANGELOG.md`.
packet protocol implementations. This repository is re-exported in the
[`satrs`](https://egit.irs.uni-stuttgart.de/rust/satrs/src/branch/main/satrs)
crate.
* [`cfdp`](https://egit.irs.uni-stuttgart.de/rust/cfdp): CCSDS File Delivery Protocol
(CFDP) high-level library components.
# Flight Heritage

View File

@@ -0,0 +1,8 @@
[package]
name = "satrs-gen"
version = "0.1.0"
edition = "2024"
[dependencies]
toml = "0.8"
heck = "0.5"

View File

@@ -0,0 +1,34 @@
[apid]
Sched = 1
GenericPus = 2
Acs = 3
Cfdp = 4
Tmtc = 5
Eps = 6
[ids]
[ids.Eps]
Pcdu = 0
Subsystem = 1
[ids.Tmtc]
UdpServer = 0
TcpServer = 1
[ids.GenericPus]
PusEventManagement = 0
PusRouting = 1
PusTest = 2
PusAction = 3
PusMode = 4
PusHk = 5
[ids.Sched]
PusSched = 0
[ids.Acs]
Subsystem = 1
Assembly = 2
Mgm0 = 3
Mgm1 = 4

View File

@@ -0,0 +1,91 @@
use heck::{ToShoutySnakeCase, ToSnakeCase};
use std::{
collections::BTreeMap,
fs::{self, File},
io::{self, Write},
};
use toml::{Value, map::Map};
fn main() -> io::Result<()> {
// Read the configuration file
let config_str = fs::read_to_string("components.toml").expect("Unable to read file");
let config: Value = toml::from_str(&config_str).expect("Unable to parse TOML");
let mut output = File::create("../satrs-example/src/ids.rs")?;
generate_rust_code(&config, &mut output);
Ok(())
}
fn sort_enum_table(table_map: &Map<String, Value>) -> BTreeMap<u64, &str> {
// Collect entries into a BTreeMap to sort them by key
let mut sorted_entries: BTreeMap<u64, &str> = BTreeMap::new();
for (key, value) in table_map {
if let Some(value) = value.as_integer() {
if !(0..=0x7FF).contains(&value) {
panic!("Invalid APID value: {}", value);
}
sorted_entries.insert(value as u64, key);
}
}
sorted_entries
}
fn generate_rust_code(config: &Value, writer: &mut impl Write) {
writeln!(
writer,
"//! This is an auto-generated configuration module."
)
.unwrap();
writeln!(writer, "use satrs::request::UniqueApidTargetId;").unwrap();
writeln!(writer).unwrap();
// Generate the main module
writeln!(
writer,
"#[derive(Debug, Copy, Clone, PartialEq, Eq, strum::EnumIter)]"
)
.unwrap();
writeln!(writer, "pub enum Apid {{").unwrap();
// Generate constants for the main module
if let Some(apid_table) = config.get("apid").and_then(Value::as_table) {
let sorted_entries = sort_enum_table(apid_table);
// Write the sorted entries to the writer
for (value, key) in sorted_entries {
writeln!(writer, " {} = {},", key, value).unwrap();
}
}
writeln!(writer, "}}").unwrap();
// Generate ID tables.
if let Some(id_tables) = config.get("ids").and_then(Value::as_table) {
for (mod_name, table) in id_tables {
let mod_name_as_snake = mod_name.to_snake_case();
writeln!(writer).unwrap();
writeln!(writer, "pub mod {} {{", mod_name_as_snake).unwrap();
let sorted_entries = sort_enum_table(table.as_table().unwrap());
writeln!(writer, " #[derive(Debug, Copy, Clone, PartialEq, Eq)]").unwrap();
writeln!(writer, " pub enum Id {{").unwrap();
// Write the sorted entries to the writer
for (value, key) in &sorted_entries {
writeln!(writer, " {} = {},", key, value).unwrap();
}
writeln!(writer, " }}").unwrap();
writeln!(writer).unwrap();
for (_value, key) in sorted_entries {
let key_shouting = key.to_shouty_snake_case();
writeln!(
writer,
" pub const {}: super::UniqueApidTargetId = super::UniqueApidTargetId::new(super::Apid::{} as u16, Id::{} as u32);",
key_shouting, mod_name, key
).unwrap();
}
writeln!(writer, "}}").unwrap();
}
}
}

justfile Normal file
View File

@@ -0,0 +1,23 @@
all: check embedded test fmt clippy docs
check:
cargo check
cargo check -p satrs-example --no-default-features
test:
cargo nextest run --all-features
cargo test --doc --all-features
embedded:
cargo check -p satrs --target=thumbv7em-none-eabihf --no-default-features
fmt:
cargo fmt --all -- --check
clippy:
cargo clippy -- -D warnings
docs-satrs:
RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc -p satrs --all-features
docs: docs-satrs

View File

@@ -3,20 +3,20 @@
Software for space systems oftentimes has different requirements than the software for host
systems or servers. Currently, most space systems are considered embedded systems.
For these systems, the computation power and the available heap are the most important resources
which are constrained. This might make completeley heap based memory management schemes which
For these systems, the computation power and the available heap are important resources
which are also constrained. This might make completeley heap based memory management schemes which
are oftentimes used on host and server based systems unfeasable. Still, completely forbidding
heap allocations might make software development unnecessarilly difficult, especially in a
time where the OBSW might be running on Linux based systems with hundreds of MBs of RAM.
A useful pattern used commonly in space systems is to limit heap allocations to program
A useful pattern commonly used in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust can not protect from) or heap fragmentation on systems
without a MMU.
# Using pre-allocated pool structures
A huge candidate for heap allocations is the TMTC and handling. TC, TMs and IPC data are all
A candidate for heap allocations is the TMTC and handling. TC, TMs and IPC data are all
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering pre-allocated static
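
The pattern described in this chapter can be illustrated without any library support. Below is a minimal, self-contained sketch of the idea (allocate a fixed buffer once at initialization, then only lease slots from it at run time); it deliberately does not use the sat-rs pool API touched by the diffs above, and all names in it are illustrative assumptions rather than sat-rs types.

```rust
// Hypothetical sketch of "allocate at init time only"; not the sat-rs pool API.
struct FixedPool {
    // One contiguous buffer, allocated exactly once at program start.
    storage: Vec<u8>,
    slot_size: usize,
    // Indices of slots that are still available.
    free_slots: Vec<usize>,
}

impl FixedPool {
    // All heap allocation happens here, during initialization.
    fn new(num_slots: usize, slot_size: usize) -> Self {
        Self {
            storage: vec![0; num_slots * slot_size],
            slot_size,
            free_slots: (0..num_slots).collect(),
        }
    }

    // At run time, slots are only handed out; no allocation occurs.
    // Returning slots to the pool is omitted to keep the sketch short.
    fn lease(&mut self) -> Option<&mut [u8]> {
        let idx = self.free_slots.pop()?;
        let start = idx * self.slot_size;
        Some(&mut self.storage[start..start + self.slot_size])
    }
}

fn main() {
    // Pool sized for e.g. telecommand buffers, created once at startup.
    let mut tc_pool = FixedPool::new(16, 1024);
    if let Some(slot) = tc_pool.lease() {
        // Write packet data into the leased slot instead of allocating a Vec per packet.
        slot[..4].copy_from_slice(&[0x17, 0x01, 0x00, 0x00]);
    }
}
```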

View File

@@ -18,9 +18,11 @@ csv = "1"
num_enum = "0.7"
thiserror = "2"
lazy_static = "1"
strum = { version = "0.26", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
derive-new = "0.7"
cfg-if = "1"
arbitrary-int = "2"
bitbybit = "1.4"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
@@ -36,8 +38,8 @@ version = "0.1.1"
path = "../satrs-mib"
[features]
heap_tmtc = []
default = ["heap_tmtc"]
heap_tmtc = []
[dev-dependencies]
env_logger = "0.11"

View File

@@ -2,7 +2,7 @@ use derive_new::new;
use satrs::hk::{HkRequest, HkRequestVariant};
use satrs::mode_tree::{ModeChild, ModeNode};
use satrs::power::{PowerSwitchInfo, PowerSwitcherCommandSender};
use satrs_example::config::pus::PUS_MODE_SERVICE;
use satrs_example::ids::generic_pus::PUS_MODE;
use satrs_example::{DeviceMode, TimestampHelper};
use satrs_minisim::acs::lis3mdl::{
MgmLis3MdlReply, MgmLis3RawValues, FIELD_LSB_PER_GAUSS_4_SENS, GAUSS_TO_MICROTESLA_FACTOR,
@@ -83,7 +83,7 @@ impl SpiInterface for SpiSimInterface {
.sim_request_tx
.send(SimRequest::new_with_epoch_time(mgm_sensor_request))
{
log::error!("failed to send MGM LIS3 request: {}", e);
log::error!("failed to send MGM LIS3 request: {e}");
}
match self.sim_reply_rx.recv_timeout(Duration::from_millis(50)) {
Ok(sim_reply) => {
@@ -97,7 +97,7 @@ impl SpiInterface for SpiSimInterface {
.copy_from_slice(&sim_reply_lis3.raw.z.to_le_bytes());
}
Err(e) => {
log::warn!("MGM LIS3 SIM reply timeout: {}", e);
log::warn!("MGM LIS3 SIM reply timeout: {e}");
}
}
Ok(())
@@ -417,7 +417,7 @@ impl<
if requestor.sender_id() == NO_SENDER {
return Ok(());
}
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {:x}",
requestor.sender_id()
@@ -434,7 +434,7 @@ impl<
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -492,7 +492,7 @@ mod tests {
tmtc::PacketAsVec,
ComponentId,
};
use satrs_example::config::{acs::MGM_ASSEMBLY, components::Apid};
use satrs_example::ids::{acs::ASSEMBLY, Apid};
use satrs_minisim::acs::lis3mdl::MgmLis3RawValues;
use crate::{eps::TestSwitchHelper, pus::hk::HkReply, requests::CompositeRequest};
@@ -538,7 +538,7 @@ mod tests {
impl ModeNode for MgmAssemblyMock {
fn id(&self) -> satrs::ComponentId {
PUS_MODE_SERVICE.into()
PUS_MODE.into()
}
}
@@ -557,7 +557,7 @@ mod tests {
impl ModeNode for PusMock {
fn id(&self) -> satrs::ComponentId {
PUS_MODE_SERVICE.into()
PUS_MODE.into()
}
}
@@ -574,7 +574,7 @@ mod tests {
let (request_tx, request_rx) = mpsc::sync_channel(5);
let (reply_tx_to_pus, reply_rx_to_pus) = mpsc::sync_channel(5);
let (reply_tx_to_parent, reply_rx_to_parent) = mpsc::sync_channel(5);
let id = UniqueApidTargetId::new(Apid::Acs as u16, 1);
let id = UniqueApidTargetId::new(Apid::Acs.raw_value(), 1);
let mode_node = ModeRequestHandlerMpscBounded::new(id.into(), request_rx);
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (hk_reply_tx, hk_reply_rx) = mpsc::sync_channel(10);
@@ -592,8 +592,8 @@ mod tests {
TestSpiInterface::default(),
shared_mgm_set,
);
handler.add_mode_parent(PUS_MODE_SERVICE.into(), reply_tx_to_pus);
handler.add_mode_parent(MGM_ASSEMBLY.into(), reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE.into(), reply_tx_to_pus);
handler.add_mode_parent(ASSEMBLY.into(), reply_tx_to_parent);
Self {
mode_request_tx: request_tx,
mode_reply_rx_to_pus: reply_rx_to_pus,
@@ -631,7 +631,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -692,7 +692,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,

View File

@@ -1,10 +1,9 @@
use arbitrary_int::u11;
use satrs::pus::verification::RequestId;
use satrs::spacepackets::ecss::tc::PusTcCreator;
use satrs::spacepackets::ecss::tm::PusTmReader;
use satrs::{
spacepackets::ecss::{PusPacket, WritablePusPacket},
spacepackets::SpHeader,
};
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::spacepackets::SpHeader;
use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::Duration;
@@ -12,7 +11,13 @@ use std::time::Duration;
fn main() {
let mut buf = [0; 32];
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
let pus_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(u11::new(0x02)),
17,
1,
&[],
CreatorConfig::default(),
);
let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
let tc_req_id = RequestId::new(&pus_tc);
println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");
@@ -29,7 +34,7 @@ fn main() {
let res = client.recv(&mut buf);
match res {
Ok(_len) => {
let (pus_tm, size) = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
let pus_tm = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
if pus_tm.service() == 17 && pus_tm.subservice() == 2 {
println!("Received PUS Ping Reply TM[17,2]")
} else if pus_tm.service() == 1 {

View File

@@ -1,3 +1,4 @@
use arbitrary_int::u11;
use lazy_static::lazy_static;
use satrs::{
res_code::ResultU16,
@@ -10,7 +11,7 @@ use strum::IntoEnumIterator;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
events::{EventU32TypedSev, SeverityInfo},
events_legacy::{EventU32TypedSev, SeverityInfo},
pool::{StaticMemoryPool, StaticPoolConfig},
};
@@ -43,14 +44,14 @@ pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<Severi
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
for id in crate::ids::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, u11::new(id as u16)));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
for id in crate::ids::Apid::iter() {
set.insert(id as u16);
}
set
@@ -122,92 +123,11 @@ pub mod mode_err {
}
pub mod components {
use satrs::{request::UniqueApidTargetId, ComponentId};
use strum::EnumIter;
use satrs::ComponentId;
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
Eps = 6,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum EpsId {
Pcdu = 0,
Subsystem = 1,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TmtcId {
UdpServer = 0,
TcpServer = 1,
}
pub const EPS_SUBSYSTEM: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Eps as u16, EpsId::Subsystem as u32);
pub const PCDU_HANDLER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Eps as u16, EpsId::Pcdu as u32);
pub const UDP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
pub const TCP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
pub const NO_SENDER: ComponentId = ComponentId::MAX;
}
pub mod pus {
use super::components::Apid;
use satrs::request::UniqueApidTargetId;
// Component IDs for components with the PUS APID.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PusId {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
pub const PUS_HK_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Sched as u16, 0);
}
pub mod acs {
use super::components::Apid;
use satrs::request::UniqueApidTargetId;
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum AcsId {
Subsystem = 1,
Assembly = 2,
Mgm0 = 3,
Mgm1 = 4,
}
pub const MGM_ASSEMBLY: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Assembly as u32);
pub const MGM_HANDLER_0: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
pub const MGM_HANDLER_1: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
}
pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {

View File

@@ -20,10 +20,8 @@ use satrs::{
spacepackets::ByteConversionError,
};
use satrs_example::{
config::{
components::{NO_SENDER, PCDU_HANDLER},
pus::PUS_MODE_SERVICE,
},
config::components::NO_SENDER,
ids::{eps::PCDU, generic_pus::PUS_MODE},
DeviceMode, TimestampHelper,
};
use satrs_minisim::{
@@ -403,7 +401,7 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
}) {
log::warn!("receiving PCDU replies failed: {:?}", e);
log::warn!("receiving PCDU replies failed: {e:?}");
}
}
}
@@ -452,7 +450,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
if requestor.sender_id() == NO_SENDER {
return Ok(());
}
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -469,7 +467,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -492,7 +490,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
impl<ComInterface: SerialInterface> ModeNode for PcduHandler<ComInterface> {
fn id(&self) -> satrs::ComponentId {
PCDU_HANDLER.into()
PCDU.into()
}
}
@@ -511,10 +509,7 @@ mod tests {
use satrs::{
mode::ModeRequest, power::SwitchStateBinary, request::GenericMessage, tmtc::PacketAsVec,
};
use satrs_example::config::{
acs::MGM_HANDLER_0,
components::{Apid, EPS_SUBSYSTEM, PCDU_HANDLER},
};
use satrs_example::ids::{self, Apid};
use satrs_minisim::eps::SwitchMapBinary;
use super::*;
@@ -570,15 +565,14 @@ mod tests {
let (mode_request_tx, mode_request_rx) = mpsc::sync_channel(5);
let (mode_reply_tx_to_pus, mode_reply_rx_to_pus) = mpsc::sync_channel(5);
let (mode_reply_tx_to_parent, mode_reply_rx_to_parent) = mpsc::sync_channel(5);
let mode_node =
ModeRequestHandlerMpscBounded::new(PCDU_HANDLER.into(), mode_request_rx);
let mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), mode_request_rx);
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (hk_reply_tx, hk_reply_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel::<PacketAsVec>(5);
let (switch_request_tx, switch_reqest_rx) = mpsc::channel();
let shared_switch_map = Arc::new(Mutex::new(SwitchSet::default()));
let mut handler = PcduHandler::new(
UniqueApidTargetId::new(Apid::Eps as u16, 0),
UniqueApidTargetId::new(Apid::Eps.raw_value(), 0),
"TEST_PCDU",
mode_node,
composite_request_rx,
@@ -588,8 +582,8 @@ mod tests {
SerialInterfaceTest::default(),
shared_switch_map,
);
handler.add_mode_parent(EPS_SUBSYSTEM.into(), mode_reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE_SERVICE.into(), mode_reply_tx_to_pus);
handler.add_mode_parent(ids::eps::SUBSYSTEM.into(), mode_reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE.into(), mode_reply_tx_to_pus);
Self {
mode_request_tx,
mode_reply_rx_to_pus,
@@ -684,7 +678,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -719,7 +713,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -729,7 +723,7 @@ mod tests {
testbench
.switch_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, MGM_HANDLER_0.id()),
MessageMetadata::new(0, ids::acs::MGM0.id()),
SwitchRequest::new(0, SwitchStateBinary::On),
))
.expect("failed to send switch request");

View File

@@ -1,13 +1,15 @@
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHookProvider;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u11;
use satrs::event_man_legacy::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHook;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::EcssTmSender;
use satrs::request::UniqueApidTargetId;
use satrs::{
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
event_man_legacy::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
pus::{
event_man::{
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
@@ -16,17 +18,17 @@ use satrs::{
},
spacepackets::time::cds::CdsTime,
};
use satrs_example::config::pus::PUS_EVENT_MANAGEMENT;
use satrs_example::ids::generic_pus::PUS_EVENT_MANAGEMENT;
use crate::update_time;
// This helper sets the APID of the event sender for the PUS telemetry.
#[derive(Default)]
pub struct EventApidSetter {
pub next_apid: u16,
pub next_apid: u11,
}
impl EventTmHookProvider for EventApidSetter {
impl EventTmHook for EventApidSetter {
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
tm.set_apid(self.next_apid);
}
@@ -59,12 +61,11 @@ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
// telemetry for each event.
let event_reporter = EventReporter::new_with_hook(
PUS_EVENT_MANAGEMENT.raw(),
0,
u11::ZERO,
0,
128,
EventApidSetter::default(),
)
.unwrap();
);
let pus_event_dispatcher =
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
@@ -218,19 +219,16 @@ impl<TmSender: EcssTmSender> EventHandler<TmSender> {
#[cfg(test)]
mod tests {
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
events_legacy::EventU32,
pus::verification::VerificationReporterConfig,
spacepackets::ecss::{tm::PusTmReader, PusPacket},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(u11::new(1), 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events_legacy::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
@@ -244,7 +242,7 @@ mod tests {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter_cfg = VerificationReporterConfig::new(u11::new(0x05), 2, 2, 128);
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
@@ -270,7 +268,7 @@ mod tests {
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
EventU32::new(satrs::events_legacy::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
@@ -281,9 +279,7 @@ mod tests {
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
let tm_reader = PusTmReader::new(&tm_packet.packet, 7).expect("failed to create TM reader");
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());

View File

@@ -1,8 +1,8 @@
use derive_new::new;
use satrs::hk::UniqueId;
use satrs::request::UniqueApidTargetId;
use satrs::spacepackets::ecss::hk;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::{hk, CreatorConfig};
use satrs::spacepackets::{ByteConversionError, SpHeader};
#[derive(Debug, new, Copy, Clone)]
@@ -63,7 +63,7 @@ impl PusHkHelper {
SpHeader::new_from_apid(self.component_id.apid),
sec_header,
&buf[0..8 + hk_data_len],
true,
CreatorConfig::default(),
))
}
}

satrs-example/src/ids.rs Normal file
View File

@@ -0,0 +1,96 @@
//! This is an auto-generated configuration module.
use satrs::request::UniqueApidTargetId;
#[derive(Debug, PartialEq, Eq, strum::EnumIter)]
#[bitbybit::bitenum(u11)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
Eps = 6,
}
pub mod acs {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Id {
Subsystem = 1,
Assembly = 2,
Mgm0 = 3,
Mgm1 = 4,
}
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Subsystem as u32);
pub const ASSEMBLY: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Assembly as u32);
pub const MGM0: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm0 as u32);
pub const MGM1: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm1 as u32);
}
pub mod eps {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Id {
Pcdu = 0,
Subsystem = 1,
}
pub const PCDU: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Pcdu as u32);
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Subsystem as u32);
}
pub mod generic_pus {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Id {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
pub const PUS_EVENT_MANAGEMENT: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusEventManagement as u32,
);
pub const PUS_ROUTING: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusRouting as u32);
pub const PUS_TEST: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusTest as u32);
pub const PUS_ACTION: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusAction as u32);
pub const PUS_MODE: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusMode as u32);
pub const PUS_HK: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusHk as u32);
}
pub mod sched {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Id {
PusSched = 0,
}
pub const PUS_SCHED: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Sched.raw_value(), Id::PusSched as u32);
}
pub mod tmtc {
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Id {
UdpServer = 0,
TcpServer = 1,
}
pub const UDP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::UdpServer as u32);
pub const TCP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::TcpServer as u32);
}

View File

@@ -24,7 +24,7 @@ pub fn create_sim_client(sim_request_rx: mpsc::Receiver<SimRequest>) -> Option<S
return Some(sim_client);
}
Err(e) => {
log::warn!("sim client creation error: {}", e);
log::warn!("sim client creation error: {e}");
}
}
None
@@ -116,7 +116,7 @@ impl SimClientUdp {
.udp_client
.send_to(request_json.as_bytes(), self.simulator_addr)
{
log::error!("error sending data to UDP SIM server: {}", e);
log::error!("error sending data to UDP SIM server: {e}");
break;
} else {
no_sim_requests_handled = false;
@@ -151,7 +151,7 @@ impl SimClientUdp {
}
}
Err(e) => {
log::warn!("failed to deserialize SIM reply: {}", e);
log::warn!("failed to deserialize SIM reply: {e}");
}
}
}
@@ -161,7 +161,7 @@ impl SimClientUdp {
{
break;
}
log::error!("error receiving data from UDP SIM server: {}", e);
log::error!("error receiving data from UDP SIM server: {e}");
break;
}
}

View File

@@ -31,7 +31,7 @@ impl SpacePacketValidator for SimplePacketValidator {
if self.valid_ids.contains(&sp_header.packet_id()) {
return SpValidity::Valid;
}
log::warn!("ignoring space packet with header {:?}", sp_header);
log::warn!("ignoring space packet with header {sp_header:?}");
// We could perform a CRC check.. but lets keep this simple and assume that TCP ensures
// data integrity.
SpValidity::Skip

View File

@@ -1,13 +1,14 @@
#![allow(dead_code)]
use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use log::{info, warn};
use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use satrs::pus::HandlingStatus;
use satrs::tmtc::{PacketAsVec, PacketInPool, StoreAndSendError};
use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
};
use satrs::tmtc::{PacketAsVec, StoreAndSendError};
use satrs::pool::{PoolProviderWithGuards, SharedStaticMemoryPool};
use satrs::tmtc::PacketInPool;
use crate::tmtc::sender::TmTcSender;
@@ -112,6 +113,9 @@ mod tests {
sync::{Arc, Mutex},
};
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::{
spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
@@ -119,7 +123,8 @@ mod tests {
},
ComponentId,
};
use satrs_example::config::{components, OBSW_SERVER_ADDR};
use satrs_example::config::OBSW_SERVER_ADDR;
use satrs_example::ids;
use crate::tmtc::sender::{MockSender, TmTcSender};
@@ -175,8 +180,8 @@ mod tests {
udp_tc_server,
tm_handler,
};
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
let sph = SpHeader::new_for_unseg_tc(ids::Apid::GenericPus.raw_value(), u14::ZERO, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default())
.to_vec()
.unwrap();
let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");

View File

@@ -1,6 +1,7 @@
use satrs::spacepackets::time::{cds::CdsTime, TimeWriter};
use satrs::spacepackets::time::cds::CdsTime;
pub mod config;
pub mod ids;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {

View File

@@ -33,18 +33,22 @@ use satrs::{
hal::std::{tcp_server::ServerConfig, udp_server::UdpTcServer},
mode::{Mode, ModeAndSubmode, ModeRequest, ModeRequestHandlerMpscBounded},
mode_tree::connect_mode_nodes,
pus::{event_man::EventRequestWithToken, EcssTcInMemConverter, HandlingStatus},
pus::{event_man::EventRequestWithToken, EcssTcCacher, HandlingStatus},
request::{GenericMessage, MessageMetadata},
spacepackets::time::{cds::CdsTime, TimeWriter},
spacepackets::time::cds::CdsTime,
};
use satrs_example::{
config::{
acs::{MGM_HANDLER_0, MGM_HANDLER_1},
components::{NO_SENDER, PCDU_HANDLER, TCP_SERVER, UDP_SERVER},
components::NO_SENDER,
pool::create_sched_tc_pool,
tasks::{FREQ_MS_AOCS, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, SIM_CLIENT_IDLE_DELAY_MS},
OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT,
},
ids::{
acs::*,
eps::*,
tmtc::{TCP_SERVER, UDP_SERVER},
},
DeviceMode,
};
use tmtc::sender::TmTcSender;
@@ -53,12 +57,12 @@ use tmtc::{tc_source::TcSourceTask, tm_sink::TmSink};
cfg_if::cfg_if! {
if #[cfg(feature = "heap_tmtc")] {
use interface::udp::DynamicUdpTmHandler;
use satrs::pus::EcssTcInVecConverter;
use satrs::pus::EcssTcVecCacher;
use tmtc::{tc_source::TcSourceTaskDynamic, tm_sink::TmSinkDynamic};
} else {
use std::sync::RwLock;
use interface::udp::StaticUdpTmHandler;
use satrs::pus::EcssTcInSharedPoolConverter;
use satrs::pus::EcssTcInSharedPoolCacher;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use satrs_example::config::pool::create_static_pools;
use tmtc::{
@@ -124,13 +128,13 @@ fn main() {
let mut request_map = GenericRequestRouter::default();
request_map
.composite_router_map
.insert(MGM_HANDLER_0.id(), mgm_0_handler_composite_tx);
.insert(MGM0.id(), mgm_0_handler_composite_tx);
request_map
.composite_router_map
.insert(MGM_HANDLER_0.id(), mgm_1_handler_composite_tx);
.insert(MGM1.id(), mgm_1_handler_composite_tx);
request_map
.composite_router_map
.insert(PCDU_HANDLER.id(), pcdu_handler_composite_tx);
.insert(PCDU.id(), pcdu_handler_composite_tx);
// This helper structure is used by all telecommand providers which need to send telecommands
// to the TC source.
@@ -139,9 +143,9 @@ fn main() {
let tc_sender_with_shared_pool =
PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());
let tc_in_mem_converter =
EcssTcInMemConverter::Static(EcssTcInSharedPoolConverter::new(shared_tc_pool, 4096));
EcssTcCacher::Static(EcssTcInSharedPoolCacher::new(shared_tc_pool, 4096));
} else if #[cfg(feature = "heap_tmtc")] {
let tc_in_mem_converter = EcssTcInMemConverter::Heap(EcssTcInVecConverter::default());
let tc_in_mem_converter = EcssTcCacher::Heap(EcssTcVecCacher::default());
}
}
@@ -301,10 +305,8 @@ fn main() {
let shared_mgm_0_set = Arc::default();
let shared_mgm_1_set = Arc::default();
let mgm_0_mode_node =
ModeRequestHandlerMpscBounded::new(MGM_HANDLER_0.into(), mgm_0_handler_mode_rx);
let mgm_1_mode_node =
ModeRequestHandlerMpscBounded::new(MGM_HANDLER_1.into(), mgm_1_handler_mode_rx);
let mgm_0_mode_node = ModeRequestHandlerMpscBounded::new(MGM0.into(), mgm_0_handler_mode_rx);
let mgm_1_mode_node = ModeRequestHandlerMpscBounded::new(MGM1.into(), mgm_1_handler_mode_rx);
let (mgm_0_spi_interface, mgm_1_spi_interface) =
if let Some(sim_client) = opt_sim_client.as_mut() {
sim_client
@@ -328,7 +330,7 @@ fn main() {
)
};
let mut mgm_0_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
MGM0,
"MGM_0",
mgm_0_mode_node,
mgm_0_handler_composite_rx,
@@ -339,7 +341,7 @@ fn main() {
shared_mgm_0_set,
);
let mut mgm_1_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_1,
MGM1,
"MGM_1",
mgm_1_mode_node,
mgm_1_handler_composite_rx,
@@ -372,10 +374,9 @@ fn main() {
} else {
SerialSimInterfaceWrapper::Dummy(SerialInterfaceDummy::default())
};
let pcdu_mode_node =
ModeRequestHandlerMpscBounded::new(PCDU_HANDLER.into(), pcdu_handler_mode_rx);
let pcdu_mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), pcdu_handler_mode_rx);
let mut pcdu_handler = PcduHandler::new(
PCDU_HANDLER,
PCDU,
"PCDU",
pcdu_mode_node,
pcdu_handler_composite_rx,

View File

@@ -9,15 +9,16 @@ use satrs::pus::verification::{
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTmSender, EcssTmtcError,
ActiveRequest, EcssTcAndToken, EcssTcCacher, EcssTmSender, EcssTmtcError,
GenericConversionError, MpscTcReceiver, PusPacketHandlingError, PusReplyHandler,
PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_ACTION_SERVICE;
use satrs_example::config::tmtc_err;
use satrs_example::ids;
use satrs_example::ids::generic_pus::PUS_ACTION;
use std::sync::mpsc;
use std::time::Duration;
@@ -207,17 +208,17 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
pub fn create_action_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_ACTION_SERVICE.id(),
ids::generic_pus::PUS_ACTION.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
create_verification_reporter(PUS_ACTION.id(), PUS_ACTION.apid),
tc_in_mem_converter,
),
ActionRequestConverter::default(),
@@ -273,8 +274,9 @@ mod tests {
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::pus::{verification, EcssTcInVecConverter};
use satrs::pus::{verification, EcssTcVecCacher};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use satrs::{
@@ -324,7 +326,7 @@ mod tests {
pus_action_rx,
TmTcSender::Heap(tm_funnel_tx.clone()),
verif_reporter,
EcssTcInMemConverter::Heap(EcssTcInVecConverter::default()),
EcssTcCacher::Heap(EcssTcVecCacher::default()),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
@@ -367,7 +369,7 @@ mod tests {
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap();
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
@@ -410,7 +412,11 @@ mod tests {
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let token = self
.service
.service_helper
.verif_reporter_mut()
.start_verification(tc);
let accepted_token = self
.service
.service_helper
@@ -448,7 +454,8 @@ mod tests {
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let pus8_packet =
PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
@@ -494,7 +501,7 @@ mod tests {
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
@@ -520,7 +527,7 @@ mod tests {
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
@@ -559,7 +566,7 @@ mod tests {
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);

View File

@@ -6,17 +6,17 @@ use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs_example::config::pus::PUS_EVENT_MANAGEMENT;
use satrs_example::ids::generic_pus::PUS_EVENT_MANAGEMENT;
use super::{DirectPusService, HandlingStatus};
pub fn create_event_service(
tm_sender: TmTcSender,
tm_in_pool_converter: EcssTcInMemConverter,
tm_in_pool_converter: EcssTcCacher,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper {
@@ -36,12 +36,8 @@ pub fn create_event_service(
}
pub struct EventServiceWrapper {
pub handler: PusEventServiceHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
VerificationReporter,
>,
pub handler:
PusEventServiceHandler<MpscTcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
}
impl DirectPusService for EventServiceWrapper {

View File

@@ -5,16 +5,16 @@ use satrs::pus::verification::{
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInMemConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
ActivePusRequestStd, ActiveRequest, DefaultActiveRequestMap, EcssTcAndToken, EcssTcCacher,
EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver, PusPacketHandlingError,
PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::res_code::ResultU16;
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_HK_SERVICE;
use satrs_example::config::{hk_err, tmtc_err};
use satrs_example::ids::generic_pus::PUS_HK;
use std::sync::mpsc;
use std::time::Duration;
@@ -242,17 +242,17 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
pub fn create_hk_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
PUS_HK.id(),
pus_hk_rx,
tm_sender,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
create_verification_reporter(PUS_HK.id(), PUS_HK.apid),
tc_in_mem_converter,
),
HkRequestConverter::default(),
@@ -302,10 +302,13 @@ impl TargetedPusService for HkServiceWrapper {
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
@@ -328,7 +331,7 @@ mod tests {
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
@@ -340,7 +343,7 @@ mod tests {
3,
Subservice::TcGenerateOneShotHk as u8,
&app_data,
true,
CreatorConfig::default(),
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
@@ -358,7 +361,7 @@ mod tests {
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
@@ -380,7 +383,7 @@ mod tests {
3,
Subservice::TcEnableHkGeneration as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
@@ -388,7 +391,7 @@ mod tests {
3,
Subservice::TcEnableDiagGeneration as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}
@@ -397,7 +400,7 @@ mod tests {
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
@@ -419,7 +422,7 @@ mod tests {
3,
Subservice::TcDisableHkGeneration as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
@@ -427,7 +430,7 @@ mod tests {
3,
Subservice::TcDisableDiagGeneration as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}
@@ -436,7 +439,7 @@ mod tests {
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
@@ -462,7 +465,7 @@ mod tests {
3,
Subservice::TcModifyHkCollectionInterval as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
@@ -470,7 +473,7 @@ mod tests {
3,
Subservice::TcModifyDiagCollectionInterval as u8,
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}

View File

@@ -4,13 +4,13 @@ use log::warn;
use satrs::pool::PoolAddr;
use satrs::pus::verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
VerificationReporterConfig, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConversionProvider,
EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError,
GenericRoutingError, HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TcInMemory,
ActiveRequest, ActiveRequestStore, CacheAndReadRawEcssTc, EcssTcAndToken, EcssTcCacher,
EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata};
@@ -18,8 +18,8 @@ use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketInPool};
use satrs::ComponentId;
use satrs_example::config::pus::PUS_ROUTING_SERVICE;
use satrs_example::config::{tmtc_err, CustomPusServiceId};
use satrs_example::ids::generic_pus::PUS_ROUTING;
use satrs_example::TimestampHelper;
use std::fmt::Debug;
use std::sync::mpsc;
@@ -33,7 +33,7 @@ pub mod stack;
pub mod test;
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(apid, 1, 2, 8);
// Every software component which needs to generate verification telemetry, gets a cloned
// verification reporter.
VerificationReporter::new(owner_id, &verif_cfg)
@@ -62,12 +62,9 @@ pub struct PusTcDistributor {
impl PusTcDistributor {
pub fn new(tm_sender: TmTcSender, pus_router: PusTcMpscRouter) -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
id: PUS_ROUTING.raw(),
tm_sender,
verif_reporter: create_verification_reporter(
PUS_ROUTING_SERVICE.id(),
PUS_ROUTING_SERVICE.apid,
),
verif_reporter: create_verification_reporter(PUS_ROUTING.id(), PUS_ROUTING.apid),
pus_router,
stamp_helper: TimestampHelper::default(),
}
@@ -105,12 +102,12 @@ impl PusTcDistributor {
sender_id,
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", raw_tc);
log::warn!("raw data: {raw_tc:x?}");
// TODO: Shouldn't this be an error?
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
let pus_tc = pus_tc_result.unwrap();
let init_token = self.verif_reporter.start_verification(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
@@ -272,16 +269,16 @@ pub struct PusTargetedRequestService<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmTcSender, EcssTcInMemConverter, VerificationReporter>,
PusServiceHelper<TcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
pub active_request_map: ActiveRequestMapInstance,
pub reply_handler: ReplyHandler,
pub reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
@@ -292,8 +289,8 @@ impl<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
>
@@ -302,7 +299,7 @@ impl<
VerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestMapInstance,
ActiveRequestInfo,
RequestType,
ReplyType,
@@ -314,11 +311,11 @@ where
service_helper: PusServiceHelper<
TcReceiver,
TmTcSender,
EcssTcInMemConverter,
EcssTcCacher,
VerificationReporter,
>,
request_converter: RequestConverter,
active_request_map: ActiveRequestMap,
active_request_map: ActiveRequestMapInstance,
reply_hook: ReplyHandler,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
@@ -512,7 +509,7 @@ where
/// and also log the error.
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSender + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
active_request: &(impl ActiveRequest + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
service_str: &'static str,
@@ -534,13 +531,15 @@ pub fn generic_pus_request_timeout_handler(
pub(crate) mod tests {
use std::time::Duration;
use arbitrary_int::u11;
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
use satrs::request::RequestId;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider, MpscTcReceiver,
ActiveRequestStore, MpscTcReceiver,
},
request::UniqueApidTargetId,
spacepackets::{
@@ -559,7 +558,7 @@ pub(crate) mod tests {
// Testbench dedicated to the testing of [PusReplyHandler]s
pub struct ReplyHandlerTestbench<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Reply,
> {
pub id: ComponentId,
@@ -573,7 +572,7 @@ pub(crate) mod tests {
impl<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Reply,
> ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
{
@@ -593,17 +592,17 @@ pub(crate) mod tests {
pub fn add_tc(
&mut self,
apid: u16,
apid: u11,
apid_target: u32,
time_stamp: &[u8],
) -> (verification::RequestId, ActivePusRequestStd) {
let sp_header = SpHeader::new_from_apid(apid);
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
let init = self.verif_reporter.start_verification(&PusTcCreator::new(
sp_header,
sec_header_dummy,
&[],
true,
CreatorConfig::default(),
));
let accepted = self
.verif_reporter
@@ -674,7 +673,7 @@ pub(crate) mod tests {
// Testbench dedicated to the testing of [PusTcToRequestConverter]s
pub struct PusConverterTestbench<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Request,
> {
pub id: ComponentId,
@@ -688,7 +687,7 @@ pub(crate) mod tests {
impl<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Request,
> PusConverterTestbench<Converter, ActiveRequestInfo, Request>
{
@@ -706,7 +705,7 @@ pub(crate) mod tests {
}
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verif_reporter.add_tc(tc);
let token = self.verif_reporter.start_verification(tc);
self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter
@@ -722,7 +721,7 @@ pub(crate) mod tests {
&mut self,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
expected_apid: u16,
expected_apid: u11,
expected_apid_target: u32,
) -> Result<(ActiveRequestInfo, Request), Converter::Error> {
if self.current_packet.is_none() {
@@ -734,7 +733,7 @@ pub(crate) mod tests {
let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert(
token,
&tc_reader.0,
&tc_reader,
&self.dummy_sender,
&self.verif_reporter,
time_stamp,
@@ -754,8 +753,8 @@ pub(crate) mod tests {
pub struct TargetedPusRequestTestbench<
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
> {
@@ -764,7 +763,7 @@ pub(crate) mod tests {
TestVerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestMapInstance,
ActiveRequestInfo,
RequestType,
ReplyType,

View File

@@ -1,6 +1,9 @@
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use derive_new::new;
use satrs::mode_tree::{ModeNode, ModeParent};
use satrs_example::config::pus::PUS_MODE_SERVICE;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs_example::ids;
use std::sync::mpsc;
use std::time::Duration;
@@ -8,8 +11,8 @@ use crate::requests::GenericRequestRouter;
use crate::tmtc::sender::TmTcSender;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
PusPacketHandlingError, PusServiceHelper,
DefaultActiveRequestMap, EcssTcAndToken, EcssTcCacher, MpscTcReceiver, PusPacketHandlingError,
PusServiceHelper,
};
use satrs::request::GenericMessage;
use satrs::{
@@ -20,8 +23,8 @@ use satrs::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
ActivePusRequestStd, ActiveRequest, EcssTmSender, EcssTmtcError, GenericConversionError,
PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
@@ -77,10 +80,15 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), u14::ZERO, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
let pus_tm = PusTmCreator::new(
sp_header,
sec_header,
&source_data,
CreatorConfig::default(),
);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
@@ -210,22 +218,25 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
pub fn create_mode_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
ids::generic_pus::PUS_MODE.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
create_verification_reporter(
ids::generic_pus::PUS_MODE.id(),
ids::generic_pus::PUS_MODE.apid,
),
tc_in_mem_converter,
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
ModeReplyHandler::new(ids::generic_pus::PUS_MODE.id()),
mode_router,
reply_receiver,
);
@@ -287,8 +298,11 @@ impl TargetedPusService for ModeServiceWrapper {
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer;
use arbitrary_int::u14;
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::CreatorConfig;
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
@@ -311,11 +325,11 @@ mod tests {
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -327,7 +341,7 @@ mod tests {
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
@@ -335,7 +349,7 @@ mod tests {
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -353,11 +367,11 @@ mod tests {
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -369,12 +383,12 @@ mod tests {
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)

View File

@@ -5,17 +5,17 @@ use crate::pus::create_verification_reporter;
use crate::tmtc::sender::TmTcSender;
use log::info;
use satrs::pool::{PoolProvider, StaticMemoryPool};
use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler::{PusSchedulerAlloc, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
use satrs::ComponentId;
use satrs_example::config::pus::PUS_SCHED_SERVICE;
use satrs_example::ids::sched::PUS_SCHED;
use super::{DirectPusService, HandlingStatus};
@@ -84,9 +84,9 @@ pub struct SchedulingServiceWrapper {
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
EcssTcCacher,
VerificationReporter,
PusScheduler,
PusSchedulerAlloc,
>,
pub sched_tc_pool: StaticMemoryPool,
pub releaser_buf: [u8; 4096],
@@ -174,19 +174,19 @@ impl SchedulingServiceWrapper {
pub fn create_scheduler_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
tc_releaser: TcReleaser,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
) -> SchedulingServiceWrapper {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
let scheduler = PusSchedulerAlloc::new_with_current_init_time(Duration::from_secs(5))
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
PUS_SCHED_SERVICE.id(),
PUS_SCHED.id(),
pus_sched_rx,
tm_sender,
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
create_verification_reporter(PUS_SCHED.id(), PUS_SCHED.apid),
tc_in_mem_converter,
),
scheduler,

View File

@@ -1,33 +1,33 @@
use crate::pus::create_verification_reporter;
use crate::tmtc::sender::TmTcSender;
use log::info;
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::event_man_legacy::{EventMessage, EventMessageU32};
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::PartialPusHandlingError;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConversionProvider,
EcssTcInMemConverter, MpscTcReceiver, PusServiceHelper,
CacheAndReadRawEcssTc, DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher,
MpscTcReceiver, PusServiceHelper,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_TEST_SERVICE;
use satrs_example::config::{tmtc_err, TEST_EVENT};
use satrs_example::ids::generic_pus::PUS_TEST;
use std::sync::mpsc;
use super::{DirectPusService, HandlingStatus};
pub fn create_test_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
event_sender: mpsc::SyncSender<EventMessageU32>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper {
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
PUS_TEST_SERVICE.id(),
PUS_TEST.id(),
pus_test_rx,
tm_sender,
create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
create_verification_reporter(PUS_TEST.id(), PUS_TEST.apid),
tc_in_mem_converter,
));
TestCustomServiceWrapper {
@@ -37,12 +37,8 @@ pub fn create_test_service(
}
pub struct TestCustomServiceWrapper {
pub handler: PusService17TestHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
VerificationReporter,
>,
pub handler:
PusService17TestHandler<MpscTcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
pub event_tx: mpsc::SyncSender<EventMessageU32>,
}
@@ -90,7 +86,7 @@ impl DirectPusService for TestCustomServiceWrapper {
);
}
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
let (tc, _) = PusTcReader::new(
let tc = PusTcReader::new(
self.handler
.service_helper
.tc_in_mem_converter
@@ -100,7 +96,7 @@ impl DirectPusService for TestCustomServiceWrapper {
if subservice == 128 {
info!("generating test event");
self.event_tx
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.send(EventMessage::new(PUS_TEST.id(), TEST_EVENT.into()))
.expect("Sending test event failed");
match self.handler.service_helper.verif_reporter().start_success(
self.handler.service_helper.tm_sender(),

View File

@@ -8,14 +8,14 @@ use satrs::mode::ModeRequest;
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::pus::{ActiveRequest, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::queue::GenericSendError;
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::ComponentId;
use satrs_example::config::pus::PUS_ROUTING_SERVICE;
use satrs_example::config::tmtc_err;
use satrs_example::ids;
#[derive(Clone, Debug)]
#[non_exhaustive]
@@ -37,7 +37,7 @@ pub struct GenericRequestRouter {
impl Default for GenericRequestRouter {
fn default() -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
id: ids::generic_pus::PUS_ROUTING.raw(),
composite_router_map: Default::default(),
mode_router_map: Default::default(),
}
@@ -46,7 +46,7 @@ impl Default for GenericRequestRouter {
impl GenericRequestRouter {
pub(crate) fn handle_error_generic(
&self,
active_request: &impl ActiveRequestProvider,
active_request: &impl ActiveRequest,
tc: &PusTcReader,
error: GenericRoutingError,
tm_sender: &(impl EcssTmSender + ?Sized),

View File

@@ -11,7 +11,9 @@ use crate::pus::PusTcDistributor;
pub struct TcSourceTaskStatic {
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
tc_buf: [u8; 4096],
/// We allocate this buffer from the heap to avoid a clippy warning on large enum variant
/// differences.
tc_buf: Box<[u8; 4096]>,
pus_distributor: PusTcDistributor,
}
@@ -25,7 +27,7 @@ impl TcSourceTaskStatic {
Self {
shared_tc_pool,
tc_receiver,
tc_buf: [0; 4096],
tc_buf: Box::new([0; 4096]),
pus_distributor: pus_receiver,
}
}
@@ -44,11 +46,11 @@ impl TcSourceTaskStatic {
.0
.read()
.expect("locking tc pool failed");
pool.read(&packet_in_pool.store_addr, &mut self.tc_buf)
pool.read(&packet_in_pool.store_addr, self.tc_buf.as_mut_slice())
.expect("reading pool failed");
drop(pool);
self.pus_distributor
.handle_tc_packet_in_store(packet_in_pool, &self.tc_buf)
.handle_tc_packet_in_store(packet_in_pool, self.tc_buf.as_slice())
.ok();
HandlingStatus::HandledOne
}

View File

@@ -3,18 +3,17 @@ use std::{
sync::mpsc::{self},
};
use arbitrary_int::{u11, u14};
use log::info;
use satrs::{
pool::PoolProvider,
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
seq_count::CcsdsSimpleSeqCountProvider,
seq_count::SequenceCounter,
seq_count::SequenceCounterCcsdsSimple,
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use satrs::{
spacepackets::seq_count::SequenceCountProvider,
tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
};
@@ -22,14 +21,16 @@ use crate::interface::tcp::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
apid_seq_counter_map: HashMap<u11, SequenceCounterCcsdsSimple>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
pub fn get_and_increment(&mut self, apid: u11) -> u14 {
u14::new(
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment(),
)
}
}
@@ -114,7 +115,7 @@ impl TmSinkStatic {
let mut tm_copy = Vec::new();
pool_guard
.modify(&pus_tm_in_pool.store_addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
@@ -154,8 +155,9 @@ impl TmSinkDynamic {
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
let zero_copy_writer =
PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
self.tm_server_tx

View File

@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.1.3] 2025-08-26
Bump `satrs-shared`.
# [v0.1.2] 2024-04-17
Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
@@ -19,3 +23,6 @@ Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
# [v0.1.0] 2024-02-12
Initial release containing the `resultcode` macro.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-mib-v0.1.3...HEAD
[v0.1.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-mib-v0.1.2...satrs-mib-v0.1.3

View File

@@ -1,6 +1,6 @@
[package]
name = "satrs-mib"
version = "0.1.2"
version = "0.1.3"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
@@ -23,7 +23,8 @@ version = "1"
optional = true
[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = "0.2"
path = "../satrs-shared"
features = ["serde"]
[dependencies.satrs-mib-codegen]

View File

@@ -28,7 +28,8 @@ features = ["full"]
trybuild = { version = "1", features = ["diff"] }
[dev-dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = "0.2"
path = "../../satrs-shared"
[dev-dependencies.satrs-mib]
path = ".."

View File

@@ -11,7 +11,7 @@ serde_json = "1"
log = "0.4"
thiserror = "2"
fern = "0.7"
strum = { version = "0.26", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
num_enum = "0.7"
humantime = "2"
tai-time = { version = "0.3", features = ["serde"] }

View File

@@ -120,7 +120,7 @@ impl SimController {
fn handle_ctrl_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let sim_ctrl_request = SimCtrlRequest::from_sim_message(request)?;
if SIM_CTRL_REQ_WIRETAPPING {
log::info!("received sim ctrl request: {:?}", sim_ctrl_request);
log::info!("received sim ctrl request: {sim_ctrl_request:?}");
}
match sim_ctrl_request {
SimCtrlRequest::Ping => {
@@ -139,7 +139,7 @@ impl SimController {
) -> Result<(), SimRequestError> {
let mgm_request = MgmRequestLis3Mdl::from_sim_message(request)?;
if MGM_REQ_WIRETAPPING {
log::info!("received MGM request: {:?}", mgm_request);
log::info!("received MGM request: {mgm_request:?}");
}
match mgm_request {
MgmRequestLis3Mdl::RequestSensorData => {
@@ -160,7 +160,7 @@ impl SimController {
fn handle_pcdu_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let pcdu_request = PcduRequest::from_sim_message(request)?;
if PCDU_REQ_WIRETAPPING {
log::info!("received PCDU request: {:?}", pcdu_request);
log::info!("received PCDU request: {pcdu_request:?}");
}
match pcdu_request {
PcduRequest::RequestSwitchInfo => {
@@ -188,7 +188,7 @@ impl SimController {
fn handle_mgt_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgt_request = MgtRequest::from_sim_message(request)?;
if MGT_REQ_WIRETAPPING {
log::info!("received MGT request: {:?}", mgt_request);
log::info!("received MGT request: {mgt_request:?}");
}
match mgt_request {
MgtRequest::ApplyTorque { duration, dipole } => self

View File

@@ -130,7 +130,7 @@ fn main() {
let mut udp_server =
SimUdpServer::new(SIM_CTRL_PORT, request_sender, reply_receiver, 200, None)
.expect("could not create UDP request server");
log::info!("starting UDP server on port {}", SIM_CTRL_PORT);
log::info!("starting UDP server on port {SIM_CTRL_PORT}");
// This thread manages the simulator UDP server.
let udp_tc_thread = thread::spawn(move || {
udp_server.run();

View File

@@ -8,6 +8,14 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.3] 2025-07-22
`spacepackets` range v0.14 to v0.15
# [v0.2.2] 2025-05-10
- Bump to `spacepackets` v0.14
# [v0.2.1] 2024-11-15
Increased the allowed `spacepackets` range to v0.13
@@ -41,3 +49,7 @@ Allow `spacepackets` range starting with v0.10 and v0.11.
# [v0.1.0] 2024-02-12
Initial release.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.3...HEAD
[v0.2.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.1...satrs-shared-v0.2.3
[v0.2.2]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.1...satrs-shared-v0.2.2

View File

@@ -1,7 +1,7 @@
[package]
name = "satrs-shared"
description = "Components shared by multiple sat-rs crates"
version = "0.2.1"
version = "0.2.3"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -11,6 +11,7 @@ license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
spacepackets = { version = ">=0.14, <=0.16", default-features = false }
[dependencies.serde]
version = "1"
@@ -18,12 +19,9 @@ default-features = false
optional = true
[dependencies.defmt]
version = "0.3"
version = "1"
optional = true
[dependencies.spacepackets]
version = ">0.9, <=0.13"
default-features = false
[features]
serde = ["dep:serde", "spacepackets/serde"]

View File

@@ -8,6 +8,29 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.3.0-alpha.3] 2025-09-??
- Bump `sat-rs` edition to 2024.
- Bump `spacepackets` to v0.16.
## Changed
Several traits were renamed to be more in line with Rust naming conventions (a short migration sketch follows the list):
- `EventTmHookProvider` -> `EventTmHook`
- `ActiveRequestProvider` -> `ActiveRequest`
- `EcssTcInMemConversionProvider` -> `CacheAndReadRawEcssTc`
- `ActiveRequestMapProvider` -> `ActiveRequestStore`
- `CountdownProvider` -> `Countdown`
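Judging from the diffs earlier in this comparison, these are pure renames with unchanged method sets, so downstream code only needs updated imports and trait bounds. A minimal, hypothetical migration sketch (the function below is invented for illustration and is not part of the example application):

use satrs::pus::ActiveRequest;

// Previously this bound was spelled `impl ActiveRequestProvider`.
fn log_request_state(active_request: &impl ActiveRequest) {
    // Only the trait name changed; the function body stays as it was.
    let _ = active_request;
}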
# [v0.3.0-alpha.2] 2025-07-22
`satrs-shared` update
# [v0.3.0-alpha.1] 2025-07-22
`spacepackets` range v0.14 to v0.15
# [v0.3.0-alpha.0] 2025-02-18
`spacepackets` v0.13
@@ -198,3 +221,9 @@ docs-rs hotfix
# [v0.1.0] 2024-02-12
Initial release.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.3...HEAD
[v0.3.0-alpha.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.2...satrs-v0.3.0-alpha.3
[v0.3.0-alpha.2]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.1...satrs-v0.3.0-alpha.2
[v0.3.0-alpha.1]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.0...satrs-v0.3.0-alpha.1
[v0.3.0-alpha.0]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.2.1...satrs-v0.3.0-alpha.0

View File

@@ -1,11 +1,11 @@
[package]
name = "satrs"
version = "0.3.0-alpha.0"
edition = "2021"
rust-version = "1.82.0"
version = "0.3.0-alpha.2"
edition = "2024"
rust-version = "1.85.0"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "A framework to build software for remote systems"
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
description = "A library collection to build software for remote systems"
homepage = "https://github.com/us-irs/sat-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
license = "Apache-2.0"
keywords = ["no-std", "space", "aerospace"]
@@ -13,29 +13,29 @@ keywords = ["no-std", "space", "aerospace"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]
[dependencies]
satrs-shared = ">=0.1.3, <=0.2"
satrs-shared = { version = "0.2", path = "../satrs-shared" }
spacepackets = { version = "0.16", default-features = false }
delegate = ">0.7, <=0.13"
paste = "1"
derive-new = ">=0.6, <=0.7"
smallvec = "1"
crc = "3"
num_enum = { version = ">0.5, <=0.7", default-features = false }
spacepackets = { version = "0.13", default-features = false }
cobs = { version = "0.3", default-features = false }
num-traits = { version = "0.2", default-features = false }
cobs = { version = "0.4", default-features = false }
thiserror = { version = "2", default-features = false }
hashbrown = { version = ">=0.14, <=0.15", optional = true }
static_cell = { version = "2", optional = true }
static_cell = { version = "2" }
heapless = { version = "0.9", optional = true }
dyn-clone = { version = "1", optional = true }
heapless = { version = "0.8", optional = true }
downcast-rs = { version = "2", default-features = false, optional = true }
bus = { version = "2.2", optional = true }
crossbeam-channel = { version = "0.5", default-features = false, optional = true }
postcard = { version = "1", features = ["alloc"] }
serde = { version = "1", default-features = false, optional = true }
socket2 = { version = "0.5", features = ["all"], optional = true }
socket2 = { version = "0.6", features = ["all"], optional = true }
arbitrary-int = "2"
mio = { version = "1", features = ["os-poll", "net"], optional = true }
defmt = { version = "0.3", optional = true }
defmt = { version = "1", optional = true }
[dev-dependencies]
serde = "1"
@@ -49,7 +49,7 @@ tempfile = "3"
version = "1"
[features]
default = ["std"]
default = ["std", "heapless"]
std = [
"downcast-rs/std",
"alloc",
@@ -71,7 +71,7 @@ alloc = [
]
serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
crossbeam = ["crossbeam-channel"]
heapless = ["dep:heapless", "static_cell"]
# heapless = ["dep:heapless", "static_cell"]
defmt = ["dep:defmt", "spacepackets/defmt"]
test_util = []

View File

@@ -4,5 +4,5 @@
sat-rs
======
This crate contains the primary components of the sat-rs framework.
This crate contains the primary components of the sat-rs library collection.
You can find more information on the [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).

satrs/src/ccsds/mod.rs Normal file
View File

@@ -0,0 +1 @@
pub mod scheduler;

View File

@@ -0,0 +1,129 @@
use core::{hash::Hash, time::Duration};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
use spacepackets::{
ByteConversionError, PacketId, PacketSequenceControl,
time::{TimestampError, UnixTime},
};
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum ScheduleError {
/// The release time is within the time margin added on top of the current time.
/// The fields contain the current time, the configured time margin, and the requested
/// release time.
#[error("release time in margin")]
ReleaseTimeInTimeMargin {
current_time: UnixTime,
time_margin: Duration,
release_time: UnixTime,
},
/// Nested time-tagged commands are not allowed.
#[error("nested scheduled tc")]
NestedScheduledTc,
#[error("tc data empty")]
TcDataEmpty,
#[error("timestamp error: {0}")]
TimestampError(#[from] TimestampError),
#[error("wrong subservice number {0}")]
WrongSubservice(u8),
#[error("wrong service number {0}")]
WrongService(u8),
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
#[derive(Debug, PartialEq, Eq, Clone)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct CcsdsPacketId {
pub packet_id: PacketId,
pub psc: PacketSequenceControl,
pub crc16: u16,
}
impl Hash for CcsdsPacketId {
fn hash<H: core::hash::Hasher>(&self, state: &mut H) {
self.packet_id.hash(state);
self.psc.raw().hash(state);
self.crc16.hash(state);
}
}
pub mod alloc_mod {
use core::time::Duration;
#[cfg(feature = "std")]
use std::time::SystemTimeError;
use spacepackets::time::UnixTime;
use crate::ccsds::scheduler::CcsdsPacketId;
pub struct CcsdsScheduler {
tc_map: alloc::collections::BTreeMap<
UnixTime,
alloc::vec::Vec<(CcsdsPacketId, alloc::vec::Vec<u8>)>,
>,
packet_limit: usize,
pub(crate) current_time: UnixTime,
time_margin: Duration,
enabled: bool,
}
impl CcsdsScheduler {
pub fn new(current_time: UnixTime, packet_limit: usize, time_margin: Duration) -> Self {
Self {
tc_map: alloc::collections::BTreeMap::new(),
packet_limit,
current_time,
time_margin,
enabled: true,
}
}
/// Like [Self::new], but sets the `current_time` parameter to the current system time.
#[cfg(feature = "std")]
pub fn new_with_current_init_time(
packet_limit: usize,
time_margin: Duration,
) -> Result<Self, SystemTimeError> {
Ok(Self::new(UnixTime::now()?, packet_limit, time_margin))
}
pub fn num_of_entries(&self) -> usize {
self.tc_map
.values()
.map(|v| v.iter().map(|(_, v)| v.len()).sum::<usize>())
.sum()
}
#[inline]
pub fn enable(&mut self) {
self.enabled = true;
}
#[inline]
pub fn disable(&mut self) {
self.enabled = false;
}
#[inline]
pub fn update_time(&mut self, current_time: UnixTime) {
self.current_time = current_time;
}
#[inline]
pub fn current_time(&self) -> &UnixTime {
&self.current_time
}
// TODO: Implementation
pub fn insert_telecommand(
&mut self,
packet_id: CcsdsPacketId,
packet: alloc::vec::Vec<u8>,
release_time: UnixTime,
) {
}
}
}
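The `insert_telecommand` body above is still a stub in this first scheduler commit. The following self-contained sketch (simplified types, plain `u64` seconds instead of `UnixTime`, hypothetical error strings) shows one way an insertion could honor the `packet_limit` and `time_margin` fields; it is an illustration, not the author's planned implementation:

use std::collections::BTreeMap;
use std::time::Duration;

// Simplified stand-in for the scheduler above, used only to illustrate the insertion checks.
struct MiniScheduler {
    tc_map: BTreeMap<u64, Vec<Vec<u8>>>,
    packet_limit: usize,
    current_time: u64,
    time_margin: Duration,
}

impl MiniScheduler {
    fn num_of_entries(&self) -> usize {
        self.tc_map.values().map(Vec::len).sum()
    }

    fn insert_telecommand(&mut self, packet: Vec<u8>, release_time: u64) -> Result<(), &'static str> {
        if self.num_of_entries() >= self.packet_limit {
            return Err("packet limit reached");
        }
        if release_time < self.current_time + self.time_margin.as_secs() {
            // Corresponds to ScheduleError::ReleaseTimeInTimeMargin in the real module.
            return Err("release time within time margin");
        }
        // Append the packet to the bucket for its release time.
        self.tc_map.entry(release_time).or_default().push(packet);
        Ok(())
    }
}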

View File

@@ -1,9 +1,9 @@
use crate::{
ComponentId,
mode::{ModeAndSubmode, ModeReply, ModeRequest, ModeRequestSender},
mode_tree::{ModeStoreProvider, ModeStoreVec},
queue::{GenericSendError, GenericTargetedMessagingError},
request::{GenericMessage, RequestId},
ComponentId,
};
use core::fmt::Debug;
@@ -270,7 +270,7 @@ impl<UserHook: DevManagerUserHook> DevManagerCommandingHelper<UserHook> {
#[cfg(test)]
mod tests {
use crate::{
mode::{tests::ModeReqSenderMock, UNKNOWN_MODE},
mode::{UNKNOWN_MODE, tests::ModeReqSenderMock},
request::MessageMetadata,
};

View File

@@ -1,6 +1,6 @@
use spacepackets::{CcsdsPacket, SpHeader};
use spacepackets::SpHeader;
use crate::{tmtc::PacketSenderRaw, ComponentId};
use crate::{ComponentId, tmtc::PacketSenderRaw};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SpValidity {
@@ -63,7 +63,7 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
let sp_header = SpHeader::from_be_bytes(&buf[current_idx..]).unwrap().0;
match packet_validator.validate(&sp_header, &buf[current_idx..]) {
SpValidity::Valid => {
let packet_size = sp_header.total_len();
let packet_size = sp_header.packet_len();
if (current_idx + packet_size) <= buf_len {
packet_sender
.send_packet(sender_id, &buf[current_idx..current_idx + packet_size])?;
@@ -76,7 +76,7 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
continue;
}
SpValidity::Skip => {
current_idx += sp_header.total_len();
current_idx += sp_header.packet_len();
}
// We might have lost sync. Try to find the start of a new space packet header.
SpValidity::Invalid => {
@@ -89,18 +89,19 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
#[cfg(test)]
mod tests {
use arbitrary_int::{u11, u14};
use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
CcsdsPacket, PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader,
CcsdsPacket, PacketId, PacketSequenceControl, PacketType, SequenceFlags, SpHeader,
ecss::{CreatorConfig, tc::PusTcCreator},
};
use crate::{encoding::tests::TcCacher, ComponentId};
use crate::{ComponentId, encoding::tests::TcCacher};
use super::{parse_buffer_for_ccsds_space_packets, SpValidity, SpacePacketValidator};
use super::{SpValidity, SpacePacketValidator, parse_buffer_for_ccsds_space_packets};
const PARSER_ID: ComponentId = 0x05;
const TEST_APID_0: u16 = 0x02;
const TEST_APID_1: u16 = 0x10;
const TEST_APID_0: u11 = u11::new(0x02);
const TEST_APID_1: u11 = u11::new(0x10);
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
@@ -131,7 +132,7 @@ mod tests {
#[test]
fn test_basic() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
let mut buffer: [u8; 32] = [0; 32];
let packet_len = ping_tc
.write_to_bytes(&mut buffer)
@@ -156,8 +157,8 @@ mod tests {
#[test]
fn test_multi_packet() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], CreatorConfig::default());
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -191,9 +192,9 @@ mod tests {
#[test]
fn test_multi_apid() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
let sph = SpHeader::new_from_apid(TEST_APID_1);
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], CreatorConfig::default());
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -221,10 +222,20 @@ mod tests {
#[test]
fn test_split_packet_multi() {
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let action_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
17,
1,
&[],
CreatorConfig::default(),
);
let action_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
8,
0,
&[],
CreatorConfig::default(),
);
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -255,8 +266,13 @@ mod tests {
#[test]
fn test_one_split_packet() {
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
17,
1,
&[],
CreatorConfig::default(),
);
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -281,7 +297,7 @@ mod tests {
fn test_smallest_packet() {
let ccsds_header_only = SpHeader::new(
PacketId::new(PacketType::Tc, true, TEST_APID_0),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(0)),
0,
);
let mut buf: [u8; 7] = [0; 7];

View File

@@ -1,4 +1,4 @@
use crate::{tmtc::PacketSenderRaw, ComponentId};
use crate::{ComponentId, tmtc::PacketSenderRaw};
use cobs::{decode_in_place, encode, max_encoding_length};
/// This function encodes the given packet with COBS and also wraps the encoded packet with
@@ -24,8 +24,8 @@ use cobs::{decode_in_place, encode, max_encoding_length};
/// assert!(encode_packet_with_cobs(&INVERTED_PACKET, &mut encoding_buf, &mut current_idx));
/// assert_eq!(encoding_buf[0], 0);
/// let dec_report = decode_in_place_report(&mut encoding_buf[1..]).expect("decoding failed");
/// assert_eq!(encoding_buf[1 + dec_report.src_used], 0);
/// assert_eq!(dec_report.dst_used, 5);
/// assert_eq!(encoding_buf[1 + dec_report.parsed_size()], 0);
/// assert_eq!(dec_report.frame_size(), 5);
/// assert_eq!(current_idx, 16);
/// ```
pub fn encode_packet_with_cobs(
@@ -104,8 +104,8 @@ pub(crate) mod tests {
use cobs::encode;
use crate::{
encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET},
ComponentId,
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET, TcCacher, encode_simple_packet},
};
use super::parse_buffer_for_cobs_encoded_packets;

View File

@@ -11,8 +11,8 @@ pub(crate) mod tests {
use alloc::collections::VecDeque;
use crate::{
tmtc::{PacketAsVec, PacketSenderRaw},
ComponentId,
tmtc::{PacketAsVec, PacketSenderRaw},
};
use super::cobs::encode_packet_with_cobs;

File diff suppressed because it is too large.

View File

@@ -0,0 +1,850 @@
//! # Event management and forwarding
//!
//! This is a legacy module. It is recommended to use [super::event_man] instead.
//!
//! It is recommended to read the
//! [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/events.html)
//! about events first.
//!
//! This module provides components to perform event routing. The most important component for this
//! task is the [EventManager]. It receives all events and then routes them to event subscribers
//! where appropriate.
//!
//! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps
//! listener groups identified by [ListenerKey]s to a [listener ID][ComponentId].
//! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender
//! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider
//! for each OBSW thread and then subscribe for all interesting events for a particular thread
//! using the send event provider ID.
//!
//! This can be done with the [EventManager] like this:
//!
//! 1. Provide a concrete [EventReceiveProvider] implementation. This abstraction allows using different
//! message queue backends. A straightforward implementation where dynamic memory allocation is
//! not a big concern would be to use the [std::sync::mpsc::Receiver] handle. The trait is
//! already implemented for this type.
//! 2. To set up event creators, create channel pairs using some message queue implementation.
//! Each event creator gets a (cloned) sender component which allows it to send events to the
//! manager.
//! 3. The event manager receives the receiver component as part of a [EventReceiveProvider]
//! implementation so all events are routed to the manager.
//! 4. Create the [event sender map][SenderMapProvider]s which allow routing events to
//! subscribers. You can now use the subscriber component IDs to subscribe
//! for event groups, for example by using the [EventManager::subscribe_single] method.
//! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
//! manager can route listener groups to the send provider.
//!
//! Some components like a PUS Event Service or PUS Event Action Service might require all
//! events to package them as telemetry or start actions where applicable.
//! Other components might only be interested in certain events. For example, a thermal system
//! handler might only be interested in temperature events generated by a thermal sensor component.
//!
//! # Examples
//!
//! You can check [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/pus_events.rs)
//! for a concrete example using multi-threading where events are routed to
//! different threads.
//!
//! The [satrs-example](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
//! also contains a full event manager instance and exposes a test event via the PUS test service.
//! The [PUS event](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/pus/event.rs)
//! module and the generic [events module](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/events.rs)
//! show how the event management modules can be integrated into more complex software.
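A compact, hypothetical illustration of the five setup steps described above, using the std mpsc channel types for which the traits are already implemented. The component IDs and the event are invented for the sketch, and it assumes the `event_man_legacy`/`events_legacy` modules are public at the crate root as used elsewhere in this comparison:

use std::sync::mpsc;
use satrs::ComponentId;
use satrs::event_man_legacy::{EventManager, EventManagerWithMpsc, EventMessage, EventMessageU32, EventSenderMpsc};
use satrs::events_legacy::{EventU32, Severity};

const EVENT_CREATOR: ComponentId = 1;
const TM_HANDLER: ComponentId = 2;

fn main() {
    // Steps 1 to 3: the manager receives all events through this channel.
    let (event_tx, event_rx) = mpsc::channel::<EventMessageU32>();
    let mut event_man: EventManagerWithMpsc = EventManager::new(event_rx);

    // Steps 4 and 5: register a subscriber queue and subscribe it to a single event.
    let some_event = EventU32::new(Severity::Info, 0, 5);
    let (tm_tx, tm_rx) = mpsc::channel();
    event_man.add_sender(EventSenderMpsc::new(TM_HANDLER, tm_tx));
    event_man.subscribe_single(&some_event, TM_HANDLER);

    // An event creator sends the event; the manager routes it to the subscriber.
    event_tx
        .send(EventMessage::new(EVENT_CREATOR, some_event))
        .unwrap();
    event_man.try_event_handling(|msg, error| panic!("routing error for {msg:?}: {error:?}"));
    assert_eq!(tm_rx.try_recv().unwrap().event(), some_event);
}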
use crate::events_legacy::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::params::Params;
use crate::queue::GenericSendError;
use core::fmt::Debug;
use core::marker::PhantomData;
use core::slice::Iter;
use crate::ComponentId;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "std")]
pub use std_mod::*;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum ListenerKey {
Single(LargestEventRaw),
Group(LargestGroupIdRaw),
All,
}
#[derive(Debug)]
pub struct EventMessage<Event: GenericEvent, Parameters: Debug = Params> {
sender_id: ComponentId,
event: Event,
params: Option<Parameters>,
}
impl<Event: GenericEvent, Parameters: Debug + Clone> EventMessage<Event, Parameters> {
pub fn new_generic(sender_id: ComponentId, event: Event, params: Option<&Parameters>) -> Self {
Self {
sender_id,
event,
params: params.cloned(),
}
}
pub fn sender_id(&self) -> ComponentId {
self.sender_id
}
pub fn event(&self) -> Event {
self.event
}
pub fn params(&self) -> Option<&Parameters> {
self.params.as_ref()
}
pub fn new(sender_id: ComponentId, event: Event) -> Self {
Self::new_generic(sender_id, event, None)
}
pub fn new_with_params(sender_id: ComponentId, event: Event, params: &Parameters) -> Self {
Self::new_generic(sender_id, event, Some(params))
}
}
pub type EventMessageU32 = EventMessage<EventU32, Params>;
pub type EventMessageU16 = EventMessage<EventU16, Params>;
/// Generic abstraction for an event sender.
pub trait EventSendProvider<Event: GenericEvent, ParamProvider: Debug = Params> {
type Error;
fn target_id(&self) -> ComponentId;
fn send(&self, message: EventMessage<Event, ParamProvider>) -> Result<(), Self::Error>;
}
/// Generic abstraction for an event receiver.
pub trait EventReceiveProvider<Event: GenericEvent, ParamsProvider: Debug = Params> {
type Error;
/// This function has to be provided by any event receiver. A call may or may not return
/// an event and optional auxiliary data.
fn try_recv_event(&self) -> Result<Option<EventMessage<Event, ParamsProvider>>, Self::Error>;
}
pub trait ListenerMapProvider {
#[cfg(feature = "alloc")]
fn get_listeners(&self) -> alloc::vec::Vec<ListenerKey>;
fn contains_listener(&self, key: &ListenerKey) -> bool;
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<'_, ComponentId>>;
fn add_listener(&mut self, key: ListenerKey, listener_id: ComponentId) -> bool;
fn remove_duplicates(&mut self, key: &ListenerKey);
}
pub trait SenderMapProvider<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
>
{
fn contains_send_event_provider(&self, target_id: &ComponentId) -> bool;
fn get_send_event_provider(&self, target_id: &ComponentId) -> Option<&EventSender>;
fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool;
}
/// Generic event manager implementation.
///
/// # Generics
///
/// * `EventReceiver`: [EventReceiveProvider] used to receive all events.
/// * `SenderMap`: [SenderMapProvider] which maps channel IDs to send providers.
/// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs.
/// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events.
/// * `Event`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32]
/// and [EventU16] are supported.
/// * `ParamProvider`: Auxiliary data which is sent with the event to provide optional context
/// information
pub struct EventManager<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
> {
event_receiver: EventReceiver,
sender_map: SenderMap,
listener_map: ListenerMap,
phantom: core::marker::PhantomData<(EventSender, Event, ParamProvider)>,
}
#[derive(Debug)]
pub enum EventRoutingResult<Event: GenericEvent, ParamProvider: Debug> {
/// No event was received
Empty,
/// An event was received and routed to listeners.
Handled {
num_recipients: u32,
event_msg: EventMessage<Event, ParamProvider>,
},
}
#[derive(Debug)]
pub enum EventRoutingError {
Send(GenericSendError),
NoSendersForKey(ListenerKey),
NoSenderForId(ComponentId),
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSender, Event, ParamProvider>
{
pub fn remove_duplicates(&mut self, key: &ListenerKey) {
self.listener_map.remove_duplicates(key)
}
/// Subscribe for a unique event.
pub fn subscribe_single(&mut self, event: &Event, sender_id: ComponentId) {
self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
}
/// Subscribe for an event group.
pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ComponentId) {
self.update_listeners(ListenerKey::Group(group_id), sender_id);
}
/// Subscribe for all events received by the manager.
///
/// For example, this can be useful for a handler component which sends every event as
/// a telemetry packet.
pub fn subscribe_all(&mut self, sender_id: ComponentId) {
self.update_listeners(ListenerKey::All, sender_id);
}
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSenderMap: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
{
pub fn new_with_custom_maps(
event_receiver: EventReceiver,
sender_map: SenderMap,
listener_map: ListenerMap,
) -> Self {
EventManager {
listener_map,
sender_map,
event_receiver,
phantom: PhantomData,
}
}
/// Add a new sender component which can be used to send events to subscribers.
pub fn add_sender(&mut self, send_provider: EventSenderMap) {
if !self
.sender_map
.contains_send_event_provider(&send_provider.target_id())
{
self.sender_map.add_send_event_provider(send_provider);
}
}
/// Generic function to update the event subscribers.
fn update_listeners(&mut self, key: ListenerKey, sender_id: ComponentId) {
self.listener_map.add_listener(key, sender_id);
}
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSenderMap: EventSendProvider<Event, ParamProvider, Error = GenericSendError>,
Event: GenericEvent + Copy,
ParamProvider: Clone + Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
{
/// This function will use the cached event receiver and try to receive one event.
/// If an event was received, it will try to route that event to all subscribed event listeners.
/// If this works without any issues, the [EventRoutingResult] will contain context information
/// about the routed event.
///
/// If an error occurs during the routing, the error handler will be called. The error handler
/// should take a reference to the event message as the first argument, and the routing error
/// as the second argument.
pub fn try_event_handling<E: FnMut(&EventMessage<Event, ParamProvider>, EventRoutingError)>(
&self,
mut error_handler: E,
) -> EventRoutingResult<Event, ParamProvider> {
let mut num_recipients = 0;
let mut send_handler =
|key: &ListenerKey, event_msg: &EventMessage<Event, ParamProvider>| {
if self.listener_map.contains_listener(key) {
if let Some(ids) = self.listener_map.get_listener_ids(key) {
for id in ids {
if let Some(sender) = self.sender_map.get_send_event_provider(id) {
if let Err(e) = sender.send(EventMessage::new_generic(
event_msg.sender_id,
event_msg.event,
event_msg.params.as_ref(),
)) {
error_handler(event_msg, EventRoutingError::Send(e));
} else {
num_recipients += 1;
}
} else {
error_handler(event_msg, EventRoutingError::NoSenderForId(*id));
}
}
} else {
error_handler(event_msg, EventRoutingError::NoSendersForKey(*key));
}
}
};
if let Ok(Some(event_msg)) = self.event_receiver.try_recv_event() {
let single_key = ListenerKey::Single(event_msg.event.raw_as_largest_type());
send_handler(&single_key, &event_msg);
let group_key = ListenerKey::Group(event_msg.event.group_id_as_largest_type());
send_handler(&group_key, &event_msg);
send_handler(&ListenerKey::All, &event_msg);
return EventRoutingResult::Handled {
num_recipients,
event_msg,
};
}
EventRoutingResult::Empty
}
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use alloc::vec::Vec;
use hashbrown::HashMap;
use super::*;
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses regular mpsc channels as the message queue backend.
pub type EventManagerWithMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
EventU32ReceiverMpsc<ParamProvider>,
DefaultSenderMap<EventSenderMpsc<Event>, Event, ParamProvider>,
DefaultListenerMap,
EventSenderMpsc<Event>,
>;
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses
/// [bounded mpsc senders](https://doc.rust-lang.org/std/sync/mpsc/struct.SyncSender.html) as the
/// message queue backend.
pub type EventManagerWithBoundedMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
EventU32ReceiverMpsc<ParamProvider>,
DefaultSenderMap<EventSenderMpscBounded<Event>, Event, ParamProvider>,
DefaultListenerMap,
EventSenderMpscBounded<Event>,
>;
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: 'static + Debug,
>
EventManager<
EventReceiver,
DefaultSenderMap<EventSender, Event, ParamProvider>,
DefaultListenerMap,
EventSender,
Event,
ParamProvider,
>
{
/// Create an event manager where the sender table will be the [DefaultSenderMap]
/// and the listener table will be the [DefaultListenerMap].
pub fn new(event_receiver: EventReceiver) -> Self {
Self {
listener_map: DefaultListenerMap::default(),
sender_map: DefaultSenderMap::default(),
event_receiver,
phantom: PhantomData,
}
}
}
/// Default listener map.
///
/// Simple implementation which uses a [HashMap] and a [Vec] internally.
#[derive(Default)]
pub struct DefaultListenerMap {
listeners: HashMap<ListenerKey, Vec<ComponentId>>,
}
impl ListenerMapProvider for DefaultListenerMap {
fn get_listeners(&self) -> Vec<ListenerKey> {
let mut key_list = Vec::new();
for key in self.listeners.keys() {
key_list.push(*key);
}
key_list
}
fn contains_listener(&self, key: &ListenerKey) -> bool {
self.listeners.contains_key(key)
}
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<'_, ComponentId>> {
self.listeners.get(key).map(|vec| vec.iter())
}
fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool {
if let Some(existing_list) = self.listeners.get_mut(&key) {
existing_list.push(sender_id);
} else {
let new_list = alloc::vec![sender_id];
self.listeners.insert(key, new_list);
}
true
}
fn remove_duplicates(&mut self, key: &ListenerKey) {
if let Some(list) = self.listeners.get_mut(key) {
list.sort_unstable();
list.dedup();
}
}
}
/// Default sender map.
///
/// Simple implementation which uses a [HashMap] internally.
pub struct DefaultSenderMap<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
> {
senders: HashMap<ComponentId, EventSender>,
phantom: PhantomData<(Event, ParamProvider)>,
}
impl<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent,
ParamProvider: Debug,
> Default for DefaultSenderMap<EventSender, Event, ParamProvider>
{
fn default() -> Self {
Self {
senders: Default::default(),
phantom: Default::default(),
}
}
}
impl<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent,
ParamProvider: Debug,
> SenderMapProvider<EventSender, Event, ParamProvider>
for DefaultSenderMap<EventSender, Event, ParamProvider>
{
fn contains_send_event_provider(&self, id: &ComponentId) -> bool {
self.senders.contains_key(id)
}
fn get_send_event_provider(&self, id: &ComponentId) -> Option<&EventSender> {
self.senders
.get(id)
.filter(|sender| sender.target_id() == *id)
}
fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool {
let id = send_provider.target_id();
if self.senders.contains_key(&id) {
return false;
}
self.senders.insert(id, send_provider).is_none()
}
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use crate::queue::GenericReceiveError;
use super::*;
use std::sync::mpsc;
impl<Event: GenericEvent + Send, ParamProvider: Debug>
EventReceiveProvider<Event, ParamProvider>
for mpsc::Receiver<EventMessage<Event, ParamProvider>>
{
type Error = GenericReceiveError;
fn try_recv_event(
&self,
) -> Result<Option<EventMessage<Event, ParamProvider>>, Self::Error> {
match self.try_recv() {
Ok(msg) => Ok(Some(msg)),
Err(e) => match e {
mpsc::TryRecvError::Empty => Ok(None),
mpsc::TryRecvError::Disconnected => {
Err(GenericReceiveError::TxDisconnected(None))
}
},
}
}
}
pub type EventU32ReceiverMpsc<ParamProvider = Params> =
mpsc::Receiver<EventMessage<EventU32, ParamProvider>>;
pub type EventU16ReceiverMpsc<ParamProvider = Params> =
mpsc::Receiver<EventMessage<EventU16, ParamProvider>>;
/// Generic event sender which uses a regular [mpsc::Sender] as the messaging backend to
/// send events.
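///
/// # Example
///
/// A minimal sketch, mirroring the unit tests below:
///
/// ```ignore
/// use std::sync::mpsc;
///
/// let (tx, _rx) = mpsc::channel();
/// // Target ID 0 identifies this listener when it is registered at the event manager.
/// let sender = EventSenderMpsc::<EventU32>::new(0, tx);
/// assert_eq!(sender.target_id(), 0);
/// ```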
#[derive(Clone)]
pub struct EventSenderMpsc<Event: GenericEvent + Send> {
target_id: ComponentId,
sender: mpsc::Sender<EventMessage<Event>>,
}
impl<Event: GenericEvent + Send> EventSenderMpsc<Event> {
pub fn new(target_id: ComponentId, sender: mpsc::Sender<EventMessage<Event>>) -> Self {
Self { target_id, sender }
}
}
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpsc<Event> {
type Error = GenericSendError;
fn target_id(&self) -> ComponentId {
self.target_id
}
fn send(&self, event_msg: EventMessage<Event>) -> Result<(), GenericSendError> {
self.sender
.send(event_msg)
.map_err(|_| GenericSendError::RxDisconnected)
}
}
/// Generic event sender which uses the [mpsc::SyncSender] as the messaging backend to send
/// events. This has the advantage that the channel is bounded and thus more deterministic.
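///
/// # Example
///
/// A minimal sketch of the queue full behaviour, mirroring the unit tests below:
///
/// ```ignore
/// use std::sync::mpsc;
///
/// let (tx, _rx) = mpsc::sync_channel(1);
/// let sender = EventSenderMpscBounded::<EventU32>::new(0, tx, 1);
/// sender
///     .send(EventMessage::new(0, EventU32::new(Severity::Info, 0, 0)))
///     .unwrap();
/// // The second send exceeds the capacity of 1 and fails instead of blocking.
/// assert!(matches!(
///     sender.send(EventMessage::new(0, EventU32::new(Severity::Info, 0, 0))),
///     Err(GenericSendError::QueueFull(Some(1)))
/// ));
/// ```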
#[derive(Clone)]
pub struct EventSenderMpscBounded<Event: GenericEvent + Send> {
target_id: ComponentId,
sender: mpsc::SyncSender<EventMessage<Event>>,
capacity: usize,
}
impl<Event: GenericEvent + Send> EventSenderMpscBounded<Event> {
pub fn new(
target_id: ComponentId,
sender: mpsc::SyncSender<EventMessage<Event>>,
capacity: usize,
) -> Self {
Self {
target_id,
sender,
capacity,
}
}
}
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpscBounded<Event> {
type Error = GenericSendError;
fn target_id(&self) -> ComponentId {
self.target_id
}
fn send(&self, event_msg: EventMessage<Event>) -> Result<(), Self::Error> {
if let Err(e) = self.sender.try_send(event_msg) {
return match e {
mpsc::TrySendError::Full(_) => {
Err(GenericSendError::QueueFull(Some(self.capacity as u32)))
}
mpsc::TrySendError::Disconnected(_) => Err(GenericSendError::RxDisconnected),
};
}
Ok(())
}
}
pub type EventU32SenderMpsc = EventSenderMpsc<EventU32>;
pub type EventU16SenderMpsc = EventSenderMpsc<EventU16>;
pub type EventU32SenderMpscBounded = EventSenderMpscBounded<EventU32>;
pub type EventU16SenderMpscBounded = EventSenderMpscBounded<EventU16>;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_man_legacy::EventManager;
use crate::events_legacy::{EventU32, GenericEvent, Severity};
use crate::params::{ParamsHeapless, ParamsRaw};
use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1};
use std::format;
use std::sync::mpsc::{self};
const TEST_EVENT: EventU32 = EventU32::new(Severity::Info, 0, 5);
fn check_next_event(
expected: EventU32,
receiver: &mpsc::Receiver<EventMessageU32>,
) -> Option<Params> {
if let Ok(event_msg) = receiver.try_recv() {
assert_eq!(event_msg.event, expected);
return event_msg.params;
}
None
}
fn check_handled_event(
res: EventRoutingResult<EventU32, Params>,
expected: EventU32,
expected_num_sent: u32,
expected_sender_id: ComponentId,
) {
assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled {
num_recipients,
event_msg,
} = res
{
assert_eq!(event_msg.event, expected);
assert_eq!(event_msg.sender_id, expected_sender_id);
assert_eq!(num_recipients, expected_num_sent);
}
}
fn generic_event_man() -> (mpsc::Sender<EventMessageU32>, EventManagerWithMpsc) {
let (event_sender, event_receiver) = mpsc::channel();
(event_sender, EventManager::new(event_receiver))
}
#[test]
fn test_basic() {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = mpsc::channel();
let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0);
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id());
event_man.add_sender(group_event_listener);
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
// Test event with one listener
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending single error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_1_0, &group_event_receiver_0);
}
#[test]
fn test_with_basic_params() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener);
event_sender
.send(EventMessage::new_with_params(
TEST_COMPONENT_ID_0.id(),
event_grp_0,
&Params::Heapless((2_u32, 3_u32).into()),
))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let aux = check_next_event(event_grp_0, &single_event_receiver);
assert!(aux.is_some());
let aux = aux.unwrap();
if let Params::Heapless(ParamsHeapless::Raw(ParamsRaw::U32Pair(pair))) = aux {
assert_eq!(pair.0, 2);
assert_eq!(pair.1, 3);
} else {
panic!("{}", format!("Unexpected auxiliary value type {:?}", aux));
}
}
/// Test listening for multiple groups
#[test]
fn test_multi_group() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let res = event_man.try_event_handling(error_handler);
assert!(matches!(res, EventRoutingResult::Empty));
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (event_grp_0_sender, event_grp_0_receiver) = mpsc::channel();
let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender);
event_man.subscribe_group(
event_grp_0.group_id(),
event_grp_0_and_1_listener.target_id(),
);
event_man.subscribe_group(
event_grp_1_0.group_id(),
event_grp_0_and_1_listener.target_id(),
);
event_man.add_sender(event_grp_0_and_1_listener);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending Event Group 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver);
}
/// Test listening to the same event from multiple listeners. Also test listening
/// to both group and single events from one listener
#[test]
fn test_listening_to_same_event_and_multi_type() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, event_0_rx_0) = mpsc::channel();
let (event_0_tx_1, event_0_rx_1) = mpsc::channel();
let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0);
let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1);
let event_listener_0_sender_id = event_listener_0.target_id();
event_man.subscribe_single(&event_0, event_listener_0_sender_id);
event_man.add_sender(event_listener_0);
let event_listener_1_sender_id = event_listener_1.target_id();
event_man.subscribe_single(&event_0, event_listener_1_sender_id);
event_man.add_sender(event_listener_1);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering Event 0 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_0, &event_0_rx_1);
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering Event 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
.expect("Triggering Event 1 failed");
// 3 event messages will be forwarded to the listeners now
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
// Both the single event and the group event should arrive now
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_1, &event_0_rx_0);
// Do double insertion and then remove duplicates
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id()));
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_1))
.expect("Triggering Event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_0.id());
}
#[test]
fn test_all_events_listener() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, event_receiver) = mpsc::channel();
let mut event_man = EventManagerWithMpsc::new(event_receiver);
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, all_events_rx) = mpsc::channel();
let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0);
event_man.subscribe_all(all_events_listener.target_id());
event_man.add_sender(all_events_listener);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering event 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
.expect("Triggering event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_0, &all_events_rx);
check_next_event(event_1, &all_events_rx);
}
#[test]
fn test_bounded_event_sender_queue_full() {
let (event_sender, _event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
let error = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT));
if let Err(e) = error {
assert!(matches!(e, GenericSendError::QueueFull(Some(3))));
} else {
panic!("unexpected error {error:?}");
}
}
#[test]
fn test_bounded_event_sender_rx_dropped() {
let (event_sender, event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
drop(event_receiver);
if let Err(e) = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) {
assert!(matches!(e, GenericSendError::RxDisconnected));
} else {
panic!("Expected error");
}
}
}

@@ -1,8 +1,13 @@
//! Event support module
//! # Event support module
//!
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
//! The user can define events as custom structs or enumerations.
//! The event structures defined here are type erased and rely on some properties which
//! should be provided by the user through the [Event] and [serde::Serialize] traits.
//!
//! This in turn allows the use of higher-level abstractions like the event manager.
//!
//! This module includes the basic type erased event structs [EventErasedAlloc] and
//! [EventErasedHeapless].
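//!
//! A minimal usage sketch of the type erased API (added for illustration, mirroring the unit
//! tests further below; `MyEvent` is not part of the library):
//!
//! ```ignore
//! use arbitrary_int::u14;
//!
//! #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
//! enum MyEvent {
//!     SomethingHappened,
//! }
//!
//! impl Event for MyEvent {
//!     fn id(&self) -> EventId {
//!         EventId::new(Severity::Info, u14::new(0), 0)
//!     }
//! }
//!
//! // Type erasure works by serializing the event with postcard into an owned buffer.
//! let erased = EventErasedAlloc::new(&MyEvent::SomethingHappened);
//! assert_eq!(erased.id().severity(), Severity::Info);
//! ```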
//! The abstraction also allows to group related events using a group ID, and the severity
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
//!
@@ -10,29 +15,11 @@
//! - LOW
//! - MEDIUM
//! - HIGH
//!
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
//! This allows to easily create a static list of constant events which can then be used to generate
//! event telemetry using the PUS event manager modules.
//!
//! # Examples
//!
//! ```
//! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//!
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! ```
use core::fmt::Debug;
use core::hash::Hash;
use core::marker::PhantomData;
use delegate::delegate;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::util::{ToBeBytes, UnsignedEnum};
use arbitrary_int::{prelude::*, u14};
#[cfg(feature = "heapless")]
use spacepackets::ByteConversionError;
/// Using a type definition allows to change this to u64 in the future more easily
@@ -40,12 +27,14 @@ pub type LargestEventRaw = u32;
/// Using a type definition allows to change this to u32 in the future more easily
pub type LargestGroupIdRaw = u16;
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
pub const MAX_GROUP_ID_U32_EVENT: u16 = u14::MAX.value();
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[derive(
Copy, Clone, PartialEq, Eq, Debug, Hash, num_enum::TryFromPrimitive, num_enum::IntoPrimitive,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Severity {
Info = 0,
Low = 1,
@@ -57,801 +46,203 @@ pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
const SEVERITY: Severity;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::Info;
pub trait Event: Clone {
fn id(&self) -> EventId;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::Low;
}
pub type GroupId = u14;
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::Medium;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::High;
}
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::Info as u8 => Ok(Severity::Info),
x if x == Severity::Low as u8 => Ok(Severity::Low),
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
x if x == Severity::High as u8 => Ok(Severity::High),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
/// Unique event identifier.
///
/// Consists of a group ID, a unique ID and the severity.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct EventBase<Raw, GroupId, UniqueId> {
severity: Severity,
pub struct EventId {
group_id: GroupId,
unique_id: UniqueId,
phantom: PhantomData<Raw>,
unique_id: u16,
severity: Severity,
}
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
fn write_to_bytes(
&self,
raw: Raw,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: width,
});
impl EventId {
pub fn new(severity: Severity, group_id: u14, unique_id: u16) -> Self {
Self {
severity,
group_id,
unique_id,
}
buf.copy_from_slice(raw.to_be_bytes().as_ref());
Ok(raw.written_len())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
pub fn group_id(&self) -> u14 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
pub fn raw(&self) -> u32 {
((self.severity as u32) << 30)
| ((self.group_id.as_u16() as u32) << 16)
| (self.unique_id as u32)
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event.
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
macro_rules! impl_event_provider {
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
impl GenericEvent for $BaseIdent {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
delegate!(to self.event {
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
});
}
}
}
macro_rules! try_from_impls {
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
type Error = Severity;
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
Self::try_from_generic($severity, raw)
}
}
};
}
macro_rules! const_from_fn {
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
Self {
base: event.event.base,
}
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32TypedSev<SEVERITY> {
event: EventU32,
phantom: PhantomData<SEVERITY>,
}
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
Self { base: e.event.base }
}
}
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
fn as_ref(&self) -> &EventU32 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU32 {
&mut self.event
}
}
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
impl EventU32 {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), None will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > MAX_GROUP_ID_U32_EVENT {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > MAX_GROUP_ID_U32_EVENT {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
Self::from(u32::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl From<u32> for EventU32 {
impl From<u32> for EventId {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let group_id = u14::new(((raw >> 16) & 0x3FFF) as u16);
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
/// Event which was type erased and serialized into a [alloc::vec::Vec].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg(feature = "alloc")]
pub struct EventErasedAlloc {
id: EventId,
event_raw: alloc::vec::Vec<u8>,
}
#[cfg(feature = "alloc")]
impl EventErasedAlloc {
#[cfg(feature = "serde")]
/// Creates a new event by serializing the given event using [postcard].
pub fn new(event: &(impl serde::Serialize + Event)) -> Self {
Self {
id: event.id(),
event_raw: postcard::to_allocvec(event).unwrap(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
pub fn new_with_raw_event(id: EventId, event_raw: &[u8]) -> Self {
Self {
id,
event_raw: event_raw.to_vec(),
}
}
fn value(&self) -> u64 {
self.raw().into()
#[inline]
pub fn raw(&self) -> &[u8] {
&self.event_raw
}
}
impl EcssEnumeration for EventU32 {
fn pfc(&self) -> u8 {
u32::BITS as u8
#[cfg(feature = "serde")]
#[cfg(feature = "alloc")]
impl<T: serde::Serialize + Event> From<T> for EventErasedAlloc {
fn from(event: T) -> Self {
Self::new(&event)
}
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
#[cfg(feature = "alloc")]
impl Event for EventErasedAlloc {
fn id(&self) -> EventId {
self.id
}
}
/// Event which was type erased and serialized into a [heapless::vec::Vec].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg(feature = "heapless")]
pub struct EventErasedHeapless<const N: usize> {
id: EventId,
event_raw: heapless::vec::Vec<u8, N>,
}
#[cfg(feature = "heapless")]
impl<const N: usize> Event for EventErasedHeapless<N> {
fn id(&self) -> EventId {
self.id
}
}
#[cfg(feature = "heapless")]
impl<const N: usize> EventErasedHeapless<N> {
#[cfg(feature = "serde")]
/// Creates a new event by serializing the given event using [postcard].
pub fn new(event: &(impl serde::Serialize + Event)) -> Result<Self, ByteConversionError> {
let ser_size = postcard::experimental::serialized_size(event).unwrap();
if ser_size > N {
return Err(ByteConversionError::ToSliceTooSmall {
found: N,
expected: ser_size,
});
}
let mut vec = heapless::Vec::<u8, N>::new();
vec.resize(N, 0).unwrap();
postcard::to_slice(event, vec.as_mut_slice()).unwrap();
Ok(Self {
id: event.id(),
event_raw: vec,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
pub fn new_with_raw_event(id: EventId, event_raw: heapless::Vec<u8, N>) -> Self {
Self { id, event_raw }
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16TypedSev<SEVERITY> {
event: EventU16,
phantom: PhantomData<SEVERITY>,
}
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
fn as_ref(&self) -> &EventU16 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU16 {
&mut self.event
}
}
impl EventU16 {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU16 {
#[inline]
fn pfc(&self) -> u8 {
u16::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU32) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
#[inline]
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU16) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
#[inline]
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
self.raw() == other.raw()
pub fn raw(&self) -> &[u8] {
&self.event_raw
}
}
#[cfg(test)]
mod tests {
use super::EventU32TypedSev;
use super::*;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TestEvent {
Info,
ErrorOtherGroup,
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
/// The fact that this compiles is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
#[test]
fn test_normal_from_raw_conversion() {
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
.expect("Creating typed EventU32 failed");
assert_eq!(conv_from_raw, INFO_EVENT);
}
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
.expect("Creating typed EventU16 failed");
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
impl Event for TestEvent {
fn id(&self) -> EventId {
match self {
TestEvent::Info => EventId::new(Severity::Info, u14::new(0), 0),
TestEvent::ErrorOtherGroup => EventId::new(Severity::High, u14::new(1), 1),
}
}
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::Info);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(TestEvent::Info.id().severity(), Severity::Info);
assert_eq!(TestEvent::Info.id().unique_id(), 0);
assert_eq!(TestEvent::Info.id().group_id().value(), 0);
assert_eq!(TestEvent::ErrorOtherGroup.id().group_id().value(), 1);
assert_eq!(TestEvent::ErrorOtherGroup.id().unique_id(), 1);
let raw_event = TestEvent::Info.id().raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
#[cfg(feature = "serde")]
fn test_basic_erased_alloc_event() {
let event = EventErasedAlloc::new(&TestEvent::Info);
let test_event: TestEvent = postcard::from_bytes(event.raw()).unwrap();
assert_eq!(test_event, TestEvent::Info);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
let event_read_back = EventU32::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
let event_read_back = EventU16::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::High as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);
assert_eq!(reduced.unique_id(), 1);
assert_eq!(raw, reduced.raw());
}
#[test]
fn const_reducation() {
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
#[cfg(all(feature = "serde", feature = "alloc"))]
fn test_basic_erased_heapless_event() {
let event = EventErasedHeapless::<8>::new(&TestEvent::Info).unwrap();
let test_event: TestEvent = postcard::from_bytes(event.raw()).unwrap();
assert_eq!(test_event, TestEvent::Info);
}
}

satrs/src/events_legacy.rs Normal file
@@ -0,0 +1,859 @@
//! # Event support module
//!
//! This is a legacy module. It is recommended to use [super::events] instead.
//!
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
//! The abstraction also allows to group related events using a group ID, and the severity
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
//!
//! - INFO
//! - LOW
//! - MEDIUM
//! - HIGH
//!
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
//! This allows to easily create a static list of constant events which can then be used to generate
//! event telemetry using the PUS event manager modules.
//!
//! # Examples
//!
//! ```
//! use satrs::events_legacy::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//!
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! ```
use core::fmt::Debug;
use core::hash::Hash;
use core::marker::PhantomData;
use delegate::delegate;
use spacepackets::ByteConversionError;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::util::{ToBeBytes, UnsignedEnum};
/// Using a type definition allows to change this to u64 in the future more easily
pub type LargestEventRaw = u32;
/// Using a type definition allows to change this to u32 in the future more easily
pub type LargestGroupIdRaw = u16;
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Severity {
Info = 0,
Low = 1,
Medium = 2,
High = 3,
}
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
const SEVERITY: Severity;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::Info;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::Low;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::Medium;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::High;
}
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::Info as u8 => Ok(Severity::Info),
x if x == Severity::Low as u8 => Ok(Severity::Low),
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
x if x == Severity::High as u8 => Ok(Severity::High),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct EventBase<Raw, GroupId, UniqueId> {
severity: Severity,
group_id: GroupId,
unique_id: UniqueId,
phantom: PhantomData<Raw>,
}
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
fn write_to_bytes(
&self,
raw: Raw,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: width,
});
}
buf.copy_from_slice(raw.to_be_bytes().as_ref());
Ok(raw.written_len())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event.
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
macro_rules! impl_event_provider {
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
impl GenericEvent for $BaseIdent {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
delegate!(to self.event {
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
});
}
}
}
macro_rules! try_from_impls {
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
type Error = Severity;
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
Self::try_from_generic($severity, raw)
}
}
};
}
macro_rules! const_from_fn {
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
Self {
base: event.event.base,
}
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32TypedSev<SEVERITY> {
event: EventU32,
phantom: PhantomData<SEVERITY>,
}
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
Self { base: e.event.base }
}
}
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
fn as_ref(&self) -> &EventU32 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU32 {
&mut self.event
}
}
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
impl EventU32 {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), None will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
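///
/// For example (illustrative sketch, not part of the original documentation):
///
/// ```ignore
/// // Severity::High (0b11) << 30 | group ID 2 << 16 | unique ID 3
/// let event = EventU32::new_checked(Severity::High, 2, 3).unwrap();
/// assert_eq!(event.raw(), 0xC002_0003);
/// ```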
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > MAX_GROUP_ID_U32_EVENT {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > MAX_GROUP_ID_U32_EVENT {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
Self::from(u32::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl From<u32> for EventU32 {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU32 {
fn pfc(&self) -> u8 {
u32::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16TypedSev<SEVERITY> {
event: EventU16,
phantom: PhantomData<SEVERITY>,
}
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
fn as_ref(&self) -> &EventU16 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU16 {
&mut self.event
}
}
impl EventU16 {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
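///
/// For example (illustrative sketch, not part of the original documentation):
///
/// ```ignore
/// // Severity::Low (0b01) << 14 | group ID 1 << 8 | unique ID 2
/// let event = EventU16::new_checked(Severity::Low, 1, 2).unwrap();
/// assert_eq!(event.raw(), 0x4102);
/// ```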
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU16 {
#[inline]
fn pfc(&self) -> u8 {
u16::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU32) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
#[inline]
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU16) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
#[inline]
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
#[cfg(test)]
mod tests {
use super::EventU32TypedSev;
use super::*;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
/// The fact that this compiles is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
#[test]
fn test_normal_from_raw_conversion() {
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
.expect("Creating typed EventU32 failed");
assert_eq!(conv_from_raw, INFO_EVENT);
}
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
.expect("Creating typed EventU16 failed");
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::Info);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
let event_read_back = EventU32::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
let event_read_back = EventU16::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::High as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);
assert_eq!(reduced.unique_id(), 1);
assert_eq!(raw, reduced.raw());
}
#[test]
fn const_reduction() {
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
}
}
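
The tests above pin down the bit layout of the raw event words: for EventU16, the two most significant bits carry the severity, the next six bits the group ID, and the low byte the unique ID (EventU32 splits the same way into 2/14/16 bits). A minimal sketch in the style of the tests above, assuming the plain `EventU16` exposes the same `GenericEvent` accessors as the typed variants; the 0b01 encoding of `Severity::Low` is an assumption, only Info = 0 and High = 3 are pinned down by the tests:

#[test]
fn eventu16_bit_layout_sketch() {
    // Assumption: Severity::Low maps to 0b01 in the 2-bit severity field.
    let event = EventU16::new(Severity::Low, 0b10_1010, 0x42);
    assert_eq!(event.raw(), 0b01_101010_01000010);
    assert_eq!(event.severity(), Severity::Low);
    assert_eq!(event.group_id(), 0b10_1010);
    assert_eq!(event.unique_id(), 0x42);
    // Round trip through the big-endian wire representation.
    assert_eq!(EventU16::from_be_bytes(event.raw().to_be_bytes()), event);
}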


@@ -39,9 +39,9 @@ pub trait ExecutableWithType: Executable {
///
/// * `executable`: Executable task
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks.
/// If [None] is passed, no sleeping will be performed.
/// If [None] is passed, no sleeping will be performed.
/// * `op_code`: Operation code which is passed to the executable task
/// [operation call][Executable::periodic_op]
/// [operation call][Executable::periodic_op]
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
pub fn exec_sched_single<
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
@@ -55,33 +55,35 @@ pub fn exec_sched_single<
let mut cycle_count = 0;
thread::Builder::new()
.name(String::from(executable.task_name()))
.spawn(move || loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
return Ok(OpResult::Ok);
}
Err(TryRecvError::Empty) => (),
}
}
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
return Ok(OpResult::Ok);
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_count += 1;
if cycle_count == cycles {
return Ok(OpResult::Ok);
.spawn(move || {
loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
return Ok(OpResult::Ok);
}
Err(TryRecvError::Empty) => (),
}
}
}
if let Some(freq) = task_freq {
thread::sleep(freq);
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
return Ok(OpResult::Ok);
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_count += 1;
if cycle_count == cycles {
return Ok(OpResult::Ok);
}
}
}
if let Some(freq) = task_freq {
thread::sleep(freq);
}
}
})
}
@@ -110,51 +112,53 @@ pub fn exec_sched_multi<
thread::Builder::new()
.name(String::from(task_name))
.spawn(move || loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
removal_flags.iter_mut().for_each(|x| *x = true);
.spawn(move || {
loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
removal_flags.iter_mut().for_each(|x| *x = true);
}
Err(TryRecvError::Empty) => (),
}
Err(TryRecvError::Empty) => (),
}
}
for (idx, executable) in executable_vec.iter_mut().enumerate() {
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
removal_flags[idx] = true;
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_counts[idx] += 1;
if cycle_counts[idx] == cycles {
for (idx, executable) in executable_vec.iter_mut().enumerate() {
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
removal_flags[idx] = true;
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_counts[idx] += 1;
if cycle_counts[idx] == cycles {
removal_flags[idx] = true;
}
}
}
}
let mut removal_iter = removal_flags.iter();
executable_vec.retain(|_| !*removal_iter.next().unwrap());
removal_iter = removal_flags.iter();
cycle_counts.retain(|_| !*removal_iter.next().unwrap());
removal_flags.retain(|&i| !i);
if executable_vec.is_empty() {
return Ok(OpResult::Ok);
}
let freq = task_freq.unwrap_or_else(|| panic!("No task frequency specified"));
thread::sleep(freq);
}
let mut removal_iter = removal_flags.iter();
executable_vec.retain(|_| !*removal_iter.next().unwrap());
removal_iter = removal_flags.iter();
cycle_counts.retain(|_| !*removal_iter.next().unwrap());
removal_flags.retain(|&i| !i);
if executable_vec.is_empty() {
return Ok(OpResult::Ok);
}
let freq = task_freq.unwrap_or_else(|| panic!("No task frequency specified"));
thread::sleep(freq);
})
}
#[cfg(test)]
mod tests {
use super::{
exec_sched_multi, exec_sched_single, Executable, ExecutableWithType, ExecutionType,
OpResult,
Executable, ExecutableWithType, ExecutionType, OpResult, exec_sched_multi,
exec_sched_single,
};
use bus::Bus;
use std::boxed::Box;
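
The scheduler hunks above mostly re-nest the spawned closure bodies into an explicit `loop` block; the termination and cycle-counting logic is unchanged. The cancellation pattern they rely on is sketched below in isolation, with a plain `mpsc::Receiver<()>` standing in for the broadcast receiver used by the scheduler (the helper name is hypothetical):

use std::sync::mpsc::{self, TryRecvError};
use std::thread;
use std::time::Duration;

fn spawn_cancellable_worker(stop_rx: mpsc::Receiver<()>) -> thread::JoinHandle<()> {
    thread::spawn(move || {
        loop {
            // Exit on an explicit stop signal or when the sender side is gone.
            match stop_rx.try_recv() {
                Ok(()) | Err(TryRecvError::Disconnected) => return,
                Err(TryRecvError::Empty) => (),
            }
            // Periodic work would go here.
            thread::sleep(Duration::from_millis(20));
        }
    })
}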


@@ -13,10 +13,10 @@ use crate::encoding::parse_buffer_for_cobs_encoded_packets;
use crate::tmtc::PacketSenderRaw;
use crate::tmtc::PacketSource;
use crate::ComponentId;
use crate::hal::std::tcp_server::{
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
};
use crate::ComponentId;
use super::tcp_server::HandledConnectionHandler;
use super::tcp_server::HandledConnectionInfo;
@@ -136,12 +136,12 @@ pub struct TcpTmtcInCobsServer<
}
impl<
TmSource: PacketSource<Error = TmError>,
TcReceiver: PacketSenderRaw<Error = TcError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpTmtcInCobsServer<TmSource, TcReceiver, HandledConnection, TmError, TcError>
TmSource: PacketSource<Error = TmError>,
TcReceiver: PacketSenderRaw<Error = TcError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpTmtcInCobsServer<TmSource, TcReceiver, HandledConnection, TmError, TcError>
{
/// Create a new TCP TMTC server which exchanges TMTC packets encoded with
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
@@ -150,9 +150,9 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
/// forwarded to this TC receiver.
/// forwarded to this TC receiver.
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,
@@ -206,14 +206,14 @@ mod tests {
};
use crate::{
ComponentId,
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
hal::std::tcp_server::{
tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig,
tests::{ConnectionFinishedHandler, SyncTmSource},
},
queue::GenericSendError,
tmtc::PacketAsVec,
ComponentId,
};
use alloc::sync::Arc;
use cobs::encode;
@@ -377,13 +377,13 @@ mod tests {
current_idx += 1;
let mut dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + INVERTED_PACKET.len()],
&INVERTED_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0, "invalid sentinel end byte");
current_idx += 1;
@@ -393,13 +393,13 @@ mod tests {
current_idx += 1;
dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + SIMPLE_PACKET.len()],
&SIMPLE_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0);
break;
@@ -435,17 +435,19 @@ mod tests {
generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
let start = Instant::now();
// Call the connection handler in separate thread, does block.
let thread_jh = thread::spawn(move || loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
let thread_jh = thread::spawn(move || {
loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
}
}
});
thread_jh.join().expect("thread join failed");
@@ -469,20 +471,22 @@ mod tests {
let stop_signal_copy = stop_signal.clone();
let start = Instant::now();
// Call the connection handler in separate thread, does block.
let thread_jh = thread::spawn(move || loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
panic!("unexpected accept timeout");
}
if stop_signal_copy.load(Ordering::Relaxed) {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
let thread_jh = thread::spawn(move || {
loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
panic!("unexpected accept timeout");
}
if stop_signal_copy.load(Ordering::Relaxed) {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
}
}
});
// We connect but do not do anything.
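
The assertions above migrate from the `dst_used`/`src_used` fields of the COBS decode report to the `frame_size()`/`parsed_size()` accessors. A minimal sketch of consuming one frame with that API; the accessor names are taken from the tests above, while the helper itself is hypothetical:

/// Decodes one COBS frame in place and returns (decoded_len, consumed_len),
/// mirroring how the test above advances `current_idx`.
fn decode_one_frame(buf: &mut [u8]) -> Option<(usize, usize)> {
    let report = cobs::decode_in_place_report(buf).ok()?;
    // frame_size(): number of decoded payload bytes now at the start of `buf`.
    // parsed_size(): number of encoded source bytes consumed from `buf`.
    Some((report.frame_size(), report.parsed_size()))
}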


@@ -11,8 +11,8 @@ use std::io::{self, Read};
use std::net::SocketAddr;
use std::thread;
use crate::tmtc::{PacketSenderRaw, PacketSource};
use crate::ComponentId;
use crate::tmtc::{PacketSenderRaw, PacketSource};
use thiserror::Error;
// Re-export the TMTC in COBS server.
@@ -25,22 +25,22 @@ pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpac
///
/// * `addr` - Address of the TCP server.
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
///   encoding of that data. This buffer should be large enough to hold the maximum expected
/// TM size read from the packet source.
///   encoding of that data. This buffer should be large enough to hold the maximum expected
/// TM size read from the packet source.
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
///   bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
///   bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// * `reuse_addr` - Can be used to set the `SO_REUSEADDR` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// * `reuse_port` - Can be used to set the `SO_REUSEPORT` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
#[derive(Debug, Copy, Clone)]
pub struct ServerConfig {
pub id: ComponentId,
@@ -187,14 +187,14 @@ pub struct TcpTmtcGenericServer<
}
impl<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcSendError: 'static,
>
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcSendError: 'static,
>
TcpTmtcGenericServer<
TmSource,
TcSender,
@@ -211,12 +211,12 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tc_parser` - Parser which extracts telecommands from the raw bytestream received from
/// the client.
/// the client.
/// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommand which was decoded successfully will be forwarded
/// using this TC sender.
/// using this TC sender.
/// * `stop_signal` - Can be used to stop the server even if a connection is ongoing.
pub fn new(
cfg: ServerConfig,


@@ -5,9 +5,9 @@ use mio::net::{TcpListener, TcpStream};
use std::{io::Write, net::SocketAddr};
use crate::{
ComponentId,
encoding::{ccsds::SpacePacketValidator, parse_buffer_for_ccsds_space_packets},
tmtc::{PacketSenderRaw, PacketSource},
ComponentId,
};
use super::tcp_server::{
@@ -107,28 +107,28 @@ pub struct TcpSpacepacketsServer<
}
impl<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcError>,
Validator: SpacePacketValidator,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection, TmError, TcError>
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcError>,
Validator: SpacePacketValidator,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection, TmError, TcError>
{
///
/// ## Parameters
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommands which were decoded successfully will be
/// forwarded using this [PacketSenderRaw].
/// forwarded using this [PacketSenderRaw].
/// * `validator` - Used to determine the space packets relevant for further processing and
/// to detect broken space packets.
/// to detect broken space packets.
/// * `handled_connection_hook` - Called to notify the user about a successfully handled
/// connection.
/// connection.
/// * `stop_signal` - Can be used to shut down the TCP server even for longer running
/// connections.
/// connections.
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,
@@ -183,29 +183,30 @@ mod tests {
};
use alloc::sync::Arc;
use arbitrary_int::u11;
use hashbrown::HashSet;
use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
CcsdsPacket, PacketId, SpHeader,
ecss::{CreatorConfig, WritablePusPacket, tc::PusTcCreator},
};
use crate::{
ComponentId,
encoding::ccsds::{SpValidity, SpacePacketValidator},
hal::std::tcp_server::{
tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig,
tests::{ConnectionFinishedHandler, SyncTmSource},
},
queue::GenericSendError,
tmtc::PacketAsVec,
ComponentId,
};
use super::TcpSpacepacketsServer;
const TCP_SERVER_ID: ComponentId = 0x05;
const TEST_APID_0: u16 = 0x02;
const TEST_APID_0: u11 = u11::new(0x02);
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_APID_1: u16 = 0x10;
const TEST_APID_1: u11 = u11::new(0x10);
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
#[derive(Default)]
@@ -283,8 +284,13 @@ mod tests {
.check_no_connections_left();
set_if_done.store(true, Ordering::Relaxed);
});
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
17,
1,
&[],
CreatorConfig::default(),
);
let tc_0 = ping_tc.to_vec().expect("packet generation failed");
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
stream
@@ -314,13 +320,23 @@ mod tests {
// Add telemetry
let mut total_tm_len = 0;
let verif_tm =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true);
let verif_tm = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
1,
1,
&[],
CreatorConfig::default(),
);
let tm_0 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_0.len();
tm_source.add_tm(&tm_0);
let verif_tm =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true);
let verif_tm = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
1,
3,
&[],
CreatorConfig::default(),
);
let tm_1 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_1.len();
tm_source.add_tm(&tm_1);
@@ -366,14 +382,24 @@ mod tests {
.expect("setting reas timeout failed");
// Send telecommands
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
17,
1,
&[],
CreatorConfig::default(),
);
let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
stream
.write_all(&tc_0)
.expect("writing to TCP server failed");
let action_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let action_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
8,
0,
&[],
CreatorConfig::default(),
);
let tc_1 = action_tc.to_vec().expect("action tc creation failed");
stream
.write_all(&tc_1)
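
The hunks above adapt the test telecommand creation to the updated spacepackets API: `SpHeader::new_from_apid` now takes an `arbitrary_int::u11` instead of a plain `u16`, and `PusTcCreator::new_simple` takes a `CreatorConfig` instead of a trailing boolean flag. The migration in isolation (service 17/subservice 1 ping as in the tests; the helper name is hypothetical):

use arbitrary_int::u11;
use spacepackets::SpHeader;
use spacepackets::ecss::{CreatorConfig, WritablePusPacket};
use spacepackets::ecss::tc::PusTcCreator;

// Hypothetical helper: builds the raw ping TC bytes with the new API.
fn build_ping_tc() -> Vec<u8> {
    // Old call site: PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
    let ping_tc = PusTcCreator::new_simple(
        SpHeader::new_from_apid(u11::new(0x02)),
        17,
        1,
        &[],
        CreatorConfig::default(),
    );
    ping_tc.to_vec().expect("ping TC creation failed")
}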


@@ -1,6 +1,6 @@
//! Generic UDP TC server.
use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use crate::tmtc::PacketSenderRaw;
use core::fmt::Debug;
use std::io::{self, ErrorKind};
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
@@ -26,7 +26,8 @@ use std::vec::Vec;
/// use satrs::ComponentId;
/// use satrs::tmtc::PacketSenderRaw;
/// use spacepackets::SpHeader;
/// use spacepackets::ecss::tc::PusTcCreator;
/// use spacepackets::ecss::tc::{PusTcCreator, CreatorConfig};
/// use arbitrary_int::u11;
///
/// const UDP_SERVER_ID: ComponentId = 0x05;
///
@@ -34,8 +35,8 @@ use std::vec::Vec;
/// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
/// let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, packet_sender)
/// .expect("Creating UDP TMTC server failed");
/// let sph = SpHeader::new_from_apid(0x02);
/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
/// let sph = SpHeader::new_from_apid(u11::new(0x02));
/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
/// // Can not fail.
/// let ping_tc_raw = pus_tc.to_vec().unwrap();
///
@@ -105,7 +106,7 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
Err(ReceiveResult::NothingReceived)
} else {
Err(e.into())
}
};
}
};
let (num_bytes, from) = res;
@@ -123,14 +124,15 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
#[cfg(test)]
mod tests {
use crate::ComponentId;
use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use crate::queue::GenericSendError;
use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use arbitrary_int::u11;
use core::cell::RefCell;
use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::SpHeader;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::tc::PusTcCreator;
use std::collections::VecDeque;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::vec::Vec;
@@ -166,8 +168,8 @@ mod tests {
let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
.expect("Creating UDP TMTC server failed");
is_send(&udp_tc_server);
let sph = SpHeader::new_from_apid(0x02);
let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let sph = SpHeader::new_from_apid(u11::new(0x02));
let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
let len = pus_tc
.write_to_bytes(&mut buf)
.expect("Error writing PUS TC packet");


@@ -15,7 +15,7 @@
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(feature = "alloc")]
#[cfg(any(feature = "alloc", test))]
extern crate alloc;
#[cfg(feature = "alloc")]
extern crate downcast_rs;
@@ -27,7 +27,9 @@ pub mod action;
pub mod dev_mgmt;
pub mod encoding;
pub mod event_man;
pub mod event_man_legacy;
pub mod events;
pub mod events_legacy;
#[cfg(feature = "std")]
pub mod executable;
pub mod hal;
@@ -49,6 +51,7 @@ pub mod scheduling;
pub mod subsystem;
pub mod time;
pub mod tmtc;
pub mod ccsds;
pub use spacepackets;


@@ -11,11 +11,11 @@ pub use alloc_mod::*;
pub use std_mod::*;
use crate::{
ComponentId,
queue::{GenericReceiveError, GenericSendError},
request::{
GenericMessage, MessageMetadata, MessageReceiverProvider, MessageReceiverWithId, RequestId,
},
ComponentId,
};
pub type Mode = u32;
@@ -257,7 +257,7 @@ pub trait ModeRequestHandler: ModeProvider {
pub trait ModeReplyReceiver {
fn try_recv_mode_reply(&self)
-> Result<Option<GenericMessage<ModeReply>>, GenericReceiveError>;
-> Result<Option<GenericMessage<ModeReply>>, GenericReceiveError>;
}
impl<R: MessageReceiverProvider<ModeReply>> ModeReplyReceiver
@@ -309,12 +309,11 @@ pub mod alloc_mod {
}
impl<
From,
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeReplySender
for MessageSenderAndReceiver<ModeReply, From, Sender, Receiver, SenderStore>
From,
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeReplySender for MessageSenderAndReceiver<ModeReply, From, Sender, Receiver, SenderStore>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
@@ -334,12 +333,11 @@ pub mod alloc_mod {
}
impl<
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeReply>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeReplyReceiver
for MessageSenderAndReceiver<To, ModeReply, Sender, Receiver, SenderStore>
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeReply>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeReplyReceiver for MessageSenderAndReceiver<To, ModeReply, Sender, Receiver, SenderStore>
{
fn try_recv_mode_reply(
&self,
@@ -349,15 +347,15 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -376,14 +374,14 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplySender
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplySender
for RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -413,14 +411,14 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplyReceiver
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplyReceiver
for RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -444,10 +442,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ModeReply, ModeRequest, Sender, Receiver, ReplySenderStore>;
impl<
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<ModeRequest>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeRequestHandlerInterface<Sender, Receiver, ReplySenderStore>
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<ModeRequest>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeRequestHandlerInterface<Sender, Receiver, ReplySenderStore>
{
pub fn try_recv_mode_request(
&self,
@@ -474,10 +472,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ModeRequest, ModeReply, Sender, Receiver, RequestSenderStore>;
impl<
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<ModeReply>,
RequestSenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestorInterface<Sender, Receiver, RequestSenderStore>
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<ModeReply>,
RequestSenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestorInterface<Sender, Receiver, RequestSenderStore>
{
pub fn try_recv_mode_reply(
&self,
@@ -531,11 +529,11 @@ pub mod alloc_mod {
}
impl<
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeRequest>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeRequestReceiver
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeRequest>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeRequestReceiver
for MessageSenderAndReceiver<To, ModeRequest, Sender, Receiver, SenderStore>
{
fn try_recv_mode_request(
@@ -546,11 +544,11 @@ pub mod alloc_mod {
}
impl<
From,
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestSender
From,
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestSender
for MessageSenderAndReceiver<ModeRequest, From, Sender, Receiver, SenderStore>
{
fn local_channel_id(&self) -> ComponentId {
@@ -572,14 +570,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -598,14 +596,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestSender
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestSender
for RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -636,14 +634,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestReceiver
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestReceiver
for RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -726,7 +724,7 @@ pub(crate) mod tests {
use core::cell::RefCell;
use std::collections::VecDeque;
use crate::{request::RequestId, ComponentId};
use crate::{ComponentId, request::RequestId};
use super::*;


@@ -2,9 +2,9 @@ use alloc::vec::Vec;
use hashbrown::HashMap;
use crate::{
ComponentId,
mode::{Mode, ModeAndSubmode, ModeReply, ModeRequest, Submode},
request::MessageSenderProvider,
ComponentId,
};
#[cfg(feature = "alloc")]


@@ -47,10 +47,10 @@ use crate::pool::PoolAddr;
use core::fmt::Debug;
use core::mem::size_of;
use paste::paste;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
use spacepackets::ByteConversionError;
use spacepackets::ecss::{EcssEnumU8, EcssEnumU16, EcssEnumU32, EcssEnumU64};
pub use spacepackets::util::ToBeBytes;
use spacepackets::util::UnsignedEnum;
use spacepackets::ByteConversionError;
#[cfg(feature = "alloc")]
use alloc::string::{String, ToString};


@@ -222,7 +222,7 @@ impl Error for PoolError {
}
}
/// Generic trait for pool providers which provide memory pools for variable sized data.
/// Generic trait for pool providers which provide memory pools for variable sized packet data.
///
/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
/// in the store at its core. The API was designed so internal optimizations can be performed
@@ -250,7 +250,7 @@ pub trait PoolProvider {
/// call the user-provided closure and pass a mutable reference to the memory block
/// to the closure. This allows the user to modify the memory block.
fn modify<U: FnMut(&mut [u8])>(&mut self, addr: &PoolAddr, updater: U)
-> Result<(), PoolError>;
-> Result<(), PoolError>;
/// The provider should copy the data from the memory block to the user-provided buffer if
/// it exists.
@@ -258,6 +258,9 @@ pub trait PoolProvider {
/// Delete data inside the pool given a [PoolAddr].
fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError>;
fn clear(&mut self) -> Result<(), PoolError>;
fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError>;
/// Retrieve the length of the data at the given store address.
@@ -281,7 +284,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely.
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self>;
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self>;
/// This function behaves like [PoolProvider::modify], but consumes the provided
/// address and returns a RAII conformant guard object.
@@ -291,7 +294,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read (and modify) the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely.
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self>;
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self>;
}
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
@@ -510,11 +513,11 @@ pub mod heapless_mod {
///
/// * `subpool_memory` - Static memory for a particular subpool to store the actual data.
/// * `sizes_list` - Static sizes list structure to store the size of the data which is
/// actually stored.
/// actually stored.
/// * `num_blocks` - The number of memory blocks inside the subpool.
/// * `set_sizes_list_to_all_free` - If this is set to true, the method will take care
/// of setting all values in the sizes list to [super::STORE_FREE]. This does not have
/// to be done if the user initializes the sizes list to that value themselves.
/// of setting all values in the sizes list to [super::STORE_FREE]. This does not have
/// to be done if the user initializes the sizes list to that value themselves.
pub fn grow(
&mut self,
subpool_memory: &'static mut [u8],
@@ -602,7 +605,7 @@ pub mod heapless_mod {
if i < start_at_subpool as usize {
continue;
}
if pool_cfg.block_size as usize >= req_size {
if pool_cfg.block_size >= req_size {
return Ok(i as u16);
}
}
@@ -638,7 +641,7 @@ pub mod heapless_mod {
fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
let (pool_cfg, _) = self.pool.get(addr.pool_idx as usize)?;
Some(addr.packet_idx as usize * pool_cfg.block_size as usize)
Some(addr.packet_idx as usize * pool_cfg.block_size)
}
}
@@ -707,13 +710,20 @@ pub mod heapless_mod {
let subpool_cfg = self.pool.get(addr.pool_idx as usize).unwrap().0;
let raw_pos = self.raw_pos(&addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap().1
[raw_pos..raw_pos + subpool_cfg.block_size as usize];
[raw_pos..raw_pos + subpool_cfg.block_size];
let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
size_list[addr.packet_idx as usize] = STORE_FREE;
block.fill(0);
Ok(())
}
fn clear(&mut self) -> Result<(), PoolError> {
for size in self.sizes_lists.iter_mut() {
size.fill(STORE_FREE);
}
Ok(())
}
fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError> {
let addr = StaticPoolAddr::from(*addr);
self.validate_addr(&addr)?;
@@ -742,11 +752,11 @@ pub mod heapless_mod {
impl<const MAX_NUM_SUBPOOLS: usize> PoolProviderWithGuards
for StaticHeaplessMemoryPool<MAX_NUM_SUBPOOLS>
{
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self> {
PoolRwGuard::new(self, addr)
}
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self> {
PoolGuard::new(self, addr)
}
}
@@ -770,11 +780,11 @@ mod alloc_mod {
/// # Parameters
///
/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
///   the choking of larger subpools by underdimensioned smaller subpools.
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
///   the choking of larger subpools by underdimensioned smaller subpools.
#[derive(Debug, Clone)]
pub struct StaticPoolConfig {
cfg: Vec<SubpoolConfig>,
@@ -1055,14 +1065,21 @@ mod alloc_mod {
_ => size,
})
}
fn clear(&mut self) -> Result<(), PoolError> {
for size in self.sizes_lists.iter_mut() {
size.fill(STORE_FREE);
}
Ok(())
}
}
impl PoolProviderWithGuards for StaticMemoryPool {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self> {
PoolRwGuard::new(self, addr)
}
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self> {
PoolGuard::new(self, addr)
}
}
@@ -1307,9 +1324,11 @@ mod tests {
let addr = pool_provider.add(&test_buf).expect("Adding data failed");
let read_guard = PoolGuard::new(pool_provider, addr);
drop(read_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_guard_deletion(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1317,9 +1336,11 @@ mod tests {
let addr = pool_provider.add(&test_buf).expect("Adding data failed");
let read_guard = pool_provider.read_with_guard(addr);
drop(read_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_guard_with_release(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1328,9 +1349,11 @@ mod tests {
let mut read_guard = PoolGuard::new(pool_provider, addr);
read_guard.release();
drop(read_guard);
assert!(pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_modify_guard_man_creation(
@@ -1341,9 +1364,11 @@ mod tests {
let mut rw_guard = PoolRwGuard::new(pool_provider, addr);
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_modify_guard(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1352,9 +1377,11 @@ mod tests {
let mut rw_guard = pool_provider.modify_with_guard(addr);
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_modify_pool_index_above_0(pool_provider: &mut impl PoolProvider) {
@@ -1587,228 +1614,243 @@ mod tests {
mod heapless_tests {
use super::*;
use crate::static_subpool;
use std::cell::UnsafeCell;
use std::sync::Mutex;
const SUBPOOL_1_BLOCK_SIZE: usize = 4;
const SUBPOOL_1_NUM_ELEMENTS: u16 = 4;
static SUBPOOL_1: static_cell::ConstStaticCell<
[u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
);
static SUBPOOL_1_SIZES: Mutex<UnsafeCell<[usize; SUBPOOL_1_NUM_ELEMENTS as usize]>> =
Mutex::new(UnsafeCell::new(
[STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize],
));
const SUBPOOL_2_NUM_ELEMENTS: u16 = 2;
const SUBPOOL_2_BLOCK_SIZE: usize = 8;
static SUBPOOL_2: static_cell::ConstStaticCell<
[u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
);
static SUBPOOL_2_SIZES: static_cell::ConstStaticCell<
[usize; SUBPOOL_2_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new([STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize]);
const SUBPOOL_3_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_3_BLOCK_SIZE: usize = 16;
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
const SUBPOOL_4_NUM_ELEMENTS: u16 = 2;
const SUBPOOL_4_BLOCK_SIZE: usize = 16;
static_subpool!(
SUBPOOL_4,
SUBPOOL_4_SIZES,
SUBPOOL_4_NUM_ELEMENTS as usize,
SUBPOOL_4_BLOCK_SIZE
);
const SUBPOOL_5_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_5_BLOCK_SIZE: usize = 8;
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
const SUBPOOL_6_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_6_BLOCK_SIZE: usize = 12;
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
fn small_heapless_pool() -> StaticHeaplessMemoryPool<3> {
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(false);
assert!(heapless_pool
.grow(
SUBPOOL_1.take(),
unsafe { &mut *SUBPOOL_1_SIZES.lock().unwrap().get() },
SUBPOOL_1_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_2.take(),
SUBPOOL_2_SIZES.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
heapless_pool
macro_rules! make_heapless_pool {
($prefix:ident) => {{
paste::paste! {
static [<$prefix _SUBPOOL_1>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_1_SIZES>]: std::sync::Mutex<
std::cell::UnsafeCell<[usize; SUBPOOL_1_NUM_ELEMENTS as usize]>
> = std::sync::Mutex::new(
std::cell::UnsafeCell::new([STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize])
);
static [<$prefix _SUBPOOL_2>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_2_SIZES>]: static_cell::ConstStaticCell<
[usize; SUBPOOL_2_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new(
[STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize]
);
static [<$prefix _SUBPOOL_3>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_3_NUM_ELEMENTS as usize * SUBPOOL_3_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_3_NUM_ELEMENTS as usize * SUBPOOL_3_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_3_SIZES>]: static_cell::ConstStaticCell<
[usize; SUBPOOL_3_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new(
[STORE_FREE; SUBPOOL_3_NUM_ELEMENTS as usize]
);
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(false);
heapless_pool
.grow(
[<$prefix _SUBPOOL_1>].take(),
unsafe { &mut *[<$prefix _SUBPOOL_1_SIZES>].lock().unwrap().get() },
SUBPOOL_1_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
.grow(
[<$prefix _SUBPOOL_2>].take(),
[<$prefix _SUBPOOL_2_SIZES>].take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
.grow(
[<$prefix _SUBPOOL_3>].take(),
[<$prefix _SUBPOOL_3_SIZES>].take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
}
}};
}
#[test]
fn test_heapless_add_and_read() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T0);
generic_test_add_and_read::<16>(&mut pool_provider);
}
#[test]
fn test_add_smaller_than_full_slot() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T1);
generic_test_add_smaller_than_full_slot(&mut pool_provider);
}
#[test]
fn test_delete() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T2);
generic_test_delete(&mut pool_provider);
}
#[test]
fn test_modify() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T3);
generic_test_modify(&mut pool_provider);
}
#[test]
fn test_consecutive_reservation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T4);
generic_test_consecutive_reservation(&mut pool_provider);
}
#[test]
fn test_read_does_not_exist() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T5);
generic_test_read_does_not_exist(&mut pool_provider);
}
#[test]
fn test_store_full() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T6);
generic_test_store_full(&mut pool_provider);
}
#[test]
fn test_invalid_pool_idx() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T7);
generic_test_invalid_pool_idx(&mut pool_provider);
}
#[test]
fn test_invalid_packet_idx() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T8);
generic_test_invalid_packet_idx(&mut pool_provider);
}
#[test]
fn test_add_too_large() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T9);
generic_test_add_too_large(&mut pool_provider);
}
#[test]
fn test_data_too_large_1() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T10);
generic_test_data_too_large_1(&mut pool_provider);
}
#[test]
fn test_free_element_too_large() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T11);
generic_test_free_element_too_large(&mut pool_provider);
}
#[test]
fn test_pool_guard_deletion_man_creation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T12);
generic_test_pool_guard_deletion_man_creation(&mut pool_provider);
}
#[test]
fn test_pool_guard_deletion() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T13);
generic_test_pool_guard_deletion(&mut pool_provider);
}
#[test]
fn test_pool_guard_with_release() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T14);
generic_test_pool_guard_with_release(&mut pool_provider);
}
#[test]
fn test_pool_modify_guard_man_creation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T15);
generic_test_pool_modify_guard_man_creation(&mut pool_provider);
}
#[test]
fn test_pool_modify_guard() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T16);
generic_test_pool_modify_guard(&mut pool_provider);
}
#[test]
fn modify_pool_index_above_0() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T17);
generic_modify_pool_index_above_0(&mut pool_provider);
}
#[test]
fn test_spills_to_higher_subpools() {
static_subpool!(
SUBPOOL_2_T18,
SUBPOOL_2_SIZES_T18,
SUBPOOL_2_NUM_ELEMENTS as usize,
SUBPOOL_2_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_4_T18,
SUBPOOL_4_SIZES_T18,
SUBPOOL_4_NUM_ELEMENTS as usize,
SUBPOOL_4_BLOCK_SIZE
);
let mut heapless_pool: StaticHeaplessMemoryPool<2> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_2.take(),
SUBPOOL_2_SIZES.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_4.take(),
SUBPOOL_4_SIZES.take(),
SUBPOOL_4_NUM_ELEMENTS,
true
)
.is_ok());
assert!(
heapless_pool
.grow(
SUBPOOL_2_T18.take(),
SUBPOOL_2_SIZES_T18.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_4_T18.take(),
SUBPOOL_4_SIZES_T18.take(),
SUBPOOL_4_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spills_to_higher_subpools(&mut heapless_pool);
}
@@ -1816,22 +1858,38 @@ mod tests {
fn test_spillage_fails_as_well() {
let mut heapless_pool: StaticHeaplessMemoryPool<2> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_fails_as_well(&mut heapless_pool);
}
@@ -1839,61 +1897,109 @@ mod tests {
fn test_spillage_works_across_multiple_subpools() {
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_works_across_multiple_subpools(&mut heapless_pool);
}
#[test]
fn test_spillage_fails_across_multiple_subpools() {
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_fails_across_multiple_subpools(&mut heapless_pool);
}
}
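The pattern these reworked tests repeat condenses to a single sequence: declare static subpool storage with `static_subpool!`, then hand both backing arrays over to `grow`. A minimal sketch with illustrative names (everything prefixed DEMO_ and the element-count type are assumptions, not taken from the crate):

// Illustrative values; the real tests use the SUBPOOL_* constants defined elsewhere.
const DEMO_NUM_ELEMENTS: u16 = 4; // element-count type assumed
const DEMO_BLOCK_SIZE: usize = 32;
static_subpool!(
    DEMO_SUBPOOL,
    DEMO_SUBPOOL_SIZES,
    DEMO_NUM_ELEMENTS as usize,
    DEMO_BLOCK_SIZE
);
// One subpool slot, matching the single grow() call below.
let mut heapless_pool: StaticHeaplessMemoryPool<1> = StaticHeaplessMemoryPool::new(true);
heapless_pool
    .grow(
        DEMO_SUBPOOL.take(),
        DEMO_SUBPOOL_SIZES.take(),
        DEMO_NUM_ELEMENTS,
        true
    )
    .expect("growing heapless pool failed");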

View File

@@ -190,7 +190,7 @@ mod tests {
use std::sync::mpsc::{self, TryRecvError};
use crate::{queue::GenericSendError, request::GenericMessage, ComponentId};
use crate::{ComponentId, queue::GenericSendError, request::GenericMessage};
use super::*;

View File

@@ -65,13 +65,13 @@ impl GenericActionReplyPus {
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use crate::{
ComponentId,
action::ActionRequest,
queue::{GenericReceiveError, GenericSendError},
request::{
GenericMessage, MessageReceiverProvider, MessageSenderAndReceiver,
MessageSenderProvider, MessageSenderStoreProvider, RequestId,
},
ComponentId,
};
use super::ActionReplyPus;
@@ -81,10 +81,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ActionReplyPus, ActionRequest, Sender, Receiver, ReplySenderStore>;
impl<
Sender: MessageSenderProvider<ActionReplyPus>,
Receiver: MessageReceiverProvider<ActionRequest>,
ReplySender: MessageSenderStoreProvider<ActionReplyPus, Sender>,
> ActionRequestHandlerInterface<Sender, Receiver, ReplySender>
Sender: MessageSenderProvider<ActionReplyPus>,
Receiver: MessageReceiverProvider<ActionRequest>,
ReplySender: MessageSenderStoreProvider<ActionReplyPus, Sender>,
> ActionRequestHandlerInterface<Sender, Receiver, ReplySender>
{
pub fn try_recv_action_request(
&self,
@@ -114,10 +114,10 @@ pub mod alloc_mod {
>;
impl<
Sender: MessageSenderProvider<ActionRequest>,
Receiver: MessageReceiverProvider<ActionReplyPus>,
RequestSenderStore: MessageSenderStoreProvider<ActionRequest, Sender>,
> ActionRequestorInterface<Sender, Receiver, RequestSenderStore>
Sender: MessageSenderProvider<ActionRequest>,
Receiver: MessageReceiverProvider<ActionReplyPus>,
RequestSenderStore: MessageSenderStoreProvider<ActionRequest, Sender>,
> ActionRequestorInterface<Sender, Receiver, RequestSenderStore>
{
pub fn try_recv_action_reply(
&self,
@@ -141,12 +141,12 @@ pub mod std_mod {
use std::sync::mpsc;
use crate::{
ComponentId,
pus::{
ActivePusRequestStd, ActiveRequest, DefaultActiveRequestMap,
verification::{self, TcStateToken},
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap,
},
request::{MessageSenderMap, OneMessageSender},
ComponentId,
};
use super::*;
@@ -157,7 +157,7 @@ pub mod std_mod {
common: ActivePusRequestStd,
}
impl ActiveRequestProvider for ActivePusActionRequestStd {
impl ActiveRequest for ActivePusActionRequestStd {
delegate::delegate! {
to self.common {
fn target_id(&self) -> ComponentId;

View File

@@ -1,9 +1,11 @@
use crate::pus::source_buffer_large_enough;
use arbitrary_int::u11;
use spacepackets::ByteConversionError;
use spacepackets::SpHeader;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ByteConversionError;
use spacepackets::{SpHeader, MAX_APID};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
@@ -11,16 +13,13 @@ pub use alloc_mod::*;
pub use spacepackets::ecss::event::*;
pub struct EventReportCreator {
apid: u16,
apid: u11,
pub dest_id: u16,
}
impl EventReportCreator {
pub fn new(apid: u16, dest_id: u16) -> Option<Self> {
if apid > MAX_APID {
return None;
}
Some(Self { dest_id, apid })
pub fn new(apid: u11, dest_id: u16) -> Self {
Self { dest_id, apid }
}
pub fn event_info<'time, 'src_data>(
@@ -124,7 +123,7 @@ impl EventReportCreator {
SpHeader::new_from_apid(self.apid),
sec_header,
&src_data_buf[0..current_idx],
true,
CreatorConfig::default(),
))
}
}
@@ -132,64 +131,64 @@ impl EventReportCreator {
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
use crate::pus::{EcssTmSender, EcssTmtcError};
use crate::ComponentId;
use crate::pus::{EcssTmSender, EcssTmtcError};
use alloc::vec;
use alloc::vec::Vec;
use core::cell::RefCell;
use spacepackets::ecss::PusError;
pub trait EventTmHookProvider {
pub trait EventTmHook {
fn modify_tm(&self, tm: &mut PusTmCreator);
}
#[derive(Default)]
pub struct DummyEventHook {}
impl EventTmHookProvider for DummyEventHook {
impl EventTmHook for DummyEventHook {
fn modify_tm(&self, _tm: &mut PusTmCreator) {}
}
pub struct EventReporter<EventTmHook: EventTmHookProvider = DummyEventHook> {
pub struct EventReporter<EventTmHookInstance: EventTmHook = DummyEventHook> {
id: ComponentId,
// Use the interior mutability pattern here. This is just an intermediate buffer for PUS event packet
// generation.
source_data_buf: RefCell<Vec<u8>>,
pub report_creator: EventReportCreator,
pub tm_hook: EventTmHook,
pub tm_hook: EventTmHookInstance,
}
impl EventReporter<DummyEventHook> {
pub fn new(
id: ComponentId,
default_apid: u16,
default_apid: u11,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
) -> Self {
let reporter = EventReportCreator::new(default_apid, default_dest_id);
Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook: DummyEventHook::default(),
})
}
}
}
impl<EventTmHook: EventTmHookProvider> EventReporter<EventTmHook> {
impl<EventTmHookInstance: EventTmHook> EventReporter<EventTmHookInstance> {
pub fn new_with_hook(
id: ComponentId,
default_apid: u16,
default_apid: u11,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
tm_hook: EventTmHook,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
tm_hook: EventTmHookInstance,
) -> Self {
let reporter = EventReportCreator::new(default_apid, default_dest_id);
Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook,
})
}
}
pub fn event_info(
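From the caller side, the reworked constructors shown above are now infallible and take the APID as a `u11`. A short sketch (the literal values are illustrative, not taken from the crate):

use arbitrary_int::u11;

// Default reporter with the DummyEventHook.
let reporter = EventReporter::new(0, u11::new(0xee), 0, 128);
// Reporter with a user-provided hook implementing the renamed EventTmHook trait.
let hooked_reporter =
    EventReporter::new_with_hook(0, u11::new(0xee), 0, 128, DummyEventHook::default());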
@@ -265,18 +264,18 @@ mod alloc_mod {
#[cfg(test)]
mod tests {
use super::*;
use crate::events::{EventU32, Severity};
use crate::ComponentId;
use crate::events_legacy::{EventU32, Severity};
use crate::pus::test_util::TEST_COMPONENT_ID_0;
use crate::pus::tests::CommonTmInfo;
use crate::pus::{ChannelWithId, EcssTmSender, EcssTmtcError, PusTmVariant};
use crate::ComponentId;
use spacepackets::ecss::PusError;
use spacepackets::ByteConversionError;
use spacepackets::ecss::PusError;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::vec::Vec;
const EXAMPLE_APID: u16 = 0xee;
const EXAMPLE_APID: u11 = u11::new(0xee);
const EXAMPLE_GROUP_ID: u16 = 2;
const EXAMPLE_EVENT_ID_0: u16 = 1;
#[allow(dead_code)]
@@ -376,14 +375,12 @@ mod tests {
error_data: Option<&[u8]>,
) {
let mut sender = TestSender::default();
let reporter = EventReporter::new(
let mut reporter = EventReporter::new(
TEST_COMPONENT_ID_0.id(),
EXAMPLE_APID,
0,
max_event_aux_data_buf,
);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let time_stamp_empty: [u8; 7] = [0; 7];
let mut error_copy = Vec::new();
if let Some(err_data) = error_data {
@@ -474,9 +471,7 @@ mod tests {
fn insufficient_buffer() {
let mut sender = TestSender::default();
for i in 0..3 {
let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let mut reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
check_buf_too_small(&mut reporter, &mut sender, i);
}
}

View File

@@ -1,18 +1,18 @@
use crate::events::{EventU32, GenericEvent, Severity};
use crate::events_legacy::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity};
use crate::events_legacy::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
use crate::pus::EcssTmSender;
use crate::pus::EcssTmtcError;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "heapless")]
pub use heapless_mod::*;
@@ -46,7 +46,7 @@ pub mod heapless_mod {
// regular Event type again.
#[derive(Default)]
pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
disabled: heapless::index_set::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}
@@ -100,9 +100,9 @@ pub mod alloc_mod {
use core::marker::PhantomData;
use crate::{
events::EventU16,
events_legacy::EventU16,
params::{Params, WritableToBeBytes},
pus::event::{DummyEventHook, EventTmHookProvider},
pus::event::{DummyEventHook, EventTmHook},
};
use super::*;
@@ -151,20 +151,20 @@ pub mod alloc_mod {
pub struct PusEventTmCreatorWithMap<
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHook: EventTmHookProvider = DummyEventHook,
EventTmHookInstance: EventTmHook = DummyEventHook,
> {
pub reporter: EventReporter<EventTmHook>,
pub reporter: EventReporter<EventTmHookInstance>,
reporting_map: ReportingMap,
phantom: PhantomData<Event>,
}
impl<
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHook: EventTmHookProvider,
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHook>
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHookInstance: EventTmHook,
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHookInstance>
{
pub fn new(reporter: EventReporter<EventTmHook>, backend: ReportingMap) -> Self {
pub fn new(reporter: EventReporter<EventTmHookInstance>, backend: ReportingMap) -> Self {
Self {
reporter,
reporting_map: backend,
@@ -262,10 +262,10 @@ pub mod alloc_mod {
}
}
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHook: EventTmHookProvider>
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHook>
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHookInstance: EventTmHook>
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHookInstance>
{
pub fn new_with_default_backend(reporter: EventReporter<EventTmHook>) -> Self {
pub fn new_with_default_backend(reporter: EventReporter<EventTmHookInstance>) -> Self {
Self {
reporter,
reporting_map: DefaultPusEventReportingMap::default(),
@@ -311,29 +311,28 @@ pub mod alloc_mod {
mod tests {
use alloc::string::{String, ToString};
use alloc::vec;
use arbitrary_int::u11;
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;
use super::*;
use crate::request::UniqueApidTargetId;
use crate::{events::SeverityInfo, tmtc::PacketAsVec};
use crate::{events_legacy::SeverityInfo, tmtc::PacketAsVec};
use std::sync::mpsc::{self, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u16 = 0x02;
const TEST_APID: u11 = u11::new(0x02);
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);
fn create_basic_man_1() -> DefaultPusEventU32TmCreator {
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
.expect("Creating event repoter failed");
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
PusEventTmCreatorWithMap::new_with_default_backend(reporter)
}
fn create_basic_man_2() -> DefaultPusEventU32TmCreator {
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
.expect("Creating event repoter failed");
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
let backend = DefaultPusEventReportingMap::default();
PusEventTmCreatorWithMap::new(reporter, backend)
}
@@ -409,7 +408,7 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
@@ -437,7 +436,7 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
assert_eq!(tm.user_data().len(), 4 + param_data.len());

View File

@@ -1,22 +1,22 @@
use crate::events::EventU32;
use crate::events_legacy::EventU32;
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::TcStateToken;
use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
use crate::queue::GenericSendError;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::event::Subservice;
use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider;
use super::{
EcssTcInMemConversionProvider, EcssTcReceiver, EcssTmSender, GenericConversionError,
CacheAndReadRawEcssTc, EcssTcReceiver, EcssTmSender, GenericConversionError,
GenericRoutingError, HandlingStatus, PusServiceHelper,
};
pub struct PusEventServiceHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper:
@@ -25,11 +25,11 @@ pub struct PusEventServiceHandler<
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -122,7 +122,7 @@ impl<
| Subservice::TmHighSeverityReport => {
return Err(PusPacketHandlingError::RequestConversion(
GenericConversionError::WrongService(tc.subservice()),
))
));
}
Subservice::TcEnableEventGeneration => {
handle_enable_disable_request(true)?;
@@ -144,16 +144,19 @@ impl<
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::event::Subservice;
use spacepackets::time::{cds, TimeWriter};
use spacepackets::time::{TimeWriter, cds};
use spacepackets::util::UnsignedEnum;
use spacepackets::{
SpHeader,
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
},
SpHeader,
};
use std::sync::mpsc::{self, Sender};
@@ -165,25 +168,25 @@ mod tests {
use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::{
events::EventU32,
events_legacy::EventU32,
pus::{
DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, PusPacketHandlingError,
event_man::EventRequestWithToken,
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{TcStateAccepted, VerificationToken},
DirectPusPacketHandlerResult, EcssTcInSharedPoolConverter, PusPacketHandlingError,
},
};
use super::PusEventServiceHandler;
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events::Severity::Info, 5, 25);
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events_legacy::Severity::Info, 5, 25);
struct Pus5HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusEventServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>,
}
@@ -199,8 +202,12 @@ mod tests {
}
impl PusTestHarness for Pus5HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -238,14 +245,14 @@ mod tests {
expected_event_req: EventRequest,
event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
) {
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
let mut app_data = [0; 4];
TEST_EVENT_0
.write_to_be_bytes(&mut app_data)
.expect("writing test event failed");
let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let request_id = token.request_id();
test_harness.handle_one_tc().unwrap();
@@ -303,10 +310,11 @@ mod tests {
fn test_sending_custom_subservice() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
@@ -322,11 +330,12 @@ mod tests {
fn test_sending_invalid_app_data() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc =
PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());

View File

@@ -2,6 +2,7 @@
//!
//! This module contains structures to make working with the PUS C standard easier.
//! The satrs-example application contains various usage examples of these components.
use crate::ComponentId;
use crate::pool::{PoolAddr, PoolError};
use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken};
use crate::queue::{GenericReceiveError, GenericSendError};
@@ -9,19 +10,18 @@ use crate::request::{GenericMessage, MessageMetadata, RequestId};
#[cfg(feature = "alloc")]
use crate::tmtc::PacketAsVec;
use crate::tmtc::PacketInPool;
use crate::ComponentId;
use core::fmt::{Display, Formatter};
use core::time::Duration;
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
use downcast_rs::{Downcast, impl_downcast};
#[cfg(feature = "alloc")]
use dyn_clone::DynClone;
#[cfg(feature = "std")]
use std::error::Error;
use spacepackets::ecss::PusError;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::PusError;
use spacepackets::{ByteConversionError, SpHeader};
pub mod action;
@@ -296,7 +296,7 @@ pub trait PacketSenderPusTc: Send {
) -> Result<(), Self::Error>;
}
pub trait ActiveRequestMapProvider<V>: Sized {
pub trait ActiveRequestStore<V>: Sized {
fn insert(&mut self, request_id: &RequestId, request_info: V);
fn get(&self, request_id: RequestId) -> Option<&V>;
fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V>;
@@ -309,7 +309,7 @@ pub trait ActiveRequestMapProvider<V>: Sized {
fn for_each_mut<F: FnMut(&RequestId, &mut V)>(&mut self, f: F);
}
pub trait ActiveRequestProvider {
pub trait ActiveRequest {
fn target_id(&self) -> ComponentId;
fn token(&self) -> TcStateToken;
fn set_token(&mut self, token: TcStateToken);
@@ -330,7 +330,7 @@ pub trait PusRequestRouter<Request> {
) -> Result<(), Self::Error>;
}
pub trait PusReplyHandler<ActiveRequestInfo: ActiveRequestProvider, ReplyType> {
pub trait PusReplyHandler<ActiveRequestInfo: ActiveRequest, ReplyType> {
type Error;
/// This function handles a reply for a given PUS request and returns whether that request
@@ -449,7 +449,7 @@ pub mod alloc_mod {
/// Having a dedicated trait for this allows maximum flexibility and tailoring of the standard.
/// The only requirement is that a valid active request information instance and a request
/// are returned by the core conversion function. The active request type needs to fulfill
/// the [ActiveRequestProvider] trait bound.
/// the [ActiveRequest] trait bound.
///
/// The user should take care of performing the error handling as well. Some of the following
/// aspects might be relevant:
@@ -459,7 +459,7 @@ pub mod alloc_mod {
///
/// A [VerificationReportingProvider] instance is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements.
pub trait PusTcToRequestConverter<ActiveRequestInfo: ActiveRequestProvider, Request> {
pub trait PusTcToRequestConverter<ActiveRequestInfo: ActiveRequest, Request> {
type Error;
fn convert(
&mut self,
@@ -480,7 +480,7 @@ pub mod alloc_mod {
}
}
impl<V> ActiveRequestMapProvider<V> for DefaultActiveRequestMap<V> {
impl<V> ActiveRequestStore<V> for DefaultActiveRequestMap<V> {
fn insert(&mut self, request_id: &RequestId, request: V) {
self.0.insert(*request_id, request);
}
@@ -659,18 +659,18 @@ pub mod alloc_mod {
#[cfg(feature = "std")]
pub mod std_mod {
use super::*;
use crate::ComponentId;
use crate::pool::{
PoolAddr, PoolError, PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool,
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use crate::ComponentId;
use alloc::vec::Vec;
use core::time::Duration;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::StdTimestampError;
use spacepackets::ByteConversionError;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::time::StdTimestampError;
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::TryRecvError;
@@ -680,7 +680,7 @@ pub mod std_mod {
pub use cb_mod::*;
use super::verification::{TcStateToken, VerificationReportingProvider};
use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory};
use super::{AcceptedEcssTcAndToken, ActiveRequest, TcInMemory};
use crate::tmtc::PacketInPool;
impl From<mpsc::SendError<PoolAddr>> for EcssTmtcError {
@@ -845,7 +845,7 @@ pub mod std_mod {
}
}
impl ActiveRequestProvider for ActivePusRequestStd {
impl ActiveRequest for ActivePusRequestStd {
fn target_id(&self) -> ComponentId {
self.target_id
}
@@ -947,7 +947,9 @@ pub mod std_mod {
}
}
pub trait EcssTcInMemConversionProvider {
/// This trait provides an abstraction for caching a raw ECSS telecommand and then
/// providing the [PusTcReader] abstraction to read the cached raw telecommand.
pub trait CacheAndReadRawEcssTc {
fn cache(&mut self, possible_packet: &TcInMemory) -> Result<(), PusTcFromMemError>;
fn tc_slice_raw(&self) -> &[u8];
@@ -959,15 +961,11 @@ pub mod std_mod {
possible_packet: &TcInMemory,
) -> Result<PusTcReader<'_>, PusTcFromMemError> {
self.cache(possible_packet)?;
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
fn convert(&self) -> Result<PusTcReader<'_>, PusTcFromMemError> {
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
}
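The default methods above reduce the trait to two steps: cache the raw TC bytes, then hand out a `PusTcReader` over them. A minimal sketch of how generic code might consume it (the helper function itself is made up for illustration):

use spacepackets::ecss::PusPacket;

fn tc_service(
    cacher: &mut impl CacheAndReadRawEcssTc,
    tc_in_memory: &TcInMemory,
) -> Result<u8, PusTcFromMemError> {
    // Step 1: copy or reference the raw telecommand into the cacher's buffer.
    cacher.cache(tc_in_memory)?;
    // Step 2: parse the cached bytes into a PusTcReader and inspect it.
    let reader = cacher.convert()?;
    Ok(PusPacket::service(&reader))
}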
@@ -975,12 +973,12 @@ pub mod std_mod {
/// Please note that this structure is not able to convert TCs which are stored inside a
/// [SharedStaticMemoryPool].
#[derive(Default, Clone)]
pub struct EcssTcInVecConverter {
pub struct EcssTcVecCacher {
sender_id: Option<ComponentId>,
pub pus_tc_raw: Option<Vec<u8>>,
}
impl EcssTcInMemConversionProvider for EcssTcInVecConverter {
impl CacheAndReadRawEcssTc for EcssTcVecCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
self.pus_tc_raw = None;
match tc_in_memory {
@@ -1012,13 +1010,13 @@ pub mod std_mod {
/// packets should be avoided. Please note that this structure is not able to convert TCs which
/// are stored as a `Vec<u8>`.
#[derive(Clone)]
pub struct EcssTcInSharedPoolConverter {
pub struct EcssTcInSharedPoolCacher {
sender_id: Option<ComponentId>,
shared_tc_pool: SharedStaticMemoryPool,
pus_buf: Vec<u8>,
}
impl EcssTcInSharedPoolConverter {
impl EcssTcInSharedPoolCacher {
pub fn new(shared_tc_store: SharedStaticMemoryPool, max_expected_tc_size: usize) -> Self {
Self {
sender_id: None,
@@ -1049,7 +1047,7 @@ pub mod std_mod {
}
}
impl EcssTcInMemConversionProvider for EcssTcInSharedPoolConverter {
impl CacheAndReadRawEcssTc for EcssTcInSharedPoolCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
match tc_in_memory {
super::TcInMemory::Pool(packet_in_pool) => {
@@ -1074,38 +1072,38 @@ pub mod std_mod {
// TODO: alloc feature flag?
#[derive(Clone)]
pub enum EcssTcInMemConverter {
Static(EcssTcInSharedPoolConverter),
Heap(EcssTcInVecConverter),
pub enum EcssTcCacher {
Static(EcssTcInSharedPoolCacher),
Heap(EcssTcVecCacher),
}
impl EcssTcInMemConverter {
pub fn new_static(static_store_converter: EcssTcInSharedPoolConverter) -> Self {
EcssTcInMemConverter::Static(static_store_converter)
impl EcssTcCacher {
pub fn new_static(static_store_converter: EcssTcInSharedPoolCacher) -> Self {
Self::Static(static_store_converter)
}
pub fn new_heap(heap_converter: EcssTcInVecConverter) -> Self {
EcssTcInMemConverter::Heap(heap_converter)
pub fn new_heap(heap_converter: EcssTcVecCacher) -> Self {
Self::Heap(heap_converter)
}
}
impl EcssTcInMemConversionProvider for EcssTcInMemConverter {
impl CacheAndReadRawEcssTc for EcssTcCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
match self {
EcssTcInMemConverter::Static(converter) => converter.cache(tc_in_memory),
EcssTcInMemConverter::Heap(converter) => converter.cache(tc_in_memory),
Self::Static(converter) => converter.cache(tc_in_memory),
Self::Heap(converter) => converter.cache(tc_in_memory),
}
}
fn tc_slice_raw(&self) -> &[u8] {
match self {
EcssTcInMemConverter::Static(converter) => converter.tc_slice_raw(),
EcssTcInMemConverter::Heap(converter) => converter.tc_slice_raw(),
Self::Static(converter) => converter.tc_slice_raw(),
Self::Heap(converter) => converter.tc_slice_raw(),
}
}
fn sender_id(&self) -> Option<ComponentId> {
match self {
EcssTcInMemConverter::Static(converter) => converter.sender_id(),
EcssTcInMemConverter::Heap(converter) => converter.sender_id(),
Self::Static(converter) => converter.sender_id(),
Self::Heap(converter) => converter.sender_id(),
}
}
}
@@ -1128,12 +1126,12 @@ pub mod std_mod {
/// groups (for example individual services).
///
/// This base class can handle PUS telecommands backed by different memory storage mechanisms
/// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience
/// by using the [CacheAndReadRawEcssTc] abstraction. This object provides some convenience
/// methods to make the generic parts of TC handling easier.
pub struct PusServiceHelper<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub common: PusServiceBase<TcReceiver, TmSender, VerificationReporter>,
@@ -1141,11 +1139,11 @@ pub mod std_mod {
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
id: ComponentId,
@@ -1260,16 +1258,17 @@ pub(crate) fn source_buffer_large_enough(
#[cfg(any(feature = "test_util", test))]
pub mod test_util {
use arbitrary_int::u11;
use spacepackets::ecss::{tc::PusTcCreator, tm::PusTmReader};
use crate::request::UniqueApidTargetId;
use super::{
verification::{self, TcStateAccepted, VerificationToken},
DirectPusPacketHandlerResult, PusPacketHandlingError,
verification::{self, TcStateAccepted, VerificationToken},
};
pub const TEST_APID: u16 = 0x101;
pub const TEST_APID: u11 = u11::new(0x101);
pub const TEST_UNIQUE_ID_0: u32 = 0x05;
pub const TEST_UNIQUE_ID_1: u32 = 0x06;
@@ -1279,7 +1278,7 @@ pub mod test_util {
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_1);
pub trait PusTestHarness {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
@@ -1292,7 +1291,7 @@ pub mod test_util {
pub trait SimplePusPacketHandler {
fn handle_one_tc(&mut self)
-> Result<DirectPusPacketHandlerResult, PusPacketHandlingError>;
-> Result<DirectPusPacketHandlerResult, PusPacketHandlingError>;
}
}
@@ -1300,33 +1299,35 @@ pub mod test_util {
pub mod tests {
use core::cell::RefCell;
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, RwLock};
use std::sync::{RwLock, mpsc};
use alloc::collections::VecDeque;
use alloc::vec::Vec;
use arbitrary_int::{u11, u14};
use satrs_shared::res_code::ResultU16;
use spacepackets::CcsdsPacket;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
use spacepackets::ecss::tm::{GenericPusTmSecondaryHeader, PusTmCreator, PusTmReader};
use spacepackets::ecss::{PusPacket, WritablePusPacket};
use spacepackets::CcsdsPacket;
use test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use crate::ComponentId;
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
use crate::pus::verification::{RequestId, VerificationReporter};
use crate::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool, SharedPacketPool};
use crate::ComponentId;
use super::verification::test_util::TestVerificationReporter;
use super::verification::{
TcStateAccepted, VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
TcStateAccepted, VerificationReporterConfig, VerificationReportingProvider,
VerificationToken,
};
use super::*;
#[derive(Debug, Eq, PartialEq, Clone)]
pub(crate) struct CommonTmInfo {
pub subservice: u8,
pub apid: u16,
pub seq_count: u16,
pub apid: u11,
pub seq_count: u14,
pub msg_counter: u16,
pub dest_id: u16,
pub timestamp: Vec<u8>,
@@ -1335,8 +1336,8 @@ pub mod tests {
impl CommonTmInfo {
pub fn new(
subservice: u8,
apid: u16,
seq_count: u16,
apid: u11,
seq_count: u14,
msg_counter: u16,
dest_id: u16,
timestamp: &[u8],
@@ -1352,11 +1353,11 @@ pub mod tests {
}
pub fn new_zero_seq_count(
subservice: u8,
apid: u16,
apid: u11,
dest_id: u16,
timestamp: &[u8],
) -> Self {
Self::new(subservice, apid, 0, 0, dest_id, timestamp)
Self::new(subservice, apid, u14::new(0), 0, dest_id, timestamp)
}
pub fn new_from_tm(tm: &PusTmCreator) -> Self {
@@ -1386,7 +1387,7 @@ pub mod tests {
pub type PusServiceHelperStatic = PusServiceHelper<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>;
@@ -1408,12 +1409,12 @@ pub mod tests {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel(10);
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
let verification_handler =
VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg);
let test_srv_tm_sender =
PacketSenderWithSharedPool::new(tm_tx, shared_tm_pool_wrapper.clone());
let in_store_converter = EcssTcInSharedPoolConverter::new(shared_tc_pool.clone(), 2048);
let in_store_converter = EcssTcInSharedPoolCacher::new(shared_tc_pool.clone(), 2048);
(
Self {
pus_buf: RefCell::new([0; 2048]),
@@ -1459,7 +1460,7 @@ pub mod tests {
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
PusTmReader::new(&self.tm_buf, 7).unwrap().0
PusTmReader::new(&self.tm_buf, 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1476,7 +1477,7 @@ pub mod tests {
let tm_in_pool = next_msg.unwrap();
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
let tm = PusTmReader::new(&tm_raw, 7).unwrap();
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
@@ -1491,12 +1492,8 @@ pub mod tests {
tc_sender: mpsc::Sender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<PacketAsVec>,
}
pub type PusServiceHelperDynamic = PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>;
pub type PusServiceHelperDynamic =
PusServiceHelper<MpscTcReceiver, MpscTmAsVecSender, EcssTcVecCacher, VerificationReporter>;
impl PusServiceHandlerWithVecCommon {
pub fn new_with_standard_verif_reporter(
@@ -1505,10 +1502,10 @@ pub mod tests {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
let verification_handler =
VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg);
let in_store_converter = EcssTcInVecConverter::default();
let in_store_converter = EcssTcVecCacher::default();
(
Self {
current_tm: None,
@@ -1534,14 +1531,14 @@ pub mod tests {
PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
EcssTcVecCacher,
TestVerificationReporter,
>,
) {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let in_store_converter = EcssTcInVecConverter::default();
let in_store_converter = EcssTcVecCacher::default();
let verification_handler = TestVerificationReporter::new(id);
(
Self {
@@ -1584,9 +1581,7 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
self.current_tm = Some(next_msg.unwrap().packet);
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7)
.unwrap()
.0
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1601,7 +1596,7 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let next_msg = next_msg.unwrap();
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap().0;
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap();
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);

View File

@@ -2,7 +2,8 @@
//!
//! The core data structure of this module is the [PusScheduler]. This structure can be used
//! to perform the scheduling of telecommands as specified in the ECSS standard.
use core::fmt::{Debug, Display, Formatter};
use arbitrary_int::{u11, u14};
use core::fmt::Debug;
use core::time::Duration;
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@@ -11,8 +12,6 @@ use spacepackets::ecss::tc::{GenericPusTcSecondaryHeader, IsPusTelecommand, PusT
use spacepackets::ecss::{PusError, PusPacket, WritablePusPacket};
use spacepackets::time::{CcsdsTimeProvider, TimeReader, TimeWriter, TimestampError, UnixTime};
use spacepackets::{ByteConversionError, CcsdsPacket};
#[cfg(feature = "std")]
use std::error::Error;
use crate::pool::{PoolError, PoolProvider};
#[cfg(feature = "alloc")]
@@ -24,22 +23,26 @@ pub use alloc_mod::*;
/// the source ID found in the secondary header of PUS telecommands.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RequestId {
pub(crate) source_id: u16,
pub(crate) apid: u16,
pub(crate) seq_count: u16,
pub(crate) apid: u11,
pub(crate) seq_count: u14,
}
impl RequestId {
pub fn source_id(&self) -> u16 {
#[inline]
pub const fn source_id(&self) -> u16 {
self.source_id
}
pub fn apid(&self) -> u16 {
#[inline]
pub const fn apid(&self) -> u11 {
self.apid
}
pub fn seq_count(&self) -> u16 {
#[inline]
pub const fn seq_count(&self) -> u14 {
self.seq_count
}
@@ -53,8 +56,10 @@ impl RequestId {
}
}
pub fn as_u64(&self) -> u64 {
((self.source_id as u64) << 32) | ((self.apid as u64) << 16) | self.seq_count as u64
pub const fn as_u64(&self) -> u64 {
((self.source_id as u64) << 32)
| ((self.apid.value() as u64) << 16)
| self.seq_count.value() as u64
}
}
@@ -137,107 +142,39 @@ impl<TimeProvider: CcsdsTimeProvider + Clone> TimeWindow<TimeProvider> {
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
pub enum ScheduleError {
PusError(PusError),
#[error("pus error: {0}")]
PusError(#[from] PusError),
/// The release time is within the time margin added on top of the current time.
/// The fields hold the current time, the configured time margin, and the requested
/// release time.
#[error("release time in margin")]
ReleaseTimeInTimeMargin {
current_time: UnixTime,
time_margin: Duration,
release_time: UnixTime,
},
/// Nested time-tagged commands are not allowed.
#[error("nested scheduled tc")]
NestedScheduledTc,
StoreError(PoolError),
#[error("store error")]
Pool(#[from] PoolError),
#[error("tc data empty")]
TcDataEmpty,
TimestampError(TimestampError),
#[error("timestamp error: {0}")]
TimestampError(#[from] TimestampError),
#[error("wrong subservice number {0}")]
WrongSubservice(u8),
#[error("wrong service number {0}")]
WrongService(u8),
ByteConversionError(ByteConversionError),
}
impl Display for ScheduleError {
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match self {
ScheduleError::PusError(e) => {
write!(f, "Pus Error: {e}")
}
ScheduleError::ReleaseTimeInTimeMargin {
current_time,
time_margin,
release_time,
} => {
write!(
f,
"time margin too short, current time: {current_time:?}, time margin: {time_margin:?}, release time: {release_time:?}"
)
}
ScheduleError::NestedScheduledTc => {
write!(f, "nested scheduling is not allowed")
}
ScheduleError::StoreError(e) => {
write!(f, "pus scheduling: {e}")
}
ScheduleError::TcDataEmpty => {
write!(f, "empty TC data field")
}
ScheduleError::TimestampError(e) => {
write!(f, "pus scheduling: {e}")
}
ScheduleError::WrongService(srv) => {
write!(f, "pus scheduling: wrong service number {srv}")
}
ScheduleError::WrongSubservice(subsrv) => {
write!(f, "pus scheduling: wrong subservice number {subsrv}")
}
ScheduleError::ByteConversionError(e) => {
write!(f, "pus scheduling: {e}")
}
}
}
}
impl From<PusError> for ScheduleError {
fn from(e: PusError) -> Self {
Self::PusError(e)
}
}
impl From<PoolError> for ScheduleError {
fn from(e: PoolError) -> Self {
Self::StoreError(e)
}
}
impl From<TimestampError> for ScheduleError {
fn from(e: TimestampError) -> Self {
Self::TimestampError(e)
}
}
impl From<ByteConversionError> for ScheduleError {
fn from(e: ByteConversionError) -> Self {
Self::ByteConversionError(e)
}
}
#[cfg(feature = "std")]
impl Error for ScheduleError {
fn source(&self) -> Option<&(dyn Error + 'static)> {
match self {
ScheduleError::PusError(e) => Some(e),
ScheduleError::StoreError(e) => Some(e),
ScheduleError::TimestampError(e) => Some(e),
ScheduleError::ByteConversionError(e) => Some(e),
_ => None,
}
}
#[error("byte conversion error: {0}")]
ByteConversionError(#[from] ByteConversionError),
}
/// Generic trait for scheduler objects which are able to schedule ECSS PUS C packets.
pub trait PusSchedulerProvider {
pub trait PusScheduler {
type TimeProvider: CcsdsTimeProvider + TimeReader;
fn reset(&mut self, store: &mut (impl PoolProvider + ?Sized)) -> Result<(), PoolError>;
@@ -292,10 +229,10 @@ pub trait PusSchedulerProvider {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service(&check_tc) == 11 && PusPacket::subservice(&check_tc) == 4 {
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -340,8 +277,8 @@ pub fn generate_insert_telecommand_app_data(
pub mod alloc_mod {
use alloc::{
collections::{
btree_map::{Entry, Range},
BTreeMap,
btree_map::{Entry, Range},
},
vec::Vec,
};
@@ -394,7 +331,7 @@ pub mod alloc_mod {
///
/// Currently, sub-schedules and groups are not supported.
#[derive(Debug)]
pub struct PusScheduler {
pub struct PusSchedulerAlloc {
// TODO: Use MonotonicTime from tai-time crate instead of UnixTime and cache leap seconds.
// TODO: Introduce optional limit of commands stored in the TC map. If a limit is set,
// there will be a check for each insertion whether the map is full, making the memory
@@ -404,19 +341,20 @@ pub mod alloc_mod {
time_margin: Duration,
enabled: bool,
}
impl PusScheduler {
impl PusSchedulerAlloc {
/// Create a new PUS scheduler.
///
/// # Arguments
///
/// * `init_current_time` - The time to initialize the scheduler with.
/// * `time_margin` - This time margin is used when inserting new telecommands into the
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
/// should be large enough to accommodate the largest expected TC packets.
/// should be large enough to accommodate the largest expected TC packets.
pub fn new(init_current_time: UnixTime, time_margin: Duration) -> Self {
PusScheduler {
PusSchedulerAlloc {
tc_map: Default::default(),
current_time: init_current_time,
time_margin,
@@ -438,10 +376,12 @@ pub mod alloc_mod {
num_entries
}
#[inline]
pub fn update_time(&mut self, current_time: UnixTime) {
self.current_time = current_time;
}
#[inline]
pub fn current_time(&self) -> &UnixTime {
&self.current_time
}
@@ -480,10 +420,10 @@ pub mod alloc_mod {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service(&check_tc) == 11 && PusPacket::subservice(&check_tc) == 4 {
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -683,10 +623,10 @@ pub mod alloc_mod {
/// # Arguments
///
/// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// * `tc_store` - The holding store of the telecommands.
/// * `tc_buf` - Buffer to hold each telecommand being released.
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
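A releaser closure satisfying the `FnMut(bool, &TcInfo, &[u8]) -> bool` bound might look as follows; it would then be passed to `release_telecommands_with_buffer` together with the TC pool and a scratch buffer as listed above (the forwarding logic is only a placeholder):

let mut releaser = |scheduler_enabled: bool, tc_info: &TcInfo, tc_raw: &[u8]| -> bool {
    if scheduler_enabled {
        // Forward tc_raw (whose store address is carried in tc_info) to the TC routing path here.
        let _ = (tc_info, tc_raw);
    }
    // Returning true deletes the released TC from the store; returning false keeps it
    // and, as noted above, risks leaking the pool slot.
    true
};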
@@ -785,7 +725,7 @@ pub mod alloc_mod {
}
}
impl PusSchedulerProvider for PusScheduler {
impl PusScheduler for PusSchedulerAlloc {
type TimeProvider = cds::CdsTime;
/// This will disable the scheduler and clear the schedule as specified in 6.11.4.4.
@@ -855,10 +795,12 @@ mod tests {
PoolAddr, PoolError, PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig,
};
use alloc::collections::btree_map::Range;
use arbitrary_int::traits::Integer;
use arbitrary_int::{u11, u14};
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::{cds, TimeWriter, UnixTime};
use spacepackets::{PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader};
use spacepackets::ecss::{CreatorConfig, WritablePusPacket};
use spacepackets::time::{TimeWriter, UnixTime, cds};
use spacepackets::{PacketId, PacketSequenceControl, PacketType, SequenceFlags, SpHeader};
use std::time::Duration;
use std::vec::Vec;
#[allow(unused_imports)]
@@ -869,59 +811,66 @@ mod tests {
cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
.unwrap();
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
let len_packet = base_ping_tc_simple_ctor(0, &[])
let len_packet = base_ping_tc_simple_ctor(u14::new(0), &[])
.write_to_bytes(&mut buf[len_time_stamp..])
.unwrap();
(
SpHeader::new_for_unseg_tc(0x02, 0x34, len_packet as u16),
SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), len_packet as u16),
len_packet + len_time_stamp,
)
}
fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], true)
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], CreatorConfig::default())
}
fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], true)
PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], CreatorConfig::default())
}
fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], true)
PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], CreatorConfig::default())
}
fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let cds_time =
cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
.unwrap();
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 0);
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), 0);
// app data should not matter, double wrapped time-tagged commands should be rejected right
// away
let inner_time_tagged_tc = PusTcCreator::new_simple(sph, 11, 4, &[], true);
let inner_time_tagged_tc =
PusTcCreator::new_simple(sph, 11, 4, &[], CreatorConfig::default());
let packet_len = inner_time_tagged_tc
.write_to_bytes(&mut buf[len_time_stamp..])
.expect("writing inner time tagged tc failed");
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_time_stamp + packet_len], true)
PusTcCreator::new_simple(
sph,
11,
4,
&buf[..len_time_stamp + packet_len],
CreatorConfig::default(),
)
}
fn invalid_time_tagged_cmd() -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 1);
PusTcCreator::new_simple(sph, 11, 4, &[], true)
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), 1);
PusTcCreator::new_simple(sph, 11, 4, &[], CreatorConfig::default())
}
fn base_ping_tc_simple_ctor(seq_count: u16, app_data: &'static [u8]) -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0);
PusTcCreator::new_simple(sph, 17, 1, app_data, true)
fn base_ping_tc_simple_ctor(seq_count: u14, app_data: &'static [u8]) -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), seq_count, 0);
PusTcCreator::new_simple(sph, 17, 1, app_data, CreatorConfig::default())
}
fn ping_tc_to_store(
pool: &mut StaticMemoryPool,
buf: &mut [u8],
seq_count: u16,
seq_count: u14,
app_data: &'static [u8],
) -> TcInfo {
let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data);
@@ -932,7 +881,8 @@ mod tests {
#[test]
fn test_enable_api() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
assert!(scheduler.is_enabled());
scheduler.disable();
assert!(!scheduler.is_enabled());
@@ -946,10 +896,11 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::new(0), &[]);
scheduler
.insert_unwrapped_and_stored_tc(
@@ -959,7 +910,7 @@ mod tests {
.unwrap();
let app_data = &[0, 1, 2];
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, app_data);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), app_data);
scheduler
.insert_unwrapped_and_stored_tc(
UnixTime::new_only_secs(200),
@@ -968,7 +919,7 @@ mod tests {
.unwrap();
let app_data = &[0, 1, 2];
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, app_data);
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, u14::new(2), app_data);
scheduler
.insert_unwrapped_and_stored_tc(
UnixTime::new_only_secs(300),
@@ -988,7 +939,8 @@ mod tests {
#[test]
fn insert_multi_with_same_time() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
scheduler
.insert_unwrapped_and_stored_tc(
@@ -999,8 +951,8 @@ mod tests {
packet_idx: 1,
}),
RequestId {
seq_count: 1,
apid: 0,
seq_count: u14::new(1),
apid: u11::ZERO,
source_id: 0,
},
),
@@ -1016,8 +968,8 @@ mod tests {
packet_idx: 2,
}),
RequestId {
seq_count: 2,
apid: 1,
seq_count: u14::new(2),
apid: u11::new(1),
source_id: 5,
},
),
@@ -1035,8 +987,8 @@ mod tests {
.into(),
RequestId {
source_id: 10,
seq_count: 20,
apid: 23,
seq_count: u14::new(20),
apid: u11::new(23),
},
),
)
@@ -1047,7 +999,8 @@ mod tests {
#[test]
fn test_time_update() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let time = UnixTime::new(1, 2_000_000);
scheduler.update_time(time);
assert_eq!(scheduler.current_time(), &time);
@@ -1077,19 +1030,22 @@ mod tests {
#[test]
fn test_request_id() {
let src_id_to_set = 12;
let apid_to_set = 0x22;
let seq_count = 105;
let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, 105, 0);
let apid_to_set = u11::new(0x22);
let seq_count = u14::new(105);
let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, u14::new(105), 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
sec_header.source_id = src_id_to_set;
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let req_id = RequestId::from_tc(&ping_tc);
assert_eq!(req_id.source_id(), src_id_to_set);
assert_eq!(req_id.apid(), apid_to_set);
assert_eq!(req_id.seq_count(), seq_count);
assert_eq!(
req_id.as_u64(),
((src_id_to_set as u64) << 32) | (apid_to_set as u64) << 16 | seq_count as u64
((src_id_to_set as u64) << 32)
| (apid_to_set.value() as u64) << 16
| seq_count.value() as u64
);
}
#[test]
@@ -1098,16 +1054,17 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -1166,16 +1123,17 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
.expect("insertion failed");
@@ -1226,18 +1184,19 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
scheduler.disable();
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -1291,14 +1250,15 @@ mod tests {
#[test]
fn insert_unwrapped_tc() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
false,
));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
let info = scheduler
.insert_unwrapped_tc(
@@ -1313,7 +1273,7 @@ mod tests {
let mut read_buf: [u8; 64] = [0; 64];
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::ZERO, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1335,13 +1295,14 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::new(0), &[]));
}
#[test]
fn insert_wrapped_tc() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1362,8 +1323,8 @@ mod tests {
let read_len = pool.read(&info.addr, &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::ZERO, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1387,13 +1348,14 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::new(0), &[]));
}
#[test]
fn insert_wrong_service() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1418,7 +1380,8 @@ mod tests {
#[test]
fn insert_wrong_subservice() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1443,7 +1406,8 @@ mod tests {
#[test]
fn insert_wrapped_tc_faulty_app_data() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
false,
@@ -1460,7 +1424,8 @@ mod tests {
#[test]
fn insert_doubly_wrapped_time_tagged_cmd() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
false,
@@ -1478,7 +1443,7 @@ mod tests {
#[test]
fn test_ctor_from_current() {
let scheduler = PusScheduler::new_with_current_init_time(Duration::from_secs(5))
let scheduler = PusSchedulerAlloc::new_with_current_init_time(Duration::from_secs(5))
.expect("creation from current time failed");
let current_time = scheduler.current_time;
assert!(current_time.as_secs() > 0);
@@ -1486,7 +1451,8 @@ mod tests {
#[test]
fn test_update_from_current() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
assert_eq!(scheduler.current_time.as_secs(), 0);
scheduler
.update_time_from_now()
@@ -1496,7 +1462,8 @@ mod tests {
#[test]
fn release_time_within_time_margin() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (5, 64)],
@@ -1529,9 +1496,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
@@ -1566,9 +1534,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
@@ -1592,9 +1561,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
@@ -1613,9 +1583,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
@@ -1634,17 +1605,18 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
.expect("inserting tc failed");
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, &[]);
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, u14::new(2), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_2)
.expect("inserting tc failed");
@@ -1676,7 +1648,8 @@ mod tests {
#[test]
fn insert_full_store_test() {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(1, 64)],
@@ -1692,7 +1665,7 @@ mod tests {
assert!(insert_res.is_err());
let err = insert_res.unwrap_err();
match err {
ScheduleError::StoreError(e) => match e {
ScheduleError::Pool(e) => match e {
PoolError::StoreFull(_) => {}
_ => panic!("unexpected store error {e}"),
},
@@ -1702,8 +1675,8 @@ mod tests {
fn insert_command_with_release_time(
pool: &mut StaticMemoryPool,
scheduler: &mut PusScheduler,
seq_count: u16,
scheduler: &mut PusSchedulerAlloc,
seq_count: u14,
release_secs: u64,
) -> TcInfo {
let mut buf: [u8; 32] = [0; 32];
@@ -1721,9 +1694,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let check_range = |range: Range<UnixTime, Vec<TcInfo>>| {
let mut tcs_in_range = 0;
@@ -1753,10 +1727,11 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
&UnixTime::new_only_secs(100),
cds::SubmillisPrecision::Absent,
@@ -1788,10 +1763,11 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let end_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1823,11 +1799,12 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 200);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 200);
assert_eq!(scheduler.num_scheduled_telecommands(), 4);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1864,9 +1841,10 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let del_res = scheduler.delete_all(&mut pool);
assert!(del_res.is_ok());
@@ -1875,8 +1853,8 @@ mod tests {
// Contrary to reset, this does not disable the scheduler.
assert!(scheduler.is_enabled());
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let del_res = scheduler
.delete_by_time_filter(TimeWindow::<cds::CdsTime>::new_select_all(), &mut pool);
@@ -1893,10 +1871,13 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
&UnixTime::new_only_secs(100),
@@ -1918,10 +1899,13 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let end_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1944,12 +1928,16 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_out_of_range_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_out_of_range_0 =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let cmd_out_of_range_1 =
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 200);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 200);
assert_eq!(scheduler.num_scheduled_telecommands(), 4);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1979,16 +1967,17 @@ mod tests {
vec![(10, 32), (5, 64)],
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut scheduler =
PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -2017,12 +2006,12 @@ mod tests {
fn test_generic_insert_app_data_test() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 64] = [0; 64];
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
assert!(result.is_ok());
@@ -2031,7 +2020,7 @@ mod tests {
assert_eq!(n, 1);
let time_reader = cds::CdsTime::from_bytes_with_u16_days(&buf[2..2 + 7]).unwrap();
assert_eq!(time_reader, time_writer);
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap().0;
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap();
assert_eq!(pus_tc_reader, ping_tc);
}
@@ -2039,12 +2028,12 @@ mod tests {
fn test_generic_insert_app_data_test_byte_conv_error() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 16] = [0; 16];
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
assert!(result.is_err());
@@ -2068,12 +2057,12 @@ mod tests {
fn test_generic_insert_app_data_test_as_vec() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 64] = [0; 64];
generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc).unwrap();
let vec = generate_insert_telecommand_app_data_as_vec(&time_writer, &ping_tc)
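
The hunks above rename the allocating PUS 11 scheduler from `PusScheduler` to `PusSchedulerAlloc` and switch sequence counts from plain integers to `arbitrary_int::u14`. Below is a minimal sketch of the new construction pattern, pieced together only from calls visible in these hunks; the import paths flagged as assumed in the comments are not part of this diff and may differ in the actual crate layout.

```rust
use core::time::Duration;

use arbitrary_int::u14;
use satrs::pool::{StaticMemoryPool, StaticPoolConfig};
// Assumed re-export location for the renamed scheduler; not shown in this diff.
use satrs::pus::scheduler::PusSchedulerAlloc;
// Assumed path for the UnixTime epoch type used by the constructor.
use spacepackets::time::UnixTime;

fn main() {
    // Same subpool layout the tests above use for the shared TC pool.
    let _pool = StaticMemoryPool::new(StaticPoolConfig::new_from_subpool_cfg_tuples(
        vec![(10, 32), (5, 64)],
        false,
    ));
    // Renamed allocating scheduler: epoch 0 and a 5 s release time margin.
    let mut scheduler =
        PusSchedulerAlloc::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
    // A freshly constructed scheduler holds no telecommands yet.
    assert_eq!(scheduler.num_scheduled_telecommands(), 0);

    // Sequence counts are now 14-bit arbitrary-int values instead of plain u16.
    let _seq_first = u14::new(0);
    let _seq_next = u14::new(1);

    // Disabling only blocks release, not insertion, as exercised by the tests above.
    scheduler.disable();
    assert!(!scheduler.is_enabled());
}
```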

@@ -1,15 +1,15 @@
use super::scheduler::PusSchedulerProvider;
use super::scheduler::PusScheduler;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
DirectPusPacketHandlerResult, EcssTcInMemConversionProvider, EcssTcInSharedPoolConverter,
EcssTcInVecConverter, EcssTcReceiver, EcssTmSender, HandlingStatus, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
CacheAndReadRawEcssTc, DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, EcssTcReceiver,
EcssTcVecCacher, EcssTmSender, HandlingStatus, MpscTcReceiver, PartialPusHandlingError,
PusServiceHelper,
};
use crate::pool::PoolProvider;
use crate::pus::PusPacketHandlingError;
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::ecss::{PusPacket, scheduling};
use spacepackets::time::cds::CdsTime;
use std::sync::mpsc;
@@ -24,23 +24,22 @@ use std::sync::mpsc;
pub struct PusSchedServiceHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
PusScheduler: PusSchedulerProvider,
PusSchedulerInstance: PusScheduler,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>,
scheduler: PusScheduler,
scheduler: PusSchedulerInstance,
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusSchedulerProvider,
>
PusSchedServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter, Scheduler>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusScheduler,
> PusSchedServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter, Scheduler>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -102,11 +101,12 @@ impl<
}
};
self.scheduler.enable();
if self.scheduler.is_enabled() && opt_started_token.is_some() {
if self.scheduler.is_enabled()
&& let Some(started_token) = opt_started_token
{
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
started_token,
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
@@ -131,10 +131,12 @@ impl<
};
self.scheduler.disable();
if !self.scheduler.is_enabled() && opt_started_token.is_some() {
if !self.scheduler.is_enabled()
&& let Some(started_token) = opt_started_token
{
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
started_token,
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
@@ -211,7 +213,7 @@ impl<
pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
PusScheduler,
>;
@@ -220,7 +222,7 @@ pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHand
pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
PusScheduler,
>;
@@ -229,7 +231,7 @@ pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServ
pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
PusScheduler,
>;
@@ -238,7 +240,7 @@ pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceH
pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
PusScheduler,
>;
@@ -249,21 +251,23 @@ mod tests {
use crate::pus::test_util::{PusTestHarness, TEST_APID};
use crate::pus::verification::{VerificationReporter, VerificationReportingProvider};
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::pus::{
scheduler::{self, PusSchedulerProvider, TcInfo},
EcssTcInSharedPoolCacher,
scheduler::{self, PusScheduler, TcInfo},
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedPoolConverter,
};
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::tmtc::PacketSenderWithSharedPool;
use alloc::collections::VecDeque;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::SpHeader;
use spacepackets::ecss::scheduling::Subservice;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::ecss::{CreatorConfig, WritablePusPacket};
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
use spacepackets::{
ecss::{tc::PusTcCreator, tm::PusTmReader},
time::cds,
@@ -276,7 +280,7 @@ mod tests {
handler: PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
TestScheduler,
>,
@@ -309,8 +313,12 @@ mod tests {
}
impl PusTestHarness for Pus11HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -341,7 +349,7 @@ mod tests {
inserted_tcs: VecDeque<TcInfo>,
}
impl PusSchedulerProvider for TestScheduler {
impl PusScheduler for TestScheduler {
type TimeProvider = cds::CdsTime;
fn reset(
@@ -380,10 +388,11 @@ mod tests {
test_harness: &mut Pus11HandlerWithStoreTester,
subservice: Subservice,
) {
let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, 0, 0);
let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, u14::ZERO, 0);
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
let enable_scheduling = PusTcCreator::new(reply_header, tc_header, &[0; 7], true);
let token = test_harness.init_verification(&enable_scheduling);
let enable_scheduling =
PusTcCreator::new(reply_header, tc_header, &[0; 7], CreatorConfig::default());
let token = test_harness.start_verification(&enable_scheduling);
test_harness.send_tc(&token, &enable_scheduling);
let request_id = token.request_id();
@@ -427,9 +436,9 @@ mod tests {
#[test]
fn test_insert_activity_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], true);
let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], CreatorConfig::default());
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
let stamper = cds::CdsTime::now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
@@ -437,15 +446,15 @@ mod tests {
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
written_len += ping_raw.len();
reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0);
reply_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
let enable_scheduling = PusTcCreator::new(
reply_header,
sec_header,
&sched_app_data[..written_len],
true,
CreatorConfig::default(),
);
let token = test_harness.init_verification(&enable_scheduling);
let token = test_harness.start_verification(&enable_scheduling);
test_harness.send_tc(&token, &enable_scheduling);
let request_id = token.request_id();
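
The enable/disable handler hunks above replace the `opt_started_token.is_some()` check plus `unwrap()` with an `if`-`let` chain (a boolean condition combined with `let Some(..) = ..` in a single `if`), which requires the Rust 2024 edition. Here is a standalone illustration of the pattern using only plain std types, independent of the satrs handler code:

```rust
// Minimal let-chain sketch mirroring the shape of the enable/disable handlers above.
fn token_if_enabled(enabled: bool, opt_token: Option<u32>) -> Option<u32> {
    // One `if` combines the boolean test and the pattern match, so the body can use
    // `token` directly instead of calling `opt_token.unwrap()`.
    if enabled && let Some(token) = opt_token {
        return Some(token);
    }
    None
}

fn main() {
    assert_eq!(token_if_enabled(true, Some(7)), Some(7));
    assert_eq!(token_if_enabled(false, Some(7)), None);
    assert_eq!(token_if_enabled(true, None), None);
}
```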

@@ -2,16 +2,17 @@ use crate::pus::{
DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError, PusTmVariant,
};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use spacepackets::SpHeader;
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::{CreatorConfig, PusPacket};
use std::sync::mpsc;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
EcssTcInMemConversionProvider, EcssTcInSharedPoolConverter, EcssTcInVecConverter,
EcssTcReceiver, EcssTmSender, GenericConversionError, HandlingStatus, MpscTcReceiver,
PusServiceHelper,
CacheAndReadRawEcssTc, EcssTcInSharedPoolCacher, EcssTcReceiver, EcssTcVecCacher, EcssTmSender,
GenericConversionError, HandlingStatus, MpscTcReceiver, PusServiceHelper,
};
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
@@ -19,7 +20,7 @@ use super::{
pub struct PusService17TestHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper:
@@ -27,11 +28,11 @@ pub struct PusService17TestHandler<
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -76,10 +77,14 @@ impl<
// Sequence count will be handled centrally in TM funnel.
// It is assumed that the verification reporter was built with a valid APID, so we use
// the unchecked API here.
let reply_header =
SpHeader::new_for_unseg_tm(self.service_helper.verif_reporter().apid(), 0, 0);
let reply_header = SpHeader::new_for_unseg_tm(
self.service_helper.verif_reporter().apid(),
u14::ZERO,
0,
);
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp);
let ping_reply = PusTmCreator::new(reply_header, tc_header, &[], true);
let ping_reply =
PusTmCreator::new(reply_header, tc_header, &[], CreatorConfig::default());
if let Err(e) = self
.service_helper
.common
@@ -112,7 +117,7 @@ impl<
pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>;
/// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and bounded MPSC
@@ -120,7 +125,7 @@ pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded
@@ -128,12 +133,13 @@ pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>;
#[cfg(test)]
mod tests {
use crate::ComponentId;
use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID};
use crate::pus::tests::{
PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon,
@@ -143,18 +149,19 @@ mod tests {
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
DirectPusPacketHandlerResult, EcssTcInSharedPoolConverter, EcssTcInVecConverter,
DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, EcssTcVecCacher,
GenericConversionError, HandlingStatus, MpscTcReceiver, MpscTmAsVecSender,
PartialPusHandlingError, PusPacketHandlingError,
};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::ComponentId;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::SpHeader;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;
use spacepackets::time::{cds, TimeWriter};
use spacepackets::SpHeader;
use spacepackets::ecss::{CreatorConfig, PusPacket};
use spacepackets::time::{TimeWriter, cds};
use super::PusService17TestHandler;
@@ -163,7 +170,7 @@ mod tests {
handler: PusService17TestHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>,
}
@@ -180,8 +187,12 @@ mod tests {
}
impl PusTestHarness for Pus17HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -221,7 +232,7 @@ mod tests {
handler: PusService17TestHandler<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>,
}
@@ -238,8 +249,12 @@ mod tests {
}
impl PusTestHarness for Pus17HandlerWithVecTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -276,10 +291,11 @@ mod tests {
fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
// Create a ping TC, verify acceptance.
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let request_id = token.request_id();
let result = test_harness.handle_one_tc();
@@ -331,10 +347,11 @@ mod tests {
#[test]
fn test_sending_unsupported_service() {
let mut test_harness = Pus17HandlerWithStoreTester::new(0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());
@@ -352,10 +369,11 @@ mod tests {
#[test]
fn test_sending_custom_subservice() {
let mut test_harness = Pus17HandlerWithStoreTester::new(0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
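
Across the hunks above, the trailing boolean flag on the `spacepackets` creator constructors is replaced by a `CreatorConfig` value, and APID and sequence count arguments move to `arbitrary_int::u11`/`u14`. A small sketch of the updated ping TC construction, assuming the spacepackets version this diff targets and using a placeholder APID of 0x02:

```rust
use arbitrary_int::{u11, u14};
use spacepackets::SpHeader;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::{CreatorConfig, WritablePusPacket};

fn main() {
    // Placeholder APID for illustration only.
    let apid = u11::new(0x02);
    // APID is now u11 and the sequence count u14.
    let sp_header = SpHeader::new_for_unseg_tc(apid, u14::new(0), 0);
    let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
    // The former trailing `true` flag becomes a config struct with a Default impl.
    let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
    let raw = ping_tc.to_vec().expect("serializing ping TC failed");
    assert!(!raw.is_empty());
}
```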

File diff suppressed because it is too large.

@@ -1,3 +1,4 @@
use arbitrary_int::u11;
use core::{fmt, marker::PhantomData};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@@ -9,13 +10,13 @@ pub use alloc_mod::*;
pub use std_mod::*;
use spacepackets::{
ecss::{tc::IsPusTelecommand, PusPacket},
ByteConversionError,
ecss::{PusPacket, tc::IsPusTelecommand},
};
use crate::{
queue::{GenericReceiveError, GenericSendError},
ComponentId,
queue::{GenericReceiveError, GenericSendError},
};
/// Generic request ID type. Requests can be associated with an ID to have a unique identifier
@@ -23,7 +24,7 @@ use crate::{
pub type RequestId = u32;
/// CCSDS APID type definition. Please note that the APID is a 14 bit value.
pub type Apid = u16;
pub type Apid = u11;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct UniqueApidTargetId {
@@ -40,7 +41,7 @@ impl UniqueApidTargetId {
}
pub fn raw(&self) -> ComponentId {
((self.apid as u64) << 32) | (self.unique_id as u64)
((self.apid.value() as u64) << 32) | (self.unique_id as u64)
}
pub fn id(&self) -> ComponentId {
@@ -68,7 +69,7 @@ impl UniqueApidTargetId {
impl From<u64> for UniqueApidTargetId {
fn from(raw: u64) -> Self {
Self {
apid: (raw >> 32) as u16,
apid: u11::new((raw >> 32) as u16),
unique_id: raw as u32,
}
}
@@ -335,12 +336,12 @@ pub mod alloc_mod {
}
impl<
To,
From,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> MessageSenderAndReceiver<To, From, Sender, Receiver, SenderStore>
To,
From,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> MessageSenderAndReceiver<To, From, Sender, Receiver, SenderStore>
{
pub fn new(local_channel_id: ComponentId, message_receiver: Receiver) -> Self {
Self {
@@ -403,15 +404,15 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -496,14 +497,18 @@ mod tests {
use std::sync::mpsc;
use alloc::string::ToString;
use arbitrary_int::{u11, u14};
use spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
ByteConversionError, SpHeader,
ecss::{
CreatorConfig,
tc::{PusTcCreator, PusTcSecondaryHeader},
},
};
use crate::{
queue::{GenericReceiveError, GenericSendError},
request::{MessageMetadata, MessageSenderMap, MessageSenderStoreProvider},
request::{Apid, MessageMetadata, MessageSenderMap, MessageSenderStoreProvider},
};
use super::{GenericMessage, MessageReceiverWithId, UniqueApidTargetId};
@@ -514,8 +519,8 @@ mod tests {
#[test]
fn test_basic_target_id_with_apid() {
let id = UniqueApidTargetId::new(0x111, 0x01);
assert_eq!(id.apid, 0x111);
let id = UniqueApidTargetId::new(Apid::new(0x111), 0x01);
assert_eq!(id.apid.value(), 0x111);
assert_eq!(id.unique_id, 0x01);
assert_eq!(id.id(), id.raw());
assert_eq!(u64::from(id), id.raw());
@@ -532,19 +537,20 @@ mod tests {
#[test]
fn test_basic_target_id_with_apid_from_pus_tc() {
let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
let sp_header = SpHeader::new_for_unseg_tc(u11::new(0x111), u14::new(5), 0);
let app_data = 1_u32.to_be_bytes();
let pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &app_data, true);
let pus_tc =
PusTcCreator::new_simple(sp_header, 17, 1, &app_data, CreatorConfig::default());
let id = UniqueApidTargetId::from_pus_tc(&pus_tc).unwrap();
assert_eq!(id.apid, 0x111);
assert_eq!(id.apid.value(), 0x111);
assert_eq!(id.unique_id, 1);
}
#[test]
fn test_basic_target_id_with_apid_from_pus_tc_invalid_app_data() {
let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
let sp_header = SpHeader::new_for_unseg_tc(u11::new(0x111), u14::new(5), 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let error = UniqueApidTargetId::from_pus_tc(&pus_tc);
assert!(error.is_err());
let error = error.unwrap_err();
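
The `request` module hunks above narrow the `Apid` alias to `arbitrary_int::u11` and adjust the composite ID packing accordingly. A short round-trip sketch based on the `raw()` and `From<u64>` implementations and the test imports shown in this diff:

```rust
use arbitrary_int::u11;
use satrs::request::UniqueApidTargetId;

fn main() {
    let id = UniqueApidTargetId::new(u11::new(0x111), 0x01);
    // raw()/id() place the 11-bit APID in bits 32 and up and the unique ID in the low 32 bits.
    let raw = id.raw();
    assert_eq!(raw >> 32, 0x111);
    assert_eq!(raw as u32, 0x01);
    // From<u64> reverses the packing, so the conversion round-trips.
    assert_eq!(UniqueApidTargetId::from(raw), id);
    assert_eq!(u64::from(id), raw);
}
```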

@@ -211,8 +211,8 @@ mod tests {
println,
rc::Rc,
sync::{
mpsc::{self, TryRecvError},
Arc, Mutex,
mpsc::{self, TryRecvError},
},
time::Instant,
};

@@ -1,4 +1,5 @@
use crate::{
ComponentId,
health::{HealthState, HealthTableProvider},
mode::{Mode, ModeAndSubmode, ModeReply, ModeRequest, ModeRequestSender, UNKNOWN_MODE_VAL},
mode_tree::{
@@ -7,7 +8,6 @@ use crate::{
},
queue::GenericTargetedMessagingError,
request::{GenericMessage, RequestId},
ComponentId,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
@@ -136,7 +136,7 @@ impl SequenceExecutionHelper {
/// with [Self::load]
/// * `sender` - The sender to send mode requests to the components
/// * `children_mode_store` - The mode store vector to keep track of the mode states of
/// children components
/// children components
pub fn run(
&mut self,
table: &SequenceModeTables,
@@ -252,10 +252,10 @@ impl SequenceExecutionHelper {
Ok(ModeCommandingResult::AwaitingSuccessCheck)
} else if seq_table_value.entries.len() - 1 == sequence_idx {
self.state = SequenceExecutionHelperState::Idle;
return Ok(ModeCommandingResult::Done);
Ok(ModeCommandingResult::Done)
} else {
self.current_sequence_index = Some(sequence_idx + 1);
return Ok(ModeCommandingResult::StepDone);
Ok(ModeCommandingResult::StepDone)
}
}
@@ -597,7 +597,7 @@ impl SubsystemCommandingHelper {
}
fn update_internal_req_id(&mut self) {
let new_internal_req_id = self.request_id().unwrap() << 8
let new_internal_req_id = (self.request_id().unwrap() << 8)
| self.seq_exec_helper.current_sequence_index().unwrap() as u32;
self.seq_exec_helper.set_request_id(new_internal_req_id);
self.active_internal_request_id = Some(new_internal_req_id);
@@ -682,9 +682,10 @@ mod tests {
use super::*;
use crate::{
ComponentId,
mode::{
tests::{ModeReqSenderMock, ModeReqWrapper},
Mode, ModeAndSubmode, ModeReply, ModeRequest, UNKNOWN_MODE,
tests::{ModeReqSenderMock, ModeReqWrapper},
},
mode_tree::{
ModeStoreProvider, ModeStoreVec, SequenceModeTables, SequenceTableEntry,
@@ -693,7 +694,6 @@ mod tests {
queue::GenericTargetedMessagingError,
request::{GenericMessage, MessageMetadata, RequestId},
subsystem::{ModeCommandingResult, ModeTreeHelperState, SequenceExecutionHelperState},
ComponentId,
};
#[derive(Debug)]

@@ -1,7 +1,7 @@
use core::fmt::Debug;
/// Generic abstraction for a check/countdown timer.
pub trait CountdownProvider: Debug {
pub trait Countdown: Debug {
fn has_expired(&self) -> bool;
fn reset(&mut self);
}

@@ -9,20 +9,20 @@
//! They only need to send the received and generated data to these objects.
use crate::queue::GenericSendError;
use crate::{
pool::{PoolAddr, PoolError},
ComponentId,
pool::{PoolAddr, PoolError},
};
#[cfg(feature = "std")]
pub use alloc_mod::*;
use core::fmt::Debug;
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
use downcast_rs::{Downcast, impl_downcast};
use spacepackets::{
SpHeader,
ecss::{
tc::PusTcReader,
tm::{PusTmCreator, PusTmReader},
},
SpHeader,
};
#[cfg(feature = "std")]
use std::sync::mpsc;

@@ -1,15 +1,16 @@
use arbitrary_int::{u11, u14};
use spacepackets::SpHeader;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::time::cds::CdsTime;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
pub struct PusTmWithCdsShortHelper {
apid: u16,
apid: u11,
cds_short_buf: [u8; 7],
}
impl PusTmWithCdsShortHelper {
pub fn new(apid: u16) -> Self {
pub fn new(apid: u11) -> Self {
Self {
apid,
cds_short_buf: [0; 7],
@@ -22,7 +23,7 @@ impl PusTmWithCdsShortHelper {
service: u8,
subservice: u8,
source_data: &'data [u8],
seq_count: u16,
seq_count: u14,
) -> PusTmCreator<'_, 'data> {
let time_stamp = CdsTime::now_with_u16_days().unwrap();
time_stamp.write_to_bytes(&mut self.cds_short_buf).unwrap();
@@ -35,7 +36,7 @@ impl PusTmWithCdsShortHelper {
subservice: u8,
source_data: &'data [u8],
stamper: &CdsTime,
seq_count: u16,
seq_count: u14,
) -> PusTmCreator<'_, 'data> {
stamper.write_to_bytes(&mut self.cds_short_buf).unwrap();
self.create_pus_tm_common(service, subservice, source_data, seq_count)
@@ -46,40 +47,47 @@ impl PusTmWithCdsShortHelper {
service: u8,
subservice: u8,
source_data: &'data [u8],
seq_count: u16,
seq_count: u14,
) -> PusTmCreator<'_, 'data> {
let reply_header = SpHeader::new_for_unseg_tm(self.apid, seq_count, 0);
let tc_header = PusTmSecondaryHeader::new_simple(service, subservice, &self.cds_short_buf);
PusTmCreator::new(reply_header, tc_header, source_data, true)
PusTmCreator::new(
reply_header,
tc_header,
source_data,
CreatorConfig::default(),
)
}
}
#[cfg(test)]
mod tests {
use spacepackets::{ecss::PusPacket, time::cds::CdsTime, CcsdsPacket};
use arbitrary_int::{u11, u14};
use spacepackets::{CcsdsPacket, ecss::PusPacket, time::cds::CdsTime};
use super::PusTmWithCdsShortHelper;
#[test]
fn test_helper_with_stamper() {
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(u11::new(0x123));
let stamper = CdsTime::new_with_u16_days(0, 0);
let tm = pus_tm_helper.create_pus_tm_with_stamper(17, 1, &[1, 2, 3, 4], &stamper, 25);
let tm =
pus_tm_helper.create_pus_tm_with_stamper(17, 1, &[1, 2, 3, 4], &stamper, u14::new(25));
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 1);
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
assert_eq!(tm.seq_count(), 25);
assert_eq!(tm.seq_count().value(), 25);
assert_eq!(tm.timestamp(), [64, 0, 0, 0, 0, 0, 0])
}
#[test]
fn test_helper_from_now() {
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(0x123);
let tm = pus_tm_helper.create_pus_tm_timestamp_now(17, 1, &[1, 2, 3, 4], 25);
let mut pus_tm_helper = PusTmWithCdsShortHelper::new(u11::new(0x123));
let tm = pus_tm_helper.create_pus_tm_timestamp_now(17, 1, &[1, 2, 3, 4], u14::new(25));
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 1);
assert_eq!(tm.user_data(), &[1, 2, 3, 4]);
assert_eq!(tm.seq_count(), 25);
assert_eq!(tm.seq_count().value(), 25);
assert_eq!(tm.timestamp().len(), 7);
}
}

@@ -2,8 +2,8 @@
use core::mem::size_of;
use serde::{Deserialize, Serialize};
use spacepackets::ecss::{PfcReal, PfcUnsigned, Ptc};
use spacepackets::time::CcsdsTimeProvider;
use spacepackets::time::cds::CdsTime;
use spacepackets::time::{CcsdsTimeProvider, TimeWriter};
enum NumOfParamsInfo {
/// The parameter entry is a scalar field

@@ -4,13 +4,13 @@ use satrs::dev_mgmt::{
DevManagerCommandingHelper, DevManagerHelperResult, TransparentDevManagerHook,
};
use satrs::mode::{
Mode, ModeError, ModeProvider, ModeReplyReceiver, ModeReplySender, ModeRequestHandler,
ModeRequestHandlerMpscBounded, ModeRequestReceiver, ModeRequestorAndHandlerMpscBounded,
ModeRequestorOneChildBoundedMpsc, INVALID_MODE, UNKNOWN_MODE,
INVALID_MODE, Mode, ModeError, ModeProvider, ModeReplyReceiver, ModeReplySender,
ModeRequestHandler, ModeRequestHandlerMpscBounded, ModeRequestReceiver,
ModeRequestorAndHandlerMpscBounded, ModeRequestorOneChildBoundedMpsc, UNKNOWN_MODE,
};
use satrs::mode_tree::{
connect_mode_nodes, ModeChild, ModeNode, ModeParent, ModeStoreProvider, SequenceTableEntry,
SequenceTableMapTable, TargetTableEntry,
ModeChild, ModeNode, ModeParent, ModeStoreProvider, SequenceTableEntry, SequenceTableMapTable,
TargetTableEntry, connect_mode_nodes,
};
use satrs::mode_tree::{SequenceTablesMapValue, TargetTablesMapValue};
use satrs::request::{MessageMetadata, RequestId};
@@ -20,10 +20,10 @@ use satrs::subsystem::{
StartSequenceError, SubsystemCommandingHelper, SubsystemHelperResult,
};
use satrs::{
ComponentId,
mode::{ModeAndSubmode, ModeReply, ModeRequest},
queue::GenericTargetedMessagingError,
request::GenericMessage,
ComponentId,
};
use std::borrow::{Borrow, BorrowMut};
use std::cell::RefCell;

@@ -1,94 +0,0 @@
#![allow(dead_code, unused_imports)]
use satrs::events::{
EventU32, EventU32TypedSev, GenericEvent, HasSeverity, LargestEventRaw, LargestGroupIdRaw,
Severity, SeverityInfo, SeverityLow, SeverityMedium,
};
use std::convert::AsRef;
#[derive(Debug)]
struct GroupIdIntrospection {
name: &'static str,
id: LargestGroupIdRaw,
}
#[derive(Debug)]
struct EventIntrospection {
name: &'static str,
group_id: GroupIdIntrospection,
event: &'static EventU32,
info: &'static str,
}
//#[event(descr="This is some info event")]
const INFO_EVENT_0: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_0_ERASED: EventU32 = EventU32::const_from_info(INFO_EVENT_0);
// This is ideally auto-generated
const INFO_EVENT_0_INTROSPECTION: EventIntrospection = EventIntrospection {
name: "INFO_EVENT_0",
group_id: GroupIdIntrospection {
id: 0,
name: "Group ID 0 without name",
},
event: &INFO_EVENT_0_ERASED,
info: "This is some info event",
};
//#[event(descr="This is some low severity event")]
const SOME_LOW_SEV_EVENT: EventU32TypedSev<SeverityLow> = EventU32TypedSev::new(0, 12);
//const EVENT_LIST: [&'static Event; 2] = [&INFO_EVENT_0, &SOME_LOW_SEV_EVENT];
//#[event_group]
const TEST_GROUP_NAME: u16 = 1;
// Auto-generated?
const TEST_GROUP_NAME_NAME: &str = "TEST_GROUP_NAME";
//#[event(desc="Some medium severity event")]
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP: EventU32TypedSev<SeverityMedium> =
EventU32TypedSev::new(TEST_GROUP_NAME, 0);
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED: EventU32 =
EventU32::const_from_medium(MEDIUM_SEV_EVENT_IN_OTHER_GROUP);
// Also auto-generated
const MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION: EventIntrospection = EventIntrospection {
name: "MEDIUM_SEV_EVENT_IN_OTHER_GROUP",
group_id: GroupIdIntrospection {
name: TEST_GROUP_NAME_NAME,
id: TEST_GROUP_NAME,
},
event: &MEDIUM_SEV_EVENT_IN_OTHER_GROUP_REDUCED,
info: "Some medium severity event",
};
const CONST_SLICE: &[u8] = &[0, 1, 2, 3];
const INTROSPECTION_FOR_TEST_GROUP_0: [&EventIntrospection; 2] =
[&INFO_EVENT_0_INTROSPECTION, &INFO_EVENT_0_INTROSPECTION];
//const INTROSPECTION_FOR_TABLE: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_0;
const INTROSPECTION_FOR_TEST_GROUP_NAME: [&EventIntrospection; 1] =
[&MEDIUM_SEV_EVENT_IN_OTHER_GROUP_INTROSPECTION];
//const BLAH: &'static [&EventIntrospection] = &INTROSPECTION_FOR_TEST_GROUP_NAME;
const ALL_EVENTS: [&[&EventIntrospection]; 2] = [
&INTROSPECTION_FOR_TEST_GROUP_0,
&INTROSPECTION_FOR_TEST_GROUP_NAME,
];
#[test]
fn main() {
//let test = stringify!(INFO_EVENT);
//println!("{:?}", test);
//for event in EVENT_LIST {
// println!("{:?}", event);
//}
//for events in ALL_EVENTS.into_iter().flatten() {
// dbg!("{:?}", events);
//}
//for introspection_info in INTROSPECTION_FOR_TEST_GROUP {
// dbg!("{:?}", introspection_info);
//}
//let test_struct =
}

@@ -1,22 +1,23 @@
use satrs::event_man::{
use arbitrary_int::u11;
use satrs::event_man_legacy::{
EventManagerWithMpsc, EventMessage, EventMessageU32, EventRoutingError, EventSendProvider,
EventU32SenderMpsc,
};
use satrs::events::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
use satrs::events_legacy::{EventU32, EventU32TypedSev, Severity, SeverityInfo};
use satrs::params::U32Pair;
use satrs::params::{Params, ParamsHeapless, WritableToBeBytes};
use satrs::pus::event_man::{DefaultPusEventReportingMap, EventReporter, PusEventTmCreatorWithMap};
use satrs::request::UniqueApidTargetId;
use satrs::tmtc::PacketAsVec;
use spacepackets::ecss::PusError;
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::{PusError, PusPacket};
use std::sync::mpsc::{self, SendError, TryRecvError};
use std::thread;
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u16 = 0x02;
const TEST_APID: u11 = u11::new(0x02);
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);
#[derive(Debug, Clone)]
@@ -35,8 +36,7 @@ fn test_threaded_usage() {
event_man.subscribe_all(pus_event_man_send_provider.target_id());
event_man.add_sender(pus_event_man_send_provider);
let (event_packet_tx, event_packet_rx) = mpsc::channel::<PacketAsVec>();
let reporter =
EventReporter::new(TEST_ID.raw(), 0x02, 0, 128).expect("Creating event reporter failed");
let reporter = EventReporter::new(TEST_ID.raw(), u11::new(0x02), 0, 128);
let pus_event_man =
PusEventTmCreatorWithMap::new(reporter, DefaultPusEventReportingMap::default());
let error_handler = |event_msg: &EventMessageU32, error: EventRoutingError| {
@@ -107,9 +107,9 @@ fn test_threaded_usage() {
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 1);
let src_data = tm.0.source_data();
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), 1);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 4);
let event =
@@ -137,9 +137,9 @@ fn test_threaded_usage() {
Ok(event_tm) => {
let tm = PusTmReader::new(event_tm.packet.as_slice(), 7)
.expect("Deserializing TM failed");
assert_eq!(tm.0.service(), 5);
assert_eq!(tm.0.subservice(), 2);
let src_data = tm.0.source_data();
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), 2);
let src_data = tm.source_data();
assert!(!src_data.is_empty());
assert_eq!(src_data.len(), 12);
let event =

@@ -1,17 +1,19 @@
#[cfg(feature = "crossbeam")]
pub mod crossbeam_test {
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use hashbrown::HashMap;
use satrs::pool::{PoolProvider, PoolProviderWithGuards, StaticMemoryPool, StaticPoolConfig};
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use satrs::pus::verification::{
FailParams, RequestId, VerificationReporter, VerificationReporterCfg,
FailParams, RequestId, VerificationReporter, VerificationReporterConfig,
VerificationReportingProvider,
};
use satrs::tmtc::{PacketSenderWithSharedPool, SharedStaticMemoryPool};
use spacepackets::SpHeader;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU8, PusPacket, WritablePusPacket};
use spacepackets::SpHeader;
use spacepackets::ecss::{CreatorConfig, EcssEnumU8, EcssEnumU16, WritablePusPacket};
use std::sync::RwLock;
use std::thread;
use std::time::Duration;
@@ -31,7 +33,7 @@ pub mod crossbeam_test {
// We use a synced sequence count provider here because both verification reporters have the
// the same APID. If they had distinct APIDs, the more correct approach would be to have
// each reporter have an own sequence count provider.
let cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
// Shared pool object to store the verification PUS telemetry
let pool_cfg = StaticPoolConfig::new_from_subpool_cfg_tuples(
vec![(10, 32), (10, 64), (10, 128), (10, 1024)],
@@ -46,8 +48,8 @@ pub mod crossbeam_test {
let sender =
PacketSenderWithSharedPool::new_with_shared_packet_pool(tx.clone(), &shared_tm_pool);
let sender_1 = sender.clone();
let mut reporter_with_sender_0 = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg);
let mut reporter_with_sender_1 = reporter_with_sender_0.clone();
let reporter_with_sender_0 = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg);
let reporter_with_sender_1 = reporter_with_sender_0.clone();
// For test purposes, we retrieve the request ID from the TCs and pass them to the receiver
// thread.
let req_id_0;
@@ -57,9 +59,9 @@ pub mod crossbeam_test {
let (tx_tc_1, rx_tc_1) = crossbeam_channel::bounded(3);
{
let mut tc_guard = shared_tc_pool.write().unwrap();
let sph = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sph = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc_0 = PusTcCreator::new_no_app_data(sph, tc_header, true);
let pus_tc_0 = PusTcCreator::new_no_app_data(sph, tc_header, CreatorConfig::default());
req_id_0 = RequestId::new(&pus_tc_0);
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
@@ -67,9 +69,9 @@ pub mod crossbeam_test {
})
.unwrap();
tx_tc_0.send(addr).unwrap();
let sph = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0);
let sph = SpHeader::new_for_unseg_tc(TEST_APID, u14::new(1), 0);
let tc_header = PusTcSecondaryHeader::new_simple(5, 1);
let pus_tc_1 = PusTcCreator::new_no_app_data(sph, tc_header, true);
let pus_tc_1 = PusTcCreator::new_no_app_data(sph, tc_header, CreatorConfig::default());
req_id_1 = RequestId::new(&pus_tc_1);
let addr = tc_guard
.free_element(pus_tc_0.len_written(), |buf| {
@@ -89,9 +91,9 @@ pub mod crossbeam_test {
let pg = tc_guard.read_with_guard(tc_addr);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (_tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let _tc = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_0.add_tc_with_req_id(req_id_0);
let token = reporter_with_sender_0.start_verification_with_req_id(req_id_0);
let accepted_token = reporter_with_sender_0
.acceptance_success(&sender, token, &FIXED_STAMP)
.expect("Acceptance success failed");
@@ -125,8 +127,8 @@ pub mod crossbeam_test {
let pg = tc_guard.read_with_guard(tc_addr);
tc_len = pg.read(&mut tc_buf).unwrap();
}
let (tc, _) = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.add_tc(&tc);
let tc = PusTcReader::new(&tc_buf[0..tc_len]).unwrap();
let token = reporter_with_sender_1.start_verification(&tc);
let accepted_token = reporter_with_sender_1
.acceptance_success(&sender_1, token, &FIXED_STAMP)
.expect("Acceptance success failed");
@@ -156,7 +158,7 @@ pub mod crossbeam_test {
.read(&mut tm_buf)
.expect("Error reading TM slice");
}
let (pus_tm, _) =
let pus_tm =
PusTmReader::new(&tm_buf[0..tm_len], 7).expect("Error reading verification TM");
let req_id =
RequestId::from_bytes(&pus_tm.source_data()[0..RequestId::SIZE_AS_BYTES])
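
In the verification test above, `VerificationReporterCfg::new(...)` with its `.unwrap()` becomes the infallible `VerificationReporterConfig::new(...)`, and `add_tc`/`add_tc_with_req_id` become `start_verification`/`start_verification_with_req_id`. A condensed sketch of the updated reporter setup, restricted to calls that appear in this diff and assuming the `test_util` helpers are available as in the integration test above:

```rust
use arbitrary_int::u14;
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use satrs::pus::verification::{
    RequestId, VerificationReporter, VerificationReporterConfig, VerificationReportingProvider,
};
use spacepackets::SpHeader;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};

fn main() {
    // Config construction no longer returns a Result, so the `.unwrap()` is gone.
    let cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
    let reporter = VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &cfg);

    // Build a ping TC only to derive a request ID, as the test above does.
    let sph = SpHeader::new_for_unseg_tc(TEST_APID, u14::new(0), 0);
    let tc_header = PusTcSecondaryHeader::new_simple(17, 1);
    let pus_tc = PusTcCreator::new_no_app_data(sph, tc_header, CreatorConfig::default());
    let req_id = RequestId::new(&pus_tc);

    // `add_tc_with_req_id` is now `start_verification_with_req_id`.
    let _accepted_later = reporter.start_verification_with_req_id(req_id);
}
```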

@@ -17,12 +17,14 @@ use core::{
use std::{
io::{Read, Write},
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
sync::{mpsc, Mutex},
sync::{Mutex, mpsc},
thread,
};
use arbitrary_int::{u11, u14};
use hashbrown::HashSet;
use satrs::{
ComponentId,
encoding::{
ccsds::{SpValidity, SpacePacketValidator},
cobs::encode_packet_with_cobs,
@@ -32,11 +34,10 @@ use satrs::{
TcpSpacepacketsServer, TcpTmtcInCobsServer,
},
tmtc::PacketSource,
ComponentId,
};
use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
CcsdsPacket, PacketId, SpHeader,
ecss::{CreatorConfig, WritablePusPacket, tc::PusTcCreator},
};
use std::{collections::VecDeque, sync::Arc, vec::Vec};
@@ -192,7 +193,7 @@ fn test_cobs_server() {
matches!(tc_receiver.try_recv(), Err(mpsc::TryRecvError::Empty));
}
const TEST_APID_0: u16 = 0x02;
const TEST_APID_0: u11 = u11::new(0x02);
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
#[derive(Default)]
@@ -217,8 +218,8 @@ impl SpacePacketValidator for SimpleVerificator {
fn test_ccsds_server() {
let (tc_sender, tc_receiver) = mpsc::channel();
let mut tm_source = SyncTmSource::default();
let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, 0, 0);
let verif_tm = PusTcCreator::new_simple(sph, 1, 1, &[], true);
let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, u14::new(0), 0);
let verif_tm = PusTcCreator::new_simple(sph, 1, 1, &[], CreatorConfig::default());
let tm_0 = verif_tm.to_vec().expect("tm generation failed");
tm_source.add_tm(&tm_0);
let mut packet_id_lookup = SimpleVerificator::default();
@@ -267,8 +268,8 @@ fn test_ccsds_server() {
.expect("setting reas timeout failed");
// Send ping telecommand.
let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let sph = SpHeader::new_for_unseg_tc(TEST_APID_0, u14::new(0), 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], CreatorConfig::default());
let tc_0 = ping_tc.to_vec().expect("packet creation failed");
stream
.write_all(&tc_0)