Compare commits

...

73 Commits

Author SHA1 Message Date
fef8d6a792 start moving to CCSDS + serde 2025-11-17 11:13:49 +01:00
b2bc87641c Merge pull request 'bumped sat-rs' (#257) from bump-satrs into main
Reviewed-on: #257
2025-11-06 14:54:05 +01:00
Robin Mueller
c8245772bb bumped sat-rs 2025-11-06 14:53:49 +01:00
56d9cafa59 Merge pull request 'small changelog tweak' (#256) from small-changelog-tweak into main
Reviewed-on: #256
2025-11-06 14:51:07 +01:00
Robin Mueller
7de057aaf2 small changelog tweak 2025-11-06 14:50:38 +01:00
2f4a11a0bc Merge pull request 'bump spacepackets version' (#255) from bump-spacepackets-version into main
Reviewed-on: #255
2025-11-06 14:48:06 +01:00
Robin Mueller
0cd929a19c bumped spacepackets version 2025-11-06 14:47:27 +01:00
d54167837e Merge pull request 'updated STM32F3 RTICv2 example' (#254) from update-embedded-examples into main
Reviewed-on: #254
2025-10-31 14:31:05 +01:00
Robin Mueller
ecce07a471 updated STM32F3 RTICv2 example 2025-10-31 14:27:29 +01:00
526254851a Merge pull request 'updated dependencies' (#253) from update-dependencies into main
Reviewed-on: #253
2025-10-29 16:41:58 +01:00
Robin Mueller
440e757141 updated dependencies 2025-10-29 16:37:41 +01:00
02aa825783 Merge pull request 'Larger update' (#252) from larger-update into main
Reviewed-on: #252
2025-10-21 14:33:31 +02:00
Robin Mueller
bc9e0e4a94 Larger update
- ComponentId is u32 now
- Simplified TCP servers
2025-10-21 13:28:24 +02:00
06e713a557 Merge pull request 'Rework Event Management Module' (#251) from rework-event-management-module into main
Reviewed-on: #251
2025-10-20 11:21:44 +02:00
Robin Mueller
778512d50e rework event management module 2025-10-20 11:16:28 +02:00
5d40638964 Merge pull request 'update spacepackets dependency' (#250) from update-dependencies into main
Reviewed-on: #250
2025-09-26 16:01:40 +02:00
Robin Mueller
1b07b845f2 update spacepackets dependency 2025-09-26 16:01:30 +02:00
2e58a311c8 Merge pull request 'CI fix and naming improvement' (#249) from example-ci-fix into main
Reviewed-on: #249
2025-09-06 19:40:17 +02:00
Robin Mueller
62d64e692a CI fix and naming improvement 2025-09-06 19:36:56 +02:00
3784f47b66 Merge pull request 'update heapless dependency' (#248) from update-deps into main
Reviewed-on: #248
2025-09-06 19:32:18 +02:00
Robin Mueller
e1911f1b6e update heapless dependency 2025-09-06 19:32:06 +02:00
2e53ce1871 Merge pull request 'update trait names' (#247) from update-trait-names into main
Reviewed-on: #247
2025-09-06 19:31:46 +02:00
Robin Mueller
a6c460129b update trait names 2025-09-06 19:26:39 +02:00
d5ecefd683 Merge pull request 'small changelog adaption' (#246) from small-changelog-adaption into main
Reviewed-on: #246
2025-08-26 14:29:41 +02:00
2f9f3b8183 small changelog adaption 2025-08-26 14:28:22 +02:00
b28cff85de Merge pull request 'prepare next MIB release' (#245) from satrs-mib-release into main
Reviewed-on: #245
2025-08-26 14:26:47 +02:00
da3de9f22c prepare next MIB release 2025-08-26 14:24:48 +02:00
aefdf0987d Merge pull request 'add matrix chat badge' (#243) from add-chat-badge into main
Reviewed-on: #243
2025-08-14 14:17:56 +02:00
Robin Mueller
7ae9e62d66 add matrix chat badge 2025-08-14 14:17:41 +02:00
fea1a9028b Merge pull request 'edition bump to 2024 ald clippy' (#244) from edition-bump-clippy-fixes into main
Reviewed-on: #244
2025-08-14 14:17:18 +02:00
Robin Mueller
abc145fb2d edition bump to 2024 ald clippy 2025-08-14 14:10:00 +02:00
490635dfcf Merge pull request 'prepare patch releases' (#242) from prep-patch-releases into main
Reviewed-on: #242
2025-07-23 14:09:59 +02:00
de3d22a022 prepare patch releases 2025-07-23 14:05:04 +02:00
cbe211fe8b Merge pull request 'CFDP reference' (#241) from cfdp-ref into main
Reviewed-on: #241
2025-07-22 10:47:08 +02:00
Robin Mueller
e379bc3fd7 CFDP 2025-07-22 10:46:56 +02:00
a61ee85796 Merge pull request 'clippy fixes' (#240) from clippy-fixes into main
Reviewed-on: #240
2025-07-22 10:44:42 +02:00
Robin Mueller
81473b30f9 clippy fixes 2025-07-22 10:44:11 +02:00
22675a73f9 Merge pull request 'changelog' (#239) from satrs-changelog into main
Reviewed-on: #239
2025-07-22 10:36:58 +02:00
Robin Mueller
c68e3d4f75 changelog 2025-07-22 10:35:38 +02:00
3deedfba17 Merge pull request 'bump all dependencies' (#238) from dependency-update into main
Reviewed-on: #238
2025-07-22 10:31:15 +02:00
Robin Mueller
533caea0fe bump all dependencies 2025-07-22 10:29:54 +02:00
848fe6f207 Merge pull request 'spacepackets update' (#237) from update-satrs-deps into main
Reviewed-on: #237
2025-07-18 19:24:55 +02:00
Robin Mueller
cf64fea7d9 bump cobs-rs dependency 2025-07-18 19:24:40 +02:00
4948db3fa5 Merge pull request 'bump cobs-rs dependency' (#236) from update-satrs-deps into main
Reviewed-on: #236
2025-07-18 19:19:58 +02:00
Robin Mueller
1920e4878c bump cobs-rs dependency 2025-07-18 19:19:24 +02:00
f46bc94b04 Merge pull request 'some test fixes' (#235) from test-fix into main
Reviewed-on: #235
2025-05-19 15:49:02 +02:00
fb45da1890 some test fixes 2025-05-19 15:46:57 +02:00
f600ee499a Merge pull request 'add first generator' (#234) from generators into main
Reviewed-on: #234
2025-05-19 15:23:37 +02:00
3f28f60c59 add first generator 2025-05-19 15:21:51 +02:00
44b1f2b037 Merge pull request 'possible fix for clippy warning' (#233) from possible-fix-for-clippy-warning into main
Reviewed-on: #233
2025-05-19 15:15:24 +02:00
4b22958b34 possible fix for clippy warning 2025-05-19 15:14:59 +02:00
a711c6acd9 Merge pull request 'bump spacepackets' (#232) from bump-spacepackets into main
Reviewed-on: #232
2025-05-19 14:19:00 +02:00
99a954a1f5 some tweaks 2025-05-19 14:18:26 +02:00
4a4fd7ac2c use git deps 2025-05-16 19:49:43 +02:00
9bf08849a2 annoying 2025-05-16 19:43:45 +02:00
b8f7fefe26 Merge pull request 'should fix tests' (#231) from test-fix into main
Reviewed-on: #231
2025-05-10 16:31:14 +02:00
a501832698 should fix tests 2025-05-10 16:30:37 +02:00
c7284d3f1c Merge pull request 'minor wording improvements' (#230) from some-wording-improvements into main
Reviewed-on: #230
2025-05-10 16:27:37 +02:00
18263d4568 Merge branch 'main' into some-wording-improvements 2025-05-10 16:27:29 +02:00
95519c1363 minor wording improvements 2025-05-10 16:27:04 +02:00
2ec32717d0 Merge pull request 'use released dependencies' (#229) from use-relesed-dependencies into main
Reviewed-on: #229
2025-05-10 16:24:37 +02:00
bfdd777685 use released dependencies 2025-05-10 16:24:06 +02:00
b54c2b7863 Merge pull request 'spacepackets update' (#227) from spacepackets-update into main
Reviewed-on: #227
2025-05-10 16:19:07 +02:00
19f3355283 Merge pull request 'update SIM suite name' (#228) from update-sim-suite-name into main
Reviewed-on: #228
2025-05-10 16:18:16 +02:00
4aeb28d2f1 update SIM suite name 2025-05-10 16:14:48 +02:00
ddc4544456 spacepackets update, clippy fixes 2025-05-10 16:13:45 +02:00
9ab36c0362 Merge pull request 'update docs' (#226) from update-docs into main
Reviewed-on: #226
2025-05-06 16:21:17 +02:00
b95769c177 Merge branch 'main' into update-docs 2025-05-06 16:21:10 +02:00
bb20533ae1 Merge pull request 'remove token API for verification creator core' (#224) from simplify-verification-reporter-core into main
Reviewed-on: #224
2025-05-05 19:03:34 +02:00
27cacd0f43 remove token API for verification creator core 2025-05-05 19:02:37 +02:00
bd6488e87b update docs 2025-04-01 20:58:18 +02:00
52ec0d44aa Merge pull request 'bump README links' (#223) from update-readme-links into main
Reviewed-on: #223
2025-04-01 15:15:14 +02:00
4fa9f8d685 bump README links 2025-04-01 15:05:28 +02:00
108 changed files with 7027 additions and 43431 deletions


@@ -47,6 +47,8 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
components: rustfmt
- run: cargo fmt --all -- --check
docs:
@@ -55,7 +57,7 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@nightly
- run: cargo +nightly doc --all-features --config 'build.rustdocflags=["--cfg", "docs_rs"]'
- run: RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc -p satrs --all-features
clippy:
name: Clippy
@@ -63,4 +65,6 @@ jobs:
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
with:
components: clippy
- run: cargo clippy -- -D warnings


@@ -6,6 +6,7 @@ members = [
"satrs-example",
"satrs-minisim",
"satrs-shared",
"embedded-examples/embedded-client",
]
exclude = [


@@ -1,9 +1,10 @@
<p align="center"> <img src="misc/satrs-logo-v2.png" width="40%"> </p>
[![sat-rs website](https://img.shields.io/badge/sat--rs-website-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
[![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/)
[![sat-rs book](https://img.shields.io/badge/sat--rs-book-darkgreen?style=flat)](https://robamu.github.io/sat-rs/book/)
[![Crates.io](https://img.shields.io/crates/v/satrs)](https://crates.io/crates/satrs)
[![docs.rs](https://img.shields.io/docsrs/satrs)](https://docs.rs/satrs)
[![matrix chat](https://img.shields.io/matrix/sat-rs%3Amatrix.org)](https://matrix.to/#/#sat-rs:matrix.org)
sat-rs
=========
@@ -11,7 +12,7 @@ sat-rs
This is the repository of the sat-rs library. Its primary goal is to provide re-usable components
to write on-board software for remote systems like rovers or satellites. It is specifically written
for the special requirements for these systems. You can find an overview of the project and the
link to the [more high-level sat-rs book](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/)
link to the [more high-level sat-rs book](https://robamu.github.io/sat-rs/book/)
at the [IRS software projects website](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/).
This is early-stage software. Important features are missing. New releases
@@ -38,7 +39,7 @@ This project currently contains following crates:
Example of a simple example on-board software using various sat-rs components which can be run
on a host computer or on any system with a standard runtime like a Raspberry Pi.
* [`satrs-minisim`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-minisim):
Mini-Simulator based on [asynchronix](https://github.com/asynchronics/asynchronix) which
Mini-Simulator based on [nexosim](https://github.com/asynchronics/nexosim) which
simulates some physical devices for the `satrs-example` application device handlers.
* [`satrs-mib`](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-mib):
Components to build a mission information base from the on-board software directly.
@@ -61,6 +62,8 @@ Each project has its own `CHANGELOG.md`.
packet protocol implementations. This repository is re-exported in the
[`satrs`](https://egit.irs.uni-stuttgart.de/rust/satrs/src/branch/main/satrs)
crate.
* [`cfdp`](https://egit.irs.uni-stuttgart.de/rust/cfdp): CCSDS File Delivery Protocol
(CFDP) high-level library components.
# Flight Heritage


@@ -0,0 +1,18 @@
[package]
name = "embedded-client"
version = "0.1.0"
edition = "2024"
[dependencies]
clap = { version = "4", features = ["derive"] }
serialport = "4"
toml = "0.9"
serde = { version = "1", features = ["derive"] }
satrs-stm32f3-disco-rtic = { path = "../stm32f3-disco-rtic" }
spacepackets = { version = "0.17" }
tmtc-utils = { git = "https://egit.irs.uni-stuttgart.de/rust/tmtc-utils.git", version = "0.1" }
postcard = { version = "1", features = ["alloc"] }
cobs = "0.5"
fern = "0.7"
humantime = "2"
log = "0.4"


@@ -0,0 +1,2 @@
[interface]
serial_port = "/dev/ttyUSB0"


@@ -0,0 +1,107 @@
use std::{
fs::File,
io::Read,
path::Path,
time::{Duration, SystemTime},
};
use clap::Parser;
use cobs::CobsDecoderOwned;
use satrs_stm32f3_disco_rtic::Request;
use spacepackets::{CcsdsPacketCreatorOwned, CcsdsPacketReader, SpHeader};
use tmtc_utils::transport::serial::PacketTransportSerialCobs;
#[derive(Parser, Debug)]
struct Cli {
#[arg(short, long)]
ping: bool,
/// Set frequency in milliseconds.
#[arg(short, long)]
set_led_frequency: Option<u32>,
}
#[derive(Debug, serde::Deserialize)]
struct Config {
interface: Interface,
}
#[derive(Debug, serde::Deserialize)]
struct Interface {
serial_port: String,
}
fn setup_logger() -> Result<(), fern::InitError> {
fern::Dispatch::new()
.format(|out, message, record| {
out.finish(format_args!(
"[{} {} {}] {}",
humantime::format_rfc3339_seconds(SystemTime::now()),
record.level(),
record.target(),
message
))
})
.level(log::LevelFilter::Debug)
.chain(std::io::stdout())
.chain(fern::log_file("output.log")?)
.apply()?;
Ok(())
}
fn main() {
setup_logger().expect("failed to initialize logger");
println!("sat-rs embedded examples TMTC client");
let cli = Cli::parse();
let mut config_file =
File::open(Path::new("config.toml")).expect("opening config.toml file failed");
let mut toml_str = String::new();
config_file
.read_to_string(&mut toml_str)
.expect("reading config.toml file failed");
let config: Config = toml::from_str(&toml_str).expect("parsing config.toml file failed");
println!("Connecting to serial port {}", config.interface.serial_port);
let serial = serialport::new(config.interface.serial_port, 115200)
.open()
.expect("opening serial port failed");
let mut transport = PacketTransportSerialCobs::new(serial, CobsDecoderOwned::new(1024));
if cli.ping {
let request = Request::Ping;
let tc = create_stm32f3_tc(&request);
log::info!(
"Sending ping request with TC ID: {:#010x}",
tc.ccsds_packet_id_and_psc().raw()
);
transport.send(&tc.to_vec()).unwrap();
}
if let Some(freq_ms) = cli.set_led_frequency {
let request = Request::ChangeBlinkFrequency(Duration::from_millis(freq_ms as u64));
let tc = create_stm32f3_tc(&request);
log::info!(
"Sending change blink frequency request {:?} with TC ID: {:#010x}",
request,
tc.ccsds_packet_id_and_psc().raw()
);
transport.send(&tc.to_vec()).unwrap();
}
log::info!("Waiting for response...");
loop {
transport
.receive(|packet: &[u8]| {
let reader = CcsdsPacketReader::new_with_checksum(packet);
log::info!("Received packet: {:?}", reader);
})
.unwrap();
}
}
fn create_stm32f3_tc(request: &Request) -> CcsdsPacketCreatorOwned {
let req_raw = postcard::to_allocvec(&request).unwrap();
let sp_header = SpHeader::new_from_apid(satrs_stm32f3_disco_rtic::APID);
CcsdsPacketCreatorOwned::new_tc_with_checksum(sp_header, &req_raw).unwrap()
}


@@ -34,4 +34,4 @@ rustflags = [
target = "thumbv7em-none-eabihf" # Cortex-M4F and Cortex-M7F (with FPU)
[env]
DEFMT_LOG = "info"
DEFMT_LOG = "info"


@@ -1,4 +1,4 @@
/target
/itm.txt
/.cargo/config*
/.cargo/config.toml
/.vscode

File diff suppressed because it is too large


@@ -9,49 +9,28 @@ default-run = "satrs-stm32f3-disco-rtic"
[dependencies]
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7"
defmt = "0.3"
defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
panic-probe = { version = "0.3", features = ["print-defmt"] }
embedded-hal = "0.2.7"
defmt = "1"
defmt-rtt = { version = "1" }
panic-probe = { version = "1", features = ["print-defmt"] }
embedded-hal = "1"
cortex-m-semihosting = "0.5.0"
embassy-stm32 = { version = "0.4", features = ["defmt", "stm32f303vc", "unstable-pac"] }
enumset = "1"
heapless = "0.8"
heapless = "0.9"
spacepackets = { version = "0.17", default-features = false, features = ["defmt", "serde"] }
static_cell = "2"
cobs = { version = "0.5", default-features = false, features = ["defmt"] }
postcard = { version = "1" }
arbitrary-int = "2"
thiserror = { version = "2", default-features = false }
serde = { version = "1", default-features = false, features = ["derive"] }
[dependencies.rtic]
version = "2"
features = ["thumbv7-backend"]
[dependencies.rtic-monotonics]
version = "2"
features = ["cortex-m-systick"]
[dependencies.cobs]
version = "0.3"
default-features = false
[dependencies.stm32f3xx-hal]
git = "https://github.com/robamu/stm32f3xx-hal"
version = "0.11.0-alpha.0"
features = ["stm32f303xc", "rt", "enumset"]
branch = "complete-dma-update"
# Can be used in workspace to develop and update HAL
# path = "../stm32f3xx-hal"
[dependencies.stm32f3-discovery]
git = "https://github.com/robamu/stm32f3-discovery"
version = "0.8.0-alpha.0"
branch = "complete-dma-update-hal"
# Can be used in workspace to develop and update BSP
# path = "../stm32f3-discovery"
[dependencies.satrs]
# path = "satrs"
version = "0.2"
default-features = false
features = ["defmt"]
rtic = { version = "2", features = ["thumbv7-backend"] }
rtic-sync = { version = "1" }
rtic-monotonics = { version = "2", features = ["cortex-m-systick"] }
[dev-dependencies]
defmt-test = "0.3"
defmt-test = "0.4"
# cargo test
[profile.test]

File diff suppressed because it is too large


@@ -1,10 +0,0 @@
target extended-remote localhost:2331
monitor reset
# *try* to stop at the user entry point (it might be gone due to inlining)
break main
load
continue


@@ -1,12 +0,0 @@
# Sample OpenOCD configuration for the STM32F3DISCOVERY development board
# Depending on the hardware revision you got you'll have to pick ONE of these
# interfaces. At any time only one interface should be commented out.
# Revision C (newer revision)
source [find interface/stlink.cfg]
# Revision A and B (older revisions)
# source [find interface/stlink-v2.cfg]
source [find target/stm32f3x.cfg]


@@ -1,42 +0,0 @@
target extended-remote :3333
# print demangled symbols
set print asm-demangle on
# set backtrace limit to not have infinite backtrace loops
set backtrace limit 32
# detect unhandled exceptions, hard faults and panics
break DefaultHandler
break HardFault
break rust_begin_unwind
# # run the next few lines so the panic message is printed immediately
# # the number needs to be adjusted for your panic handler
# commands $bpnum
# next 4
# end
# *try* to stop at the user entry point (it might be gone due to inlining)
break main
# monitor arm semihosting enable
# # send captured ITM to the file itm.fifo
# # (the microcontroller SWO pin must be connected to the programmer SWO pin)
# # 8000000 must match the core clock frequency
# # 2000000 is the frequency of the SWO pin. This was added for newer
# openocd versions like v0.12.0.
# monitor tpiu config internal itm.txt uart off 8000000 2000000
# # OR: make the microcontroller SWO pin output compatible with UART (8N1)
# # 8000000 must match the core clock frequency
# # 2000000 is the frequency of the SWO pin
# monitor tpiu config external uart off 8000000 2000000
# # enable ITM port 0
# monitor itm port 0 on
load
# start the process but immediately halt the processor
stepi


@@ -1,8 +0,0 @@
/venv
/.tmtc-history.txt
/log
/.idea/*
!/.idea/runConfigurations
/seqcnt.txt
/tmtc_conf.json


@@ -1,4 +0,0 @@
{
"com_if": "serial_cobs",
"serial_baudrate": 115200
}


@@ -1,305 +0,0 @@
#!/usr/bin/env python3
"""Example client for the sat-rs example application"""
import struct
import logging
import sys
import time
from typing import Any, Optional, cast
from prompt_toolkit.history import FileHistory, History
from spacepackets.ecss.tm import CdsShortTimestamp
import tmtccmd
from spacepackets.ecss import PusTelemetry, PusTelecommand, PusTm, PusVerificator
from spacepackets.ecss.pus_17_test import Service17Tm
from spacepackets.ecss.pus_1_verification import UnpackParams, Service1Tm
from tmtccmd import TcHandlerBase, ProcedureParamsWrapper
from tmtccmd.core.base import BackendRequest
from tmtccmd.core.ccsds_backend import QueueWrapper
from tmtccmd.logging import add_colorlog_console_logger
from tmtccmd.pus import VerificationWrapper
from tmtccmd.tmtc import CcsdsTmHandler, SpecificApidHandlerBase
from tmtccmd.com import ComInterface
from tmtccmd.config import (
CmdTreeNode,
default_json_path,
SetupParams,
HookBase,
params_to_procedure_conversion,
)
from tmtccmd.config.com import SerialCfgWrapper
from tmtccmd.config import PreArgsParsingWrapper, SetupWrapper
from tmtccmd.logging.pus import (
RegularTmtcLogWrapper,
RawTmtcTimedLogWrapper,
TimedLogWhen,
)
from tmtccmd.tmtc import (
TcQueueEntryType,
ProcedureWrapper,
TcProcedureType,
FeedWrapper,
SendCbParams,
DefaultPusQueueHelper,
)
from tmtccmd.pus.s5_fsfw_event import Service5Tm
from spacepackets.seqcount import FileSeqCountProvider, PusFileSeqCountProvider
from tmtccmd.util.obj_id import ObjectIdDictT
_LOGGER = logging.getLogger()
EXAMPLE_PUS_APID = 0x02
class SatRsConfigHook(HookBase):
def __init__(self, json_cfg_path: str):
super().__init__(json_cfg_path)
def get_communication_interface(self, com_if_key: str) -> Optional[ComInterface]:
from tmtccmd.config.com import (
create_com_interface_default,
create_com_interface_cfg_default,
)
assert self.cfg_path is not None
cfg = create_com_interface_cfg_default(
com_if_key=com_if_key,
json_cfg_path=self.cfg_path,
space_packet_ids=None,
)
if cfg is None:
raise ValueError(
f"No valid configuration could be retrieved for the COM IF with key {com_if_key}"
)
if cfg.com_if_key == "serial_cobs":
cfg = cast(SerialCfgWrapper, cfg)
cfg.serial_cfg.serial_timeout = 0.5
return create_com_interface_default(cfg)
def get_command_definitions(self) -> CmdTreeNode:
"""This function should return the root node of the command definition tree."""
return create_cmd_definition_tree()
def get_cmd_history(self) -> Optional[History]:
"""Optionlly return a history class for the past command paths which will be used
when prompting a command path from the user in CLI mode."""
return FileHistory(".tmtc-history.txt")
def get_object_ids(self) -> ObjectIdDictT:
from tmtccmd.config.objects import get_core_object_ids
return get_core_object_ids()
def create_cmd_definition_tree() -> CmdTreeNode:
root_node = CmdTreeNode.root_node()
root_node.add_child(CmdTreeNode("ping", "Send PUS ping TC"))
root_node.add_child(CmdTreeNode("change_blink_freq", "Change blink frequency"))
return root_node
class PusHandler(SpecificApidHandlerBase):
def __init__(
self,
file_logger: logging.Logger,
verif_wrapper: VerificationWrapper,
raw_logger: RawTmtcTimedLogWrapper,
):
super().__init__(EXAMPLE_PUS_APID, None)
self.file_logger = file_logger
self.raw_logger = raw_logger
self.verif_wrapper = verif_wrapper
def handle_tm(self, packet: bytes, _user_args: Any):
try:
pus_tm = PusTm.unpack(
packet, timestamp_len=CdsShortTimestamp.TIMESTAMP_SIZE
)
except ValueError as e:
_LOGGER.warning("Could not generate PUS TM object from raw data")
_LOGGER.warning(f"Raw Packet: [{packet.hex(sep=',')}], REPR: {packet!r}")
raise e
service = pus_tm.service
tm_packet = None
if service == 1:
tm_packet = Service1Tm.unpack(
data=packet, params=UnpackParams(CdsShortTimestamp.TIMESTAMP_SIZE, 1, 2)
)
res = self.verif_wrapper.add_tm(tm_packet)
if res is None:
_LOGGER.info(
f"Received Verification TM[{tm_packet.service}, {tm_packet.subservice}] "
f"with Request ID {tm_packet.tc_req_id.as_u32():#08x}"
)
_LOGGER.warning(
f"No matching telecommand found for {tm_packet.tc_req_id}"
)
else:
self.verif_wrapper.log_to_console(tm_packet, res)
self.verif_wrapper.log_to_file(tm_packet, res)
if service == 3:
_LOGGER.info("No handling for HK packets implemented")
_LOGGER.info(f"Raw packet: 0x[{packet.hex(sep=',')}]")
pus_tm = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if pus_tm.subservice == 25:
if len(pus_tm.source_data) < 8:
raise ValueError("No addressable ID in HK packet")
json_str = pus_tm.source_data[8:]
_LOGGER.info("received JSON string: " + json_str.decode("utf-8"))
if service == 5:
tm_packet = Service5Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if service == 17:
tm_packet = Service17Tm.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
if tm_packet.subservice == 2:
_LOGGER.info("Received Ping Reply TM[17,2]")
else:
_LOGGER.info(
f"Received Test Packet with unknown subservice {tm_packet.subservice}"
)
if tm_packet is None:
_LOGGER.info(
f"The service {service} is not implemented in Telemetry Factory"
)
tm_packet = PusTelemetry.unpack(packet, CdsShortTimestamp.TIMESTAMP_SIZE)
self.raw_logger.log_tm(pus_tm)
def make_addressable_id(target_id: int, unique_id: int) -> bytes:
byte_string = bytearray(struct.pack("!I", target_id))
byte_string.extend(struct.pack("!I", unique_id))
return byte_string
class TcHandler(TcHandlerBase):
def __init__(
self,
seq_count_provider: FileSeqCountProvider,
verif_wrapper: VerificationWrapper,
):
super(TcHandler, self).__init__()
self.seq_count_provider = seq_count_provider
self.verif_wrapper = verif_wrapper
self.queue_helper = DefaultPusQueueHelper(
queue_wrapper=QueueWrapper.empty(),
tc_sched_timestamp_len=7,
seq_cnt_provider=seq_count_provider,
pus_verificator=verif_wrapper.pus_verificator,
default_pus_apid=EXAMPLE_PUS_APID,
)
def send_cb(self, send_params: SendCbParams):
entry_helper = send_params.entry
if entry_helper.is_tc:
if entry_helper.entry_type == TcQueueEntryType.PUS_TC:
pus_tc_wrapper = entry_helper.to_pus_tc_entry()
pus_tc_wrapper.pus_tc.seq_count = (
self.seq_count_provider.get_and_increment()
)
self.verif_wrapper.add_tc(pus_tc_wrapper.pus_tc)
raw_tc = pus_tc_wrapper.pus_tc.pack()
_LOGGER.info(f"Sending {pus_tc_wrapper.pus_tc}")
send_params.com_if.send(raw_tc)
elif entry_helper.entry_type == TcQueueEntryType.LOG:
log_entry = entry_helper.to_log_entry()
_LOGGER.info(log_entry.log_str)
def queue_finished_cb(self, info: ProcedureWrapper):
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
_LOGGER.info(f"Queue handling finished for command {def_proc.cmd_path}")
def feed_cb(self, info: ProcedureWrapper, wrapper: FeedWrapper):
q = self.queue_helper
q.queue_wrapper = wrapper.queue_wrapper
if info.proc_type == TcProcedureType.TREE_COMMANDING:
def_proc = info.to_tree_commanding_procedure()
cmd_path = def_proc.cmd_path
if cmd_path == "/ping":
q.add_log_cmd("Sending PUS ping telecommand")
q.add_pus_tc(PusTelecommand(service=17, subservice=1))
if cmd_path == "/change_blink_freq":
self.create_change_blink_freq_command(q)
def create_change_blink_freq_command(self, q: DefaultPusQueueHelper):
q.add_log_cmd("Changing blink frequency")
while True:
blink_freq = int(
input(
"Please specify new blink frequency in ms. Valid Range [2..10000]: "
)
)
if blink_freq < 2 or blink_freq > 10000:
print(
"Invalid blink frequency. Please specify a value between 2 and 10000."
)
continue
break
app_data = struct.pack("!I", blink_freq)
q.add_pus_tc(PusTelecommand(service=8, subservice=1, app_data=app_data))
def main():
add_colorlog_console_logger(_LOGGER)
tmtccmd.init_printout(False)
hook_obj = SatRsConfigHook(json_cfg_path=default_json_path())
parser_wrapper = PreArgsParsingWrapper()
parser_wrapper.create_default_parent_parser()
parser_wrapper.create_default_parser()
parser_wrapper.add_def_proc_args()
params = SetupParams()
post_args_wrapper = parser_wrapper.parse(hook_obj, params)
proc_wrapper = ProcedureParamsWrapper()
if post_args_wrapper.use_gui:
post_args_wrapper.set_params_without_prompts(proc_wrapper)
else:
post_args_wrapper.set_params_with_prompts(proc_wrapper)
params.apid = EXAMPLE_PUS_APID
setup_args = SetupWrapper(
hook_obj=hook_obj, setup_params=params, proc_param_wrapper=proc_wrapper
)
# Create console logger helper and file loggers
tmtc_logger = RegularTmtcLogWrapper()
file_logger = tmtc_logger.logger
raw_logger = RawTmtcTimedLogWrapper(when=TimedLogWhen.PER_HOUR, interval=1)
verificator = PusVerificator()
verification_wrapper = VerificationWrapper(verificator, _LOGGER, file_logger)
# Create primary TM handler and add it to the CCSDS Packet Handler
tm_handler = PusHandler(file_logger, verification_wrapper, raw_logger)
ccsds_handler = CcsdsTmHandler(generic_handler=None)
ccsds_handler.add_apid_handler(tm_handler)
# Create TC handler
seq_count_provider = PusFileSeqCountProvider()
tc_handler = TcHandler(seq_count_provider, verification_wrapper)
tmtccmd.setup(setup_args=setup_args)
init_proc = params_to_procedure_conversion(setup_args.proc_param_wrapper)
tmtc_backend = tmtccmd.create_default_tmtc_backend(
setup_wrapper=setup_args,
tm_handler=ccsds_handler,
tc_handler=tc_handler,
init_procedure=init_proc,
)
tmtccmd.start(tmtc_backend=tmtc_backend, hook_obj=hook_obj)
try:
while True:
state = tmtc_backend.periodic_op(None)
if state.request == BackendRequest.TERMINATION_NO_ERROR:
sys.exit(0)
elif state.request == BackendRequest.DELAY_IDLE:
_LOGGER.info("TMTC Client in IDLE mode")
time.sleep(3.0)
elif state.request == BackendRequest.DELAY_LISTENER:
time.sleep(0.8)
elif state.request == BackendRequest.DELAY_CUSTOM:
if state.next_delay.total_seconds() <= 0.4:
time.sleep(state.next_delay.total_seconds())
else:
time.sleep(0.4)
elif state.request == BackendRequest.CALL_NEXT:
pass
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__":
main()


@@ -1,2 +0,0 @@
tmtccmd == 8.0.1
# -e git+https://github.com/robamu-org/tmtccmd.git@main#egg=tmtccmd


@@ -1,76 +1,61 @@
#![no_std]
#![no_main]
use satrs_stm32f3_disco_rtic as _;
#![no_std]
use stm32f3_discovery::leds::Leds;
use stm32f3_discovery::stm32f3xx_hal::delay::Delay;
use stm32f3_discovery::stm32f3xx_hal::{pac, prelude::*};
use stm32f3_discovery::switch_hal::{OutputSwitch, ToggleableOutputSwitch};
use panic_probe as _;
use rtic::app;
#[cortex_m_rt::entry]
fn main() -> ! {
defmt::println!("STM32F3 Discovery Blinky");
let dp = pac::Peripherals::take().unwrap();
let mut rcc = dp.RCC.constrain();
let cp = cortex_m::Peripherals::take().unwrap();
let mut flash = dp.FLASH.constrain();
let clocks = rcc.cfgr.freeze(&mut flash.acr);
let mut delay = Delay::new(cp.SYST, clocks);
#[app(device = embassy_stm32)]
mod app {
use rtic_monotonics::fugit::ExtU32;
use rtic_monotonics::Monotonic as _;
use satrs_stm32f3_disco_rtic::{Direction, LedPinSet, Leds};
let mut gpioe = dp.GPIOE.split(&mut rcc.ahb);
let mut leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper,
);
let delay_ms = 200u16;
loop {
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
leds.ld3_n.toggle().ok();
delay.delay_ms(delay_ms);
rtic_monotonics::systick_monotonic!(Mono, 1000);
//explicit on/off
leds.ld4_nw.on().ok();
delay.delay_ms(delay_ms);
leds.ld4_nw.off().ok();
delay.delay_ms(delay_ms);
#[shared]
struct Shared {}
leds.ld5_ne.on().ok();
delay.delay_ms(delay_ms);
leds.ld5_ne.off().ok();
delay.delay_ms(delay_ms);
#[local]
struct Local {
leds: Leds,
current_dir: Direction,
}
leds.ld6_w.on().ok();
delay.delay_ms(delay_ms);
leds.ld6_w.off().ok();
delay.delay_ms(delay_ms);
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
let p = embassy_stm32::init(Default::default());
leds.ld7_e.on().ok();
delay.delay_ms(delay_ms);
leds.ld7_e.off().ok();
delay.delay_ms(delay_ms);
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery using RTICv2");
leds.ld8_sw.on().ok();
delay.delay_ms(delay_ms);
leds.ld8_sw.off().ok();
delay.delay_ms(delay_ms);
let led_pin_set = LedPinSet {
pin_n: p.PE8,
pin_ne: p.PE9,
pin_e: p.PE10,
pin_se: p.PE11,
pin_s: p.PE12,
pin_sw: p.PE13,
pin_w: p.PE14,
pin_nw: p.PE15,
};
let leds = Leds::new(led_pin_set);
leds.ld9_se.on().ok();
delay.delay_ms(delay_ms);
leds.ld9_se.off().ok();
delay.delay_ms(delay_ms);
// Initialize the systick interrupt & obtain the token to prove that we did
Mono::start(cx.core.SYST, 8_000_000);
blinky::spawn().expect("failed to spawn blinky task");
(
Shared {},
Local {
leds,
current_dir: Direction::North,
},
)
}
leds.ld10_s.on().ok();
delay.delay_ms(delay_ms);
leds.ld10_s.off().ok();
delay.delay_ms(delay_ms);
#[task(local = [leds, current_dir])]
async fn blinky(cx: blinky::Context) {
loop {
cx.local.leds.blink_next(cx.local.current_dir);
Mono::delay(200.millis()).await;
}
}
}


@@ -1,51 +1,190 @@
#![no_main]
#![no_std]
use cortex_m_semihosting::debug;
use arbitrary_int::u11;
use core::time::Duration;
use embassy_stm32::gpio::Output;
use spacepackets::{
ccsds_packet_len_for_user_data_len_with_checksum, CcsdsPacketCreationError,
CcsdsPacketCreatorWithReservedData, CcsdsPacketIdAndPsc, SpacePacketHeader,
};
use defmt_brtt as _; // global logger
pub const APID: u11 = u11::new(0x02);
use stm32f3xx_hal as _; // memory layout
use panic_probe as _;
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
#[derive(defmt::Format, serde::Serialize, serde::Deserialize, PartialEq, Eq, Clone, Copy)]
pub enum Direction {
North,
NorthEast,
East,
SouthEast,
South,
SouthWest,
West,
NorthWest,
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(debug::EXIT_SUCCESS);
impl Direction {
pub fn switch_to_next(&mut self) -> (Self, Self) {
let curr = *self;
*self = match self {
Direction::North => Direction::NorthEast,
Direction::NorthEast => Direction::East,
Direction::East => Direction::SouthEast,
Direction::SouthEast => Direction::South,
Direction::South => Direction::SouthWest,
Direction::SouthWest => Direction::West,
Direction::West => Direction::NorthWest,
Direction::NorthWest => Direction::North,
};
(curr, *self)
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(debug::EXIT_FAILURE);
#[derive(Copy, Clone, Debug, defmt::Format, serde::Serialize, serde::Deserialize)]
pub enum Request {
Ping,
ChangeBlinkFrequency(Duration),
}
#[derive(Debug, defmt::Format, serde::Serialize, serde::Deserialize)]
pub struct TmHeader {
pub tc_packet_id: Option<CcsdsPacketIdAndPsc>,
pub uptime_millis: u32,
}
#[derive(Debug, defmt::Format, serde::Serialize, serde::Deserialize)]
pub enum Response {
CommandDone,
}
pub fn tm_size(tm_header: &TmHeader, response: &Response) -> usize {
ccsds_packet_len_for_user_data_len_with_checksum(
postcard::experimental::serialized_size(tm_header).unwrap()
+ postcard::experimental::serialized_size(response).unwrap(),
)
.unwrap()
}
pub fn create_tm_packet(
buf: &mut [u8],
sp_header: SpacePacketHeader,
tm_header: TmHeader,
response: Response,
) -> Result<usize, CcsdsPacketCreationError> {
let packet_data_size = postcard::experimental::serialized_size(&tm_header).unwrap()
+ postcard::experimental::serialized_size(&response).unwrap();
let mut creator =
CcsdsPacketCreatorWithReservedData::new_tm_with_checksum(sp_header, packet_data_size, buf)?;
let current_index = postcard::to_slice(&tm_header, creator.packet_data_mut())
.unwrap()
.len();
postcard::to_slice(&response, &mut creator.packet_data_mut()[current_index..]).unwrap();
Ok(creator.finish())
}
pub struct Leds {
pub north: Output<'static>,
pub north_east: Output<'static>,
pub east: Output<'static>,
pub south_east: Output<'static>,
pub south: Output<'static>,
pub south_west: Output<'static>,
pub west: Output<'static>,
pub north_west: Output<'static>,
}
impl Leds {
pub fn blink_next(&mut self, current_dir: &mut Direction) {
let (prev, curr) = current_dir.switch_to_next();
self.set_dir_low(prev);
self.set_dir_high(curr);
}
pub fn set_dir(&mut self, dir: Direction, level: embassy_stm32::gpio::Level) {
match dir {
Direction::North => self.north.set_level(level),
Direction::NorthEast => self.north_east.set_level(level),
Direction::East => self.east.set_level(level),
Direction::SouthEast => self.south_east.set_level(level),
Direction::South => self.south.set_level(level),
Direction::SouthWest => self.south_west.set_level(level),
Direction::West => self.west.set_level(level),
Direction::NorthWest => self.north_west.set_level(level),
}
}
pub fn set_dir_low(&mut self, dir: Direction) {
self.set_dir(dir, embassy_stm32::gpio::Level::Low);
}
pub fn set_dir_high(&mut self, dir: Direction) {
self.set_dir(dir, embassy_stm32::gpio::Level::High);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. the module can be in any file but there can only be at most
// one `#[tests]` module in this library crate
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
pub struct LedPinSet {
pub pin_n: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE8>,
pub pin_ne: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE9>,
pub pin_e: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE10>,
pub pin_se: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE11>,
pub pin_s: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE12>,
pub pin_sw: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE13>,
pub pin_w: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE14>,
pub pin_nw: embassy_stm32::Peri<'static, embassy_stm32::peripherals::PE15>,
}
#[test]
fn it_works() {
assert!(true)
impl Leds {
pub fn new(pin_set: LedPinSet) -> Self {
let led_n = Output::new(
pin_set.pin_n,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_ne = Output::new(
pin_set.pin_ne,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_e = Output::new(
pin_set.pin_e,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_se = Output::new(
pin_set.pin_se,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_s = Output::new(
pin_set.pin_s,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_sw = Output::new(
pin_set.pin_sw,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_w = Output::new(
pin_set.pin_w,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
let led_nw = Output::new(
pin_set.pin_nw,
embassy_stm32::gpio::Level::Low,
embassy_stm32::gpio::Speed::Medium,
);
Self {
north: led_n,
north_east: led_ne,
east: led_e,
south_east: led_se,
south: led_s,
south_west: led_sw,
west: led_w,
north_west: led_nw,
}
}
}
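Both ends of the serial link share the postcard wire format of the `Request` enum defined above. As a minimal host-side sketch (the function name below is made up for illustration; the `to_allocvec`/`from_bytes` calls are the ones already used by the embedded-client and by the firmware RX handler in this diff), a round trip could look like this:

use core::time::Duration;
use satrs_stm32f3_disco_rtic::Request;

// Hypothetical helper, not part of the changeset: round-trips a Request
// through postcard the same way the client and the firmware do.
fn postcard_roundtrip_sketch() {
    let request = Request::ChangeBlinkFrequency(Duration::from_millis(500));
    // Host side: serialize before wrapping the bytes into a CCSDS packet.
    let raw = postcard::to_allocvec(&request).unwrap();
    // Firmware side: deserialize the packet data back into a Request.
    let decoded: Request = postcard::from_bytes(&raw).unwrap();
    assert!(matches!(decoded, Request::ChangeBlinkFrequency(d) if d.as_millis() == 500));
}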


@@ -1,682 +1,349 @@
#![no_std]
#![no_main]
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportCreator, VerificationToken,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::EcssEnumU16;
use satrs::spacepackets::CcsdsPacket;
use satrs::spacepackets::{ByteConversionError, SpHeader};
// global logger + panicking-behavior + memory layout
use satrs_stm32f3_disco_rtic as _;
use arbitrary_int::{u11, u14};
use cortex_m_semihosting::debug::{self, EXIT_FAILURE, EXIT_SUCCESS};
use satrs_stm32f3_disco_rtic::{create_tm_packet, tm_size, CcsdsPacketId, Request, Response};
use spacepackets::{CcsdsPacketCreationError, SpHeader};
use defmt_rtt as _; // global logger
use panic_probe as _;
use rtic::app;
use heapless::{mpmc::Q8, Vec};
#[allow(unused_imports)]
use rtic_monotonics::fugit::{MillisDurationU32, TimerInstantU32};
use rtic_monotonics::systick::prelude::*;
use satrs::seq_count::SequenceCountProviderCore;
use satrs::spacepackets::{ecss::PusPacket, ecss::WritablePusPacket};
use stm32f3xx_hal::dma::dma1;
use stm32f3xx_hal::gpio::{PushPull, AF7, PA2, PA3};
use stm32f3xx_hal::pac::USART2;
use stm32f3xx_hal::serial::{Rx, RxEvent, Serial, SerialDmaRx, SerialDmaTx, Tx, TxEvent};
use crate::app::Mono;
const UART_BAUD: u32 = 115200;
const DEFAULT_BLINK_FREQ_MS: u32 = 1000;
const TX_HANDLER_FREQ_MS: u32 = 20;
const MIN_DELAY_BETWEEN_TX_PACKETS_MS: u32 = 5;
const MAX_TC_LEN: usize = 128;
const MAX_TM_LEN: usize = 128;
pub const PUS_APID: u16 = 0x02;
type TxType = Tx<USART2, PA2<AF7<PushPull>>>;
type RxType = Rx<USART2, PA3<AF7<PushPull>>>;
type InstantFugit = TimerInstantU32<1000>;
type TxDmaTransferType = SerialDmaTx<&'static [u8], dma1::C7, TxType>;
type RxDmaTransferType = SerialDmaRx<&'static mut [u8], dma1::C6, RxType>;
pub const PUS_APID: u11 = u11::new(0x02);
// This is the predictable maximum overhead of the COBS encoding scheme.
// It is simply the maximum packet length divided by 254, rounded up.
const COBS_TC_OVERHEAD: usize = (MAX_TC_LEN + 254 - 1) / 254;
const COBS_TM_OVERHEAD: usize = (MAX_TM_LEN + 254 - 1) / 254;
const COBS_TM_OVERHEAD: usize = cobs::max_encoding_overhead(MAX_TM_LEN);
const TC_BUF_LEN: usize = MAX_TC_LEN + COBS_TC_OVERHEAD;
const TM_BUF_LEN: usize = MAX_TC_LEN + COBS_TM_OVERHEAD;
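// Worked example: for MAX_TC_LEN = 128 the formula above yields
// (128 + 254 - 1) / 254 = 1 byte of worst-case COBS overhead, so
// TC_BUF_LEN comes out to 129 bytes.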
// This is a static buffer which should ONLY (!) be used as the TX DMA
// transfer buffer.
static mut DMA_TX_BUF: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN];
// This is a static buffer which should ONLY (!) be used as the RX DMA
// transfer buffer.
static mut DMA_RX_BUF: [u8; TC_BUF_LEN] = [0; TC_BUF_LEN];
const TC_DMA_BUF_LEN: usize = 512;
type TmPacket = Vec<u8, MAX_TM_LEN>;
type TcPacket = Vec<u8, MAX_TC_LEN>;
type TmPacket = heapless::Vec<u8, MAX_TM_LEN>;
static TM_REQUESTS: Q8<TmPacket> = Q8::new();
static TM_QUEUE: heapless::mpmc::Queue<TmPacket, 16> = heapless::mpmc::Queue::new();
use core::sync::atomic::{AtomicU16, Ordering};
pub struct SeqCountProviderAtomicRef {
atomic: AtomicU16,
ordering: Ordering,
}
impl SeqCountProviderAtomicRef {
pub const fn new(ordering: Ordering) -> Self {
Self {
atomic: AtomicU16::new(0),
ordering,
}
}
}
impl SequenceCountProviderCore<u16> for SeqCountProviderAtomicRef {
fn get(&self) -> u16 {
self.atomic.load(self.ordering)
}
fn increment(&self) {
self.atomic.fetch_add(1, self.ordering);
}
fn get_and_increment(&self) -> u16 {
self.atomic.fetch_add(1, self.ordering)
}
}
static SEQ_COUNT_PROVIDER: SeqCountProviderAtomicRef =
SeqCountProviderAtomicRef::new(Ordering::Relaxed);
pub struct TxIdle {
tx: TxType,
dma_channel: dma1::C7,
}
#[derive(Debug, defmt::Format)]
#[derive(Debug, defmt::Format, thiserror::Error)]
pub enum TmSendError {
ByteConversion(ByteConversionError),
#[error("packet creation error: {0}")]
PacketCreation(#[from] CcsdsPacketCreationError),
#[error("queue error")]
Queue,
}
impl From<ByteConversionError> for TmSendError {
fn from(value: ByteConversionError) -> Self {
Self::ByteConversion(value)
}
}
fn send_tm(tm_creator: PusTmCreator) -> Result<(), TmSendError> {
if tm_creator.len_written() > MAX_TM_LEN {
return Err(ByteConversionError::ToSliceTooSmall {
expected: tm_creator.len_written(),
found: MAX_TM_LEN,
}
.into());
}
let mut tm_vec = TmPacket::new();
tm_vec
.resize(tm_creator.len_written(), 0)
.expect("vec resize failed");
tm_creator.write_to_bytes(tm_vec.as_mut_slice())?;
defmt::info!(
"Sending TM[{},{}] with size {}",
tm_creator.service(),
tm_creator.subservice(),
tm_creator.len_written()
);
TM_REQUESTS
.enqueue(tm_vec)
.map_err(|_| TmSendError::Queue)?;
Ok(())
}
fn handle_tm_send_error(error: TmSendError) {
defmt::warn!("sending tm failed with error {}", error);
}
pub enum UartTxState {
// Wrapped in an option because we need an owned type later.
Idle(Option<TxIdle>),
// Same as above
Transmitting(Option<TxDmaTransferType>),
}
pub struct UartTxShared {
last_completed: Option<InstantFugit>,
state: UartTxState,
}
pub struct RequestWithToken {
token: VerificationToken<TcStateAccepted>,
request: Request,
}
#[derive(Debug, defmt::Format)]
pub enum Request {
Ping,
ChangeBlinkFrequency(u32),
pub struct RequestWithTcId {
pub request: Request,
pub tc_id: CcsdsPacketId,
}
#[derive(Debug, defmt::Format)]
pub enum RequestError {
InvalidApid = 1,
InvalidService = 2,
InvalidSubservice = 3,
NotEnoughAppData = 4,
}
pub fn convert_pus_tc_to_request(
tc: &PusTcReader,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) -> Result<RequestWithToken, RequestError> {
defmt::info!(
"Found PUS TC [{},{}] with length {}",
tc.service(),
tc.subservice(),
tc.len_packed()
);
let token = verif_reporter.add_tc(tc);
if tc.apid() != PUS_APID {
defmt::warn!("Received tc with unknown APID {}", tc.apid());
let result = send_tm(
verif_reporter
.acceptance_failure(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
FailParams::new(timestamp, &EcssEnumU16::new(0), &[]),
)
.unwrap(),
);
if let Err(e) = result {
handle_tm_send_error(e);
}
return Err(RequestError::InvalidApid);
}
let (tm_creator, accepted_token) = verif_reporter
.acceptance_success(
src_data_buf,
token,
SEQ_COUNT_PROVIDER.get_and_increment(),
0,
timestamp,
)
.unwrap();
if let Err(e) = send_tm(tm_creator) {
handle_tm_send_error(e);
}
if tc.service() == 17 && tc.subservice() == 1 {
if tc.subservice() == 1 {
return Ok(RequestWithToken {
request: Request::Ping,
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else if tc.service() == 8 {
if tc.subservice() == 1 {
if tc.user_data().len() < 4 {
return Err(RequestError::NotEnoughAppData);
}
let new_freq_ms = u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap());
return Ok(RequestWithToken {
request: Request::ChangeBlinkFrequency(new_freq_ms),
token: accepted_token,
});
} else {
return Err(RequestError::InvalidSubservice);
}
} else {
return Err(RequestError::InvalidService);
}
}
#[app(device = stm32f3xx_hal::pac, peripherals = true)]
#[app(device = embassy_stm32)]
mod app {
use super::*;
use core::slice::Iter;
use satrs::pus::verification::{TcStateStarted, VerificationReportCreator};
use satrs::spacepackets::{ecss::tc::PusTcReader, time::cds::P_FIELD_BASE};
#[allow(unused_imports)]
use stm32f3_discovery::leds::Direction;
use stm32f3_discovery::leds::Leds;
use stm32f3xx_hal::prelude::*;
use core::time::Duration;
use stm32f3_discovery::switch_hal::OutputSwitch;
use stm32f3xx_hal::Switch;
#[allow(dead_code)]
type SerialType = Serial<USART2, (PA2<AF7<PushPull>>, PA3<AF7<PushPull>>)>;
use super::*;
use arbitrary_int::u14;
use rtic::Mutex;
use rtic_sync::{
channel::{Receiver, Sender},
make_channel,
};
use satrs_stm32f3_disco_rtic::{CcsdsPacketId, LedPinSet, Request, Response};
use spacepackets::CcsdsPacketReader;
systick_monotonic!(Mono, 1000);
embassy_stm32::bind_interrupts!(struct Irqs {
USART2 => embassy_stm32::usart::InterruptHandler<embassy_stm32::peripherals::USART2>;
});
#[shared]
struct Shared {
blink_freq: MillisDurationU32,
tx_shared: UartTxShared,
rx_transfer: Option<RxDmaTransferType>,
blink_freq: Duration,
}
#[local]
struct Local {
verif_reporter: VerificationReportCreator,
leds: Leds,
last_dir: Direction,
curr_dir: Iter<'static, Direction>,
leds: satrs_stm32f3_disco_rtic::Leds,
current_dir: satrs_stm32f3_disco_rtic::Direction,
seq_count: u14,
tx: embassy_stm32::usart::UartTx<'static, embassy_stm32::mode::Async>,
rx: embassy_stm32::usart::RingBufferedUartRx<'static>,
}
#[init]
fn init(cx: init::Context) -> (Shared, Local) {
let mut rcc = cx.device.RCC.constrain();
static DMA_BUF: static_cell::ConstStaticCell<[u8; TC_DMA_BUF_LEN]> =
static_cell::ConstStaticCell::new([0; TC_DMA_BUF_LEN]);
let p = embassy_stm32::init(Default::default());
let (req_sender, req_receiver) = make_channel!(RequestWithTcId, 16);
// Initialize the systick interrupt & obtain the token to prove that we did
Mono::start(cx.core.SYST, 8_000_000);
let mut flash = cx.device.FLASH.constrain();
let clocks = rcc
.cfgr
.use_hse(8.MHz())
.sysclk(8.MHz())
.pclk1(8.MHz())
.freeze(&mut flash.acr);
defmt::info!("sat-rs demo application for the STM32F3-Discovery with RTICv2");
let led_pin_set = LedPinSet {
pin_n: p.PE8,
pin_ne: p.PE9,
pin_e: p.PE10,
pin_se: p.PE11,
pin_s: p.PE12,
pin_sw: p.PE13,
pin_w: p.PE14,
pin_nw: p.PE15,
};
let leds = satrs_stm32f3_disco_rtic::Leds::new(led_pin_set);
// Set up monotonic timer.
//let mono_timer = MonoTimer::new(cx.core.DWT, clocks, &mut cx.core.DCB);
let mut config = embassy_stm32::usart::Config::default();
config.baudrate = UART_BAUD;
let uart = embassy_stm32::usart::Uart::new(
p.USART2, p.PA3, p.PA2, Irqs, p.DMA1_CH7, p.DMA1_CH6, config,
)
.unwrap();
defmt::info!("Starting sat-rs demo application for the STM32F3-Discovery");
let mut gpioe = cx.device.GPIOE.split(&mut rcc.ahb);
let leds = Leds::new(
gpioe.pe8,
gpioe.pe9,
gpioe.pe10,
gpioe.pe11,
gpioe.pe12,
gpioe.pe13,
gpioe.pe14,
gpioe.pe15,
&mut gpioe.moder,
&mut gpioe.otyper,
);
let mut gpioa = cx.device.GPIOA.split(&mut rcc.ahb);
// USART2 pins
let mut pins = (
// TX pin: PA2
gpioa
.pa2
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
// RX pin: PA3
gpioa
.pa3
.into_af_push_pull(&mut gpioa.moder, &mut gpioa.otyper, &mut gpioa.afrl),
);
pins.1.internal_pull_up(&mut gpioa.pupdr, true);
let mut usart2 = Serial::new(
cx.device.USART2,
pins,
UART_BAUD.Bd(),
clocks,
&mut rcc.apb1,
);
usart2.configure_rx_interrupt(RxEvent::Idle, Switch::On);
// This interrupt is enabled to re-schedule new transfers in the interrupt handler immediately.
usart2.configure_tx_interrupt(TxEvent::TransmissionComplete, Switch::On);
let dma1 = cx.device.DMA1.split(&mut rcc.ahb);
let (mut tx_serial, mut rx_serial) = usart2.split();
// This interrupt is immediately triggered, clear it. It will only be reset
// by the hardware when data is received on RX (RXNE event)
rx_serial.clear_event(RxEvent::Idle);
// For some reason, this is also immediately triggered..
tx_serial.clear_event(TxEvent::TransmissionComplete);
let rx_transfer = rx_serial.read_exact(unsafe { DMA_RX_BUF.as_mut_slice() }, dma1.ch6);
let (tx, rx) = uart.split();
defmt::info!("Spawning tasks");
blink::spawn().unwrap();
blinky::spawn().unwrap();
serial_tx_handler::spawn().unwrap();
let verif_reporter = VerificationReportCreator::new(PUS_APID).unwrap();
serial_rx_handler::spawn(req_sender).unwrap();
req_handler::spawn(req_receiver).unwrap();
(
Shared {
blink_freq: MillisDurationU32::from_ticks(DEFAULT_BLINK_FREQ_MS),
tx_shared: UartTxShared {
last_completed: None,
state: UartTxState::Idle(Some(TxIdle {
tx: tx_serial,
dma_channel: dma1.ch7,
})),
},
rx_transfer: Some(rx_transfer),
blink_freq: Duration::from_millis(DEFAULT_BLINK_FREQ_MS as u64),
},
Local {
verif_reporter,
leds,
last_dir: Direction::North,
curr_dir: Direction::iter(),
tx,
seq_count: u14::new(0),
rx: rx.into_ring_buffered(DMA_BUF.take()),
current_dir: satrs_stm32f3_disco_rtic::Direction::North,
},
)
}
#[task(local = [leds, curr_dir, last_dir], shared=[blink_freq])]
async fn blink(mut cx: blink::Context) {
let blink::LocalResources {
leds,
curr_dir,
last_dir,
..
} = cx.local;
let mut toggle_leds = |dir: &Direction| {
let last_led = leds.for_direction(*last_dir);
last_led.off().ok();
let led = leds.for_direction(*dir);
led.on().ok();
*last_dir = *dir;
};
#[task(local = [leds, current_dir], shared=[blink_freq])]
async fn blinky(mut cx: blinky::Context) {
loop {
match curr_dir.next() {
Some(dir) => {
toggle_leds(dir);
}
None => {
*curr_dir = Direction::iter();
toggle_leds(curr_dir.next().unwrap());
}
}
cx.local.leds.blink_next(cx.local.current_dir);
let current_blink_freq = cx.shared.blink_freq.lock(|current| *current);
Mono::delay(current_blink_freq).await;
Mono::delay(MillisDurationU32::from_ticks(
current_blink_freq.as_millis() as u32,
))
.await;
}
}
#[task(
shared = [tx_shared],
local = [
tx,
encoded_buf: [u8; TM_BUF_LEN] = [0; TM_BUF_LEN]
],
shared = [],
)]
async fn serial_tx_handler(mut cx: serial_tx_handler::Context) {
async fn serial_tx_handler(cx: serial_tx_handler::Context) {
loop {
let is_idle = cx.shared.tx_shared.lock(|tx_shared| {
if let UartTxState::Idle(_) = tx_shared.state {
return true;
}
false
});
if is_idle {
let last_completed = cx.shared.tx_shared.lock(|shared| shared.last_completed);
if let Some(last_completed) = last_completed {
let elapsed_ms = (Mono::now() - last_completed).to_millis();
if elapsed_ms < MIN_DELAY_BETWEEN_TX_PACKETS_MS {
Mono::delay((MIN_DELAY_BETWEEN_TX_PACKETS_MS - elapsed_ms).millis()).await;
}
}
} else {
// Check for completion after 1 ms
Mono::delay(1.millis()).await;
while let Some(vec) = TM_QUEUE.dequeue() {
let encoded_len =
cobs::encode_including_sentinels(&vec[0..vec.len()], cx.local.encoded_buf);
defmt::debug!("sending {} bytes over UART", encoded_len);
cx.local
.tx
.write(&cx.local.encoded_buf[0..encoded_len])
.await
.unwrap();
continue;
}
if let Some(vec) = TM_REQUESTS.dequeue() {
cx.shared
.tx_shared
.lock(|tx_shared| match &mut tx_shared.state {
UartTxState::Idle(tx) => {
let encoded_len;
//debug!(target: "serial_tx_handler", "bytes: {:x?}", &buf[0..len]);
// Safety: We only copy the data into the TX DMA buffer in this task.
// If the DMA is active, another branch will be taken.
unsafe {
// 0 sentinel value as start marker
DMA_TX_BUF[0] = 0;
encoded_len =
cobs::encode(&vec[0..vec.len()], &mut DMA_TX_BUF[1..]);
// Should never panic, we accounted for the overhead.
// Write into transfer buffer directly, no need for intermediate
// encoding buffer.
// 0 end marker
DMA_TX_BUF[encoded_len + 1] = 0;
}
//debug!(target: "serial_tx_handler", "Sending {} bytes", encoded_len + 2);
//debug!("sent: {:x?}", &mut_tx_dma_buf[0..encoded_len + 2]);
let tx_idle = tx.take().unwrap();
// Transfer completion and re-scheduling of new TX transfers will be done
// by the IRQ handler.
// SAFETY: The DMA is the exclusive writer to the DMA buffer now.
let transfer = tx_idle.tx.write_all(
unsafe { &DMA_TX_BUF[0..encoded_len + 2] },
tx_idle.dma_channel,
);
tx_shared.state = UartTxState::Transmitting(Some(transfer));
// The memory block is automatically returned to the pool when it is dropped.
}
UartTxState::Transmitting(_) => (),
});
// Check for completion after 1 ms
Mono::delay(1.millis()).await;
continue;
}
// Nothing to do, and we are idle.
Mono::delay(TX_HANDLER_FREQ_MS.millis()).await;
}
}
#[task(
local = [
verif_reporter,
rx,
read_buf: [u8; 128] = [0; 128],
decode_buf: [u8; MAX_TC_LEN] = [0; MAX_TC_LEN],
src_data_buf: [u8; MAX_TM_LEN] = [0; MAX_TM_LEN],
timestamp: [u8; 7] = [0; 7],
],
shared = [blink_freq]
)]
async fn serial_rx_handler(
mut cx: serial_rx_handler::Context,
received_packet: Vec<u8, MAX_TC_LEN>,
cx: serial_rx_handler::Context,
mut sender: Sender<'static, RequestWithTcId, 16>,
) {
cx.local.timestamp[0] = P_FIELD_BASE;
defmt::info!("Received packet with {} bytes", received_packet.len());
let decode_buf = cx.local.decode_buf;
let packet = received_packet.as_slice();
let mut start_idx = None;
for (idx, byte) in packet.iter().enumerate() {
if *byte != 0 {
start_idx = Some(idx);
break;
}
}
if start_idx.is_none() {
defmt::warn!("decoding error, can only process cobs encoded frames, data is all 0");
return;
}
let start_idx = start_idx.unwrap();
match cobs::decode(&received_packet.as_slice()[start_idx..], decode_buf) {
Ok(len) => {
defmt::info!("Decoded packet length: {}", len);
let pus_tc = PusTcReader::new(decode_buf);
match pus_tc {
Ok((tc, _tc_len)) => {
match convert_pus_tc_to_request(
&tc,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
) {
Ok(request_with_token) => {
let started_token = handle_start_verification(
request_with_token.token,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
);
match request_with_token.request {
Request::Ping => {
handle_ping_request(cx.local.timestamp);
let mut decoder = cobs::CobsDecoder::new(cx.local.decode_buf);
loop {
match cx.local.rx.read(cx.local.read_buf).await {
Ok(bytes) => {
defmt::debug!("received {} bytes over UART", bytes);
for byte in cx.local.read_buf[0..bytes].iter() {
match decoder.feed(*byte) {
Ok(None) => (),
Ok(Some(packet_size)) => {
match CcsdsPacketReader::new_with_checksum(
&decoder.dest()[0..packet_size],
) {
Ok(packet) => {
let packet_id = packet.packet_id();
let psc = packet.psc();
let tc_packet_id = CcsdsPacketId { packet_id, psc };
if let Ok(request) =
postcard::from_bytes::<Request>(packet.packet_data())
{
sender
.send(RequestWithTcId {
request,
tc_id: tc_packet_id,
})
.await
.unwrap();
}
}
Request::ChangeBlinkFrequency(new_freq_ms) => {
defmt::info!("Received blink frequency change request with new frequncy {}", new_freq_ms);
cx.shared.blink_freq.lock(|blink_freq| {
*blink_freq =
MillisDurationU32::from_ticks(new_freq_ms);
});
Err(e) => {
defmt::error!("error unpacking ccsds packet: {}", e);
}
}
handle_completion_verification(
started_token,
cx.local.verif_reporter,
cx.local.src_data_buf,
cx.local.timestamp,
);
}
Err(e) => {
// TODO: Error handling: Send verification failure based on request error.
defmt::warn!("request error {}", e);
defmt::error!("cobs decoding error: {}", e);
}
}
}
Err(e) => {
defmt::warn!("Error unpacking PUS TC: {}", e);
}
}
Err(e) => {
defmt::error!("uart read error: {}", e);
}
}
Err(_) => {
defmt::warn!("decoding error, can only process cobs encoded frames")
}
}
}
fn handle_ping_request(timestamp: &[u8]) {
defmt::info!("Received PUS ping telecommand, sending ping reply TM[17,2]");
let sp_header =
SpHeader::new_for_unseg_tc(PUS_APID, SEQ_COUNT_PROVIDER.get_and_increment(), 0);
let sec_header = PusTmSecondaryHeader::new_simple(17, 2, timestamp);
let ping_reply = PusTmCreator::new(sp_header, sec_header, &[], true);
let mut tm_packet = TmPacket::new();
tm_packet
.resize(ping_reply.len_written(), 0)
.expect("vec resize failed");
ping_reply.write_to_bytes(&mut tm_packet).unwrap();
if TM_REQUESTS.enqueue(tm_packet).is_err() {
defmt::warn!("TC queue full");
return;
}
}
fn handle_start_verification(
accepted_token: VerificationToken<TcStateAccepted>,
verif_reporter: &mut VerificationReportCreator,
src_data_buf: &mut [u8],
timestamp: &[u8],
) -> VerificationToken<TcStateStarted> {
let (tm_creator, started_token) = verif_reporter
.start_success(
src_data_buf,
accepted_token,
SEQ_COUNT_PROVIDER.get(),
0,
&timestamp,
)
.unwrap();
let result = send_tm(tm_creator);
if let Err(e) = result {
handle_tm_send_error(e);
}
started_token
}
fn handle_completion_verification(
    started_token: VerificationToken<TcStateStarted>,
    verif_reporter: &mut VerificationReportCreator,
    src_data_buf: &mut [u8],
    timestamp: &[u8],
) {
    let result = send_tm(
        verif_reporter
            .completion_success(
                src_data_buf,
                started_token,
                SEQ_COUNT_PROVIDER.get(),
                0,
                timestamp,
            )
            .unwrap(),
    );
    if let Err(e) = result {
        handle_tm_send_error(e);
    }
}

#[task(shared = [blink_freq], local = [seq_count])]
async fn req_handler(
    mut cx: req_handler::Context,
    mut receiver: Receiver<'static, RequestWithTcId, 16>,
) {
    loop {
        match receiver.recv().await {
            Ok(request_with_tc_id) => {
                let tm_send_result = match request_with_tc_id.request {
                    Request::Ping => handle_ping_request(&mut cx, request_with_tc_id.tc_id),
                    Request::ChangeBlinkFrequency(duration) => {
                        handle_change_blink_frequency_request(
                            &mut cx,
                            request_with_tc_id.tc_id,
                            duration,
                        )
                    }
                };
                if let Err(e) = tm_send_result {
                    defmt::error!("error sending TM response: {}", e);
                }
            }
            Err(_e) => defmt::error!("request receive error"),
        }
    }
}
#[task(binds = DMA1_CH6, shared = [rx_transfer])]
fn rx_dma_isr(mut cx: rx_dma_isr::Context) {
let mut tc_packet = TcPacket::new();
cx.shared.rx_transfer.lock(|rx_transfer| {
let rx_ref = rx_transfer.as_ref().unwrap();
if rx_ref.is_complete() {
let uart_rx_owned = rx_transfer.take().unwrap();
let (buf, c, rx) = uart_rx_owned.stop();
// The received data is transferred to another task now to avoid any processing overhead
// during the interrupt. There are multiple ways to do this, we use a stack allocated vector here
// to do this.
tc_packet.resize(buf.len(), 0).expect("vec resize failed");
tc_packet.copy_from_slice(buf);
// Start the next transfer as soon as possible.
*rx_transfer = Some(rx.read_exact(buf, c));
// Send the vector to a regular task.
serial_rx_handler::spawn(tc_packet).expect("spawning rx handler task failed");
// If this happens, there is a high chance that the maximum packet length was
// exceeded. Circular mode is not used here, so data might be missed.
defmt::warn!(
"rx transfer with maximum length {}, might miss data",
TC_BUF_LEN
);
}
});
}
fn handle_ping_request(
cx: &mut req_handler::Context,
tc_packet_id: CcsdsPacketId,
) -> Result<(), TmSendError> {
defmt::info!("Received PUS ping telecommand, sending ping reply");
send_tm(tc_packet_id, Response::CommandDone, *cx.local.seq_count)?;
*cx.local.seq_count = cx.local.seq_count.wrapping_add(u14::new(1));
Ok(())
}
#[task(binds = USART2_EXTI26, shared = [rx_transfer, tx_shared])]
fn serial_isr(mut cx: serial_isr::Context) {
    cx.shared
        .tx_shared
        .lock(|tx_shared| match &mut tx_shared.state {
            UartTxState::Idle(_) => (),
            UartTxState::Transmitting(transfer) => {
                let transfer_ref = transfer.as_ref().unwrap();
                if transfer_ref.is_complete() {
                    let transfer = transfer.take().unwrap();
                    let (_, dma_channel, mut tx) = transfer.stop();
                    tx.clear_event(TxEvent::TransmissionComplete);
                    tx_shared.state = UartTxState::Idle(Some(TxIdle { tx, dma_channel }));
                    // We cache the last completed time to ensure that there is a minimum delay between consecutive
                    // transferred packets.
                    tx_shared.last_completed = Some(Mono::now());
                }
            }
        });
    let mut tc_packet = TcPacket::new();
    cx.shared.rx_transfer.lock(|rx_transfer| {
        let rx_transfer_ref = rx_transfer.as_ref().unwrap();
        // Received a partial packet.
        if rx_transfer_ref.is_event_triggered(RxEvent::Idle) {
            let rx_transfer_owned = rx_transfer.take().unwrap();
            let (buf, ch, mut rx, rx_len) = rx_transfer_owned.stop_and_return_received_bytes();
            // The received data is transferred to another task now to avoid any processing overhead
            // during the interrupt. There are multiple ways to do this, we use a stack
            // allocated vector to do this.
            tc_packet
                .resize(rx_len as usize, 0)
                .expect("vec resize failed");
            tc_packet[0..rx_len as usize].copy_from_slice(&buf[0..rx_len as usize]);
            rx.clear_event(RxEvent::Idle);
            serial_rx_handler::spawn(tc_packet).expect("spawning rx handler failed");
            *rx_transfer = Some(rx.read_exact(buf, ch));
        }
    });
}

fn handle_change_blink_frequency_request(
    cx: &mut req_handler::Context,
    tc_packet_id: CcsdsPacketId,
    duration: Duration,
) -> Result<(), TmSendError> {
    defmt::info!(
        "Received ChangeBlinkFrequency request, new frequency: {} ms",
        duration.as_millis()
    );
    cx.shared
        .blink_freq
        .lock(|blink_freq| *blink_freq = duration);
    send_tm(tc_packet_id, Response::CommandDone, *cx.local.seq_count)?;
    *cx.local.seq_count = cx.local.seq_count.wrapping_add(u14::new(1));
    Ok(())
}
fn send_tm(
tc_packet_id: CcsdsPacketId,
response: Response,
current_seq_count: u14,
) -> Result<(), TmSendError> {
let sp_header = SpHeader::new_for_unseg_tc(PUS_APID, current_seq_count, 0);
let tm_header = satrs_stm32f3_disco_rtic::TmHeader {
tc_packet_id: Some(tc_packet_id),
uptime_millis: Mono::now().duration_since_epoch().to_millis(),
};
let mut tm_packet = TmPacket::new();
let tm_size = tm_size(&tm_header, &response);
tm_packet.resize(tm_size, 0).expect("vec resize failed");
create_tm_packet(&mut tm_packet, sp_header, tm_header, response)?;
if TM_QUEUE.enqueue(tm_packet).is_err() {
defmt::warn!("TC queue full");
return Err(TmSendError::Queue);
}
Ok(())
}
// same panicking *behavior* as `panic-probe` but doesn't print a panic message
// this prevents the panic message being printed *twice* when `defmt::panic` is invoked
#[defmt::panic_handler]
fn panic() -> ! {
cortex_m::asm::udf()
}
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with status code 0.
pub fn exit() -> ! {
loop {
debug::exit(EXIT_SUCCESS);
}
}
/// Hardfault handler.
///
/// Terminates the application and makes a semihosting-capable debug tool exit
/// with an error. This seems better than the default, which is to spin in a
/// loop.
#[cortex_m_rt::exception]
unsafe fn HardFault(_frame: &cortex_m_rt::ExceptionFrame) -> ! {
loop {
debug::exit(EXIT_FAILURE);
}
}
// defmt-test 0.3.0 has the limitation that this `#[tests]` attribute can only be used
// once within a crate. the module can be in any file but there can only be at most
// one `#[tests]` module in this library crate
#[cfg(test)]
#[defmt_test::tests]
mod unit_tests {
use defmt::assert;
#[test]
fn it_works() {
assert!(true)
}
}
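To clarify the new UART protocol handled by the reworked serial_rx_handler above: requests arrive as COBS frames whose payload is a CCSDS space packet carrying a postcard-serialized Request. The following host-side sketch only illustrates the postcard and COBS steps; the Request definition, the buffer sizes and the omitted CCSDS packet wrapping (with checksum) are assumptions for illustration, not the crate's actual definitions.

use serde::{Deserialize, Serialize};

// Assumed stand-in for the firmware's Request type; the real definition lives in the
// satrs-stm32f3-disco-rtic crate and may differ.
#[derive(Debug, Serialize, Deserialize)]
enum Request {
    Ping,
    ChangeBlinkFrequency(u32),
}

fn main() {
    let request = Request::ChangeBlinkFrequency(500);
    // postcard-serialize the request payload.
    let mut payload = [0u8; 64];
    let used = postcard::to_slice(&request, &mut payload).expect("serialization failed");
    // COBS-encode it so the receiver can detect frame boundaries on the delimiter byte.
    // The real link additionally wraps the payload in a CCSDS space packet with a checksum,
    // which is omitted here.
    let mut frame = [0u8; 80];
    let frame_len = cobs::encode(used, &mut frame);
    println!("sending {} byte frame: {:x?}", frame_len, &frame[..frame_len]);
}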


View File

@@ -16,16 +16,17 @@ harness = false
[dependencies]
cortex-m = { version = "0.7", features = ["critical-section-single-core"] }
cortex-m-rt = "0.7"
defmt = "0.3"
defmt = "1"
defmt-brtt = { version = "0.1", default-features = false, features = ["rtt"] }
panic-probe = { version = "0.3", features = ["print-defmt"] }
panic-probe = { version = "1", features = ["print-defmt"] }
cortex-m-semihosting = "0.5.0"
# TODO: Replace with embassy-hal.
stm32h7xx-hal = { version="0.16", features= ["stm32h743v", "ethernet"] }
embedded-alloc = "0.6"
rtic-sync = { version = "1", features = ["defmt-03"] }
[dependencies.smoltcp]
version = "0.11"
version = "0.12"
default-features = false
features = ["medium-ethernet", "proto-ipv4", "socket-raw", "socket-dhcpv4", "socket-udp", "defmt"]
@@ -39,12 +40,11 @@ features = ["cortex-m-systick"]
[dependencies.satrs]
path = "../../satrs"
version = "0.2"
default-features = false
features = ["defmt", "heapless"]
[dev-dependencies]
defmt-test = "0.3"
defmt-test = "0.4"
# cargo build/run
[profile.dev]

View File

@@ -0,0 +1,8 @@
[package]
name = "satrs-gen"
version = "0.1.0"
edition = "2024"
[dependencies]
toml = "0.8"
heck = "0.5"

View File

@@ -0,0 +1,34 @@
[apid]
Sched = 1
GenericPus = 2
Acs = 3
Cfdp = 4
Tmtc = 5
Eps = 6
[ids]
[ids.Eps]
Pcdu = 0
Subsystem = 1
[ids.Tmtc]
UdpServer = 0
TcpServer = 1
[ids.GenericPus]
PusEventManagement = 0
PusRouting = 1
PusTest = 2
PusAction = 3
PusMode = 4
PusHk = 5
[ids.Sched]
PusSched = 0
[ids.Acs]
Subsystem = 1
Assembly = 2
Mgm0 = 3
Mgm1 = 4

View File

@@ -0,0 +1,91 @@
use heck::{ToShoutySnakeCase, ToSnakeCase};
use std::{
collections::BTreeMap,
fs::{self, File},
io::{self, Write},
};
use toml::{Value, map::Map};
fn main() -> io::Result<()> {
// Read the configuration file
let config_str = fs::read_to_string("components.toml").expect("Unable to read file");
let config: Value = toml::from_str(&config_str).expect("Unable to parse TOML");
let mut output = File::create("../satrs-example/src/ids.rs")?;
generate_rust_code(&config, &mut output);
Ok(())
}
fn sort_enum_table(table_map: &Map<String, Value>) -> BTreeMap<u64, &str> {
// Collect entries into a BTreeMap to sort them by key
let mut sorted_entries: BTreeMap<u64, &str> = BTreeMap::new();
for (key, value) in table_map {
if let Some(value) = value.as_integer() {
if !(0..=0x7FF).contains(&value) {
panic!("Invalid APID value: {}", value);
}
sorted_entries.insert(value as u64, key);
}
}
sorted_entries
}
fn generate_rust_code(config: &Value, writer: &mut impl Write) {
writeln!(
writer,
"//! This is an auto-generated configuration module."
)
.unwrap();
writeln!(writer, "use satrs::request::UniqueApidTargetId;").unwrap();
writeln!(writer).unwrap();
// Generate the main module
writeln!(
writer,
"#[derive(Debug, Copy, Clone, PartialEq, Eq, strum::EnumIter)]"
)
.unwrap();
writeln!(writer, "pub enum Apid {{").unwrap();
// Generate constants for the main module
if let Some(apid_table) = config.get("apid").and_then(Value::as_table) {
let sorted_entries = sort_enum_table(apid_table);
// Write the sorted entries to the writer
for (value, key) in sorted_entries {
writeln!(writer, " {} = {},", key, value).unwrap();
}
}
writeln!(writer, "}}").unwrap();
// Generate ID tables.
if let Some(id_tables) = config.get("ids").and_then(Value::as_table) {
for (mod_name, table) in id_tables {
let mod_name_as_snake = mod_name.to_snake_case();
writeln!(writer).unwrap();
writeln!(writer, "pub mod {} {{", mod_name_as_snake).unwrap();
let sorted_entries = sort_enum_table(table.as_table().unwrap());
writeln!(writer, " #[derive(Debug, Copy, Clone, PartialEq, Eq)]").unwrap();
writeln!(writer, " pub enum Id {{").unwrap();
// Write the sorted entries to the writer
for (value, key) in &sorted_entries {
writeln!(writer, " {} = {},", key, value).unwrap();
}
writeln!(writer, " }}").unwrap();
writeln!(writer).unwrap();
for (_value, key) in sorted_entries {
let key_shouting = key.to_shouty_snake_case();
writeln!(
writer,
" pub const {}: super::UniqueApidTargetId = super::UniqueApidTargetId::new(super::Apid::{} as u16, Id::{} as u32);",
key_shouting, mod_name, key
).unwrap();
}
writeln!(writer, "}}").unwrap();
}
}
}
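For reference, the format strings above would emit roughly the following for the [apid] and [ids.Acs] tables from components.toml. This is an abridged, hand-expanded sketch of the generator output only; the checked-in ids.rs shown further below differs because it was additionally reworked to use bitbybit enums.

//! This is an auto-generated configuration module.
use satrs::request::UniqueApidTargetId;

#[derive(Debug, Copy, Clone, PartialEq, Eq, strum::EnumIter)]
pub enum Apid {
    Acs = 3,
    // ... remaining APID entries ...
}

pub mod acs {
    #[derive(Debug, Copy, Clone, PartialEq, Eq)]
    pub enum Id {
        Subsystem = 1,
        Assembly = 2,
        Mgm0 = 3,
        Mgm1 = 4,
    }

    pub const MGM0: super::UniqueApidTargetId =
        super::UniqueApidTargetId::new(super::Apid::Acs as u16, Id::Mgm0 as u32);
    // ... one constant per entry, generated the same way ...
}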

29
justfile Normal file
View File

@@ -0,0 +1,29 @@
all: check build embedded test check-fmt clippy docs
check:
cargo check
cargo check -p satrs-example --no-default-features
build:
cargo build
test:
cargo nextest run --all-features
cargo test --doc --all-features
embedded:
cargo check -p satrs --target=thumbv7em-none-eabihf --no-default-features
fmt:
cargo fmt --all
check-fmt:
cargo fmt --all -- --check
clippy:
cargo clippy -- -D warnings
docs-satrs:
RUSTDOCFLAGS="--cfg docsrs --generate-link-to-definition -Z unstable-options" cargo +nightly doc -p satrs --all-features
docs: docs-satrs

View File

@@ -3,20 +3,20 @@
Software for space systems oftentimes has different requirements than the software for host
systems or servers. Currently, most space systems are considered embedded systems.
For these systems, the computation power and the available heap are the most important resources
which are constrained. This might make completely heap-based memory management schemes which
For these systems, the computation power and the available heap are important resources
which are also constrained. This might make completely heap-based memory management schemes which
are oftentimes used on host and server based systems unfeasible. Still, completely forbidding
heap allocations might make software development unnecessarily difficult, especially in a
time where the OBSW might be running on Linux based systems with hundreds of MBs of RAM.
A useful pattern used commonly in space systems is to limit heap allocations to program
A useful pattern commonly used in space systems is to limit heap allocations to program
initialization time and avoid frequent run-time allocations. This prevents issues like
running out of memory (something even Rust cannot protect against) or heap fragmentation on systems
without an MMU.
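The init-time allocation pattern can be sketched in plain Rust. This is a minimal illustration only, assuming a hypothetical FixedPool type and arbitrary slot sizes; it is not the sat-rs pool API, but it shows how all memory is claimed once at initialization and only reused afterwards.

struct FixedPool {
    // One contiguous buffer, allocated once at program initialization.
    storage: Vec<u8>,
    slot_len: usize,
}

impl FixedPool {
    // All heap allocation happens here, during initialization.
    fn new(num_slots: usize, slot_len: usize) -> Self {
        Self {
            storage: vec![0; num_slots * slot_len],
            slot_len,
        }
    }

    // Hand out a slot as a mutable slice; no run-time allocation takes place.
    fn slot_mut(&mut self, idx: usize) -> Option<&mut [u8]> {
        let start = idx.checked_mul(self.slot_len)?;
        self.storage.get_mut(start..start + self.slot_len)
    }
}

fn main() {
    // Init time: one allocation covers all TC buffers.
    let mut tc_pool = FixedPool::new(8, 512);
    // Run time: packets are written into pre-allocated slots instead of fresh Vec<u8>s.
    if let Some(slot) = tc_pool.slot_mut(0) {
        slot[..3].copy_from_slice(&[0x17, 0x01, 0x00]);
    }
}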
# Using pre-allocated pool structures
A huge candidate for heap allocations is TMTC handling. TCs, TMs and IPC data are all
A candidate for heap allocations is TMTC handling. TCs, TMs and IPC data are all
candidates where the data size might vary greatly. The regular solution for host systems
might be to send around this data as a `Vec<u8>` until it is dropped. `sat-rs` provides
another solution to avoid run-time allocations by offering pre-allocated static

View File

@@ -18,26 +18,21 @@ csv = "1"
num_enum = "0.7"
thiserror = "2"
lazy_static = "1"
strum = { version = "0.26", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
derive-new = "0.7"
cfg-if = "1"
arbitrary-int = "2"
bitbybit = "1.4"
serde = { version = "1", features = ["derive"] }
serde_json = "1"
[dependencies.satrs]
path = "../satrs"
features = ["test_util"]
[dependencies.satrs-minisim]
path = "../satrs-minisim"
[dependencies.satrs-mib]
version = "0.1.1"
path = "../satrs-mib"
satrs = { path = "../satrs", features = ["test_util"] }
satrs-minisim = { path = "../satrs-minisim" }
satrs-mib = { path = "../satrs-mib" }
[features]
heap_tmtc = []
default = ["heap_tmtc"]
heap_tmtc = []
[dev-dependencies]
env_logger = "0.11"

View File

@@ -2,7 +2,7 @@ use derive_new::new;
use satrs::hk::{HkRequest, HkRequestVariant};
use satrs::mode_tree::{ModeChild, ModeNode};
use satrs::power::{PowerSwitchInfo, PowerSwitcherCommandSender};
use satrs_example::config::pus::PUS_MODE_SERVICE;
use satrs_example::ids::generic_pus::PUS_MODE;
use satrs_example::{DeviceMode, TimestampHelper};
use satrs_minisim::acs::lis3mdl::{
MgmLis3MdlReply, MgmLis3RawValues, FIELD_LSB_PER_GAUSS_4_SENS, GAUSS_TO_MICROTESLA_FACTOR,
@@ -83,7 +83,7 @@ impl SpiInterface for SpiSimInterface {
.sim_request_tx
.send(SimRequest::new_with_epoch_time(mgm_sensor_request))
{
log::error!("failed to send MGM LIS3 request: {}", e);
log::error!("failed to send MGM LIS3 request: {e}");
}
match self.sim_reply_rx.recv_timeout(Duration::from_millis(50)) {
Ok(sim_reply) => {
@@ -97,7 +97,7 @@ impl SpiInterface for SpiSimInterface {
.copy_from_slice(&sim_reply_lis3.raw.z.to_le_bytes());
}
Err(e) => {
log::warn!("MGM LIS3 SIM reply timeout: {}", e);
log::warn!("MGM LIS3 SIM reply timeout: {e}");
}
}
Ok(())
@@ -417,7 +417,7 @@ impl<
if requestor.sender_id() == NO_SENDER {
return Ok(());
}
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {:x}",
requestor.sender_id()
@@ -434,7 +434,7 @@ impl<
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -484,6 +484,7 @@ mod tests {
sync::{mpsc, Arc},
};
use arbitrary_int::u21;
use satrs::{
mode::{ModeReply, ModeRequest},
mode_tree::ModeParent,
@@ -492,7 +493,7 @@ mod tests {
tmtc::PacketAsVec,
ComponentId,
};
use satrs_example::config::{acs::MGM_ASSEMBLY, components::Apid};
use satrs_example::ids::{acs::ASSEMBLY, Apid};
use satrs_minisim::acs::lis3mdl::MgmLis3RawValues;
use crate::{eps::TestSwitchHelper, pus::hk::HkReply, requests::CompositeRequest};
@@ -538,7 +539,7 @@ mod tests {
impl ModeNode for MgmAssemblyMock {
fn id(&self) -> satrs::ComponentId {
PUS_MODE_SERVICE.into()
PUS_MODE.into()
}
}
@@ -557,7 +558,7 @@ mod tests {
impl ModeNode for PusMock {
fn id(&self) -> satrs::ComponentId {
PUS_MODE_SERVICE.into()
PUS_MODE.into()
}
}
@@ -574,7 +575,7 @@ mod tests {
let (request_tx, request_rx) = mpsc::sync_channel(5);
let (reply_tx_to_pus, reply_rx_to_pus) = mpsc::sync_channel(5);
let (reply_tx_to_parent, reply_rx_to_parent) = mpsc::sync_channel(5);
let id = UniqueApidTargetId::new(Apid::Acs as u16, 1);
let id = UniqueApidTargetId::new(Apid::Acs.raw_value(), u21::new(1));
let mode_node = ModeRequestHandlerMpscBounded::new(id.into(), request_rx);
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (hk_reply_tx, hk_reply_rx) = mpsc::sync_channel(10);
@@ -592,8 +593,8 @@ mod tests {
TestSpiInterface::default(),
shared_mgm_set,
);
handler.add_mode_parent(PUS_MODE_SERVICE.into(), reply_tx_to_pus);
handler.add_mode_parent(MGM_ASSEMBLY.into(), reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE.into(), reply_tx_to_pus);
handler.add_mode_parent(ASSEMBLY.into(), reply_tx_to_parent);
Self {
mode_request_tx: request_tx,
mode_reply_rx_to_pus: reply_rx_to_pus,
@@ -631,7 +632,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -692,7 +693,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,

View File

@@ -1,10 +1,9 @@
use arbitrary_int::u11;
use satrs::pus::verification::RequestId;
use satrs::spacepackets::ecss::tc::PusTcCreator;
use satrs::spacepackets::ecss::tm::PusTmReader;
use satrs::{
spacepackets::ecss::{PusPacket, WritablePusPacket},
spacepackets::SpHeader,
};
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::spacepackets::SpHeader;
use satrs_example::config::{OBSW_SERVER_ADDR, SERVER_PORT};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::Duration;
@@ -12,7 +11,12 @@ use std::time::Duration;
fn main() {
let mut buf = [0; 32];
let addr = SocketAddr::new(IpAddr::V4(OBSW_SERVER_ADDR), SERVER_PORT);
let pus_tc = PusTcCreator::new_simple(SpHeader::new_from_apid(0x02), 17, 1, &[], true);
let pus_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(u11::new(0x02)),
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let client = UdpSocket::bind("127.0.0.1:7302").expect("Connecting to UDP server failed");
let tc_req_id = RequestId::new(&pus_tc);
println!("Packing and sending PUS ping command TC[17,1] with request ID {tc_req_id}");
@@ -29,10 +33,10 @@ fn main() {
let res = client.recv(&mut buf);
match res {
Ok(_len) => {
let (pus_tm, size) = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
if pus_tm.service() == 17 && pus_tm.subservice() == 2 {
let pus_tm = PusTmReader::new(&buf, 7).expect("Parsing PUS TM failed");
if pus_tm.service_type_id() == 17 && pus_tm.message_subtype_id() == 2 {
println!("Received PUS Ping Reply TM[17,2]")
} else if pus_tm.service() == 1 {
} else if pus_tm.service_type_id() == 1 {
if pus_tm.source_data().is_empty() {
println!("Invalid verification TM, no source data");
}
@@ -41,28 +45,29 @@ fn main() {
println!("Invalid verification TM source data, less than 4 bytes")
}
let req_id = RequestId::from_bytes(src_data).unwrap();
if pus_tm.subservice() == 1 {
let subtype_id = pus_tm.message_subtype_id();
if subtype_id == 1 {
println!("Received TM[1,1] acceptance success for request ID {req_id}")
} else if pus_tm.subservice() == 2 {
} else if subtype_id == 2 {
println!("Received TM[1,2] acceptance failure for request ID {req_id}")
} else if pus_tm.subservice() == 3 {
} else if subtype_id == 3 {
println!("Received TM[1,3] start success for request ID {req_id}")
} else if pus_tm.subservice() == 4 {
} else if subtype_id == 4 {
println!("Received TM[1,2] start failure for request ID {req_id}")
} else if pus_tm.subservice() == 5 {
} else if subtype_id == 5 {
println!("Received TM[1,5] step success for request ID {req_id}")
} else if pus_tm.subservice() == 6 {
} else if subtype_id == 6 {
println!("Received TM[1,6] step failure for request ID {req_id}")
} else if pus_tm.subservice() == 7 {
} else if subtype_id == 7 {
println!("Received TM[1,7] completion success for request ID {req_id}")
} else if pus_tm.subservice() == 8 {
} else if subtype_id == 8 {
println!("Received TM[1,8] completion failure for request ID {req_id}");
}
} else {
println!(
"Received TM[{}, {}] with {} bytes",
pus_tm.service(),
pus_tm.subservice(),
pus_tm.service_type_id(),
pus_tm.message_subtype_id(),
size
);
}

View File

@@ -1,3 +1,4 @@
use arbitrary_int::u11;
use lazy_static::lazy_static;
use satrs::{
res_code::ResultU16,
@@ -10,7 +11,7 @@ use strum::IntoEnumIterator;
use num_enum::{IntoPrimitive, TryFromPrimitive};
use satrs::{
events::{EventU32TypedSev, SeverityInfo},
events_legacy::{EventU32TypedSev, SeverityInfo},
pool::{StaticMemoryPool, StaticPoolConfig},
};
@@ -43,14 +44,14 @@ pub const TEST_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<Severi
lazy_static! {
pub static ref PACKET_ID_VALIDATOR: HashSet<PacketId> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, id as u16));
for id in crate::ids::Apid::iter() {
set.insert(PacketId::new(PacketType::Tc, true, u11::new(id as u16)));
}
set
};
pub static ref APID_VALIDATOR: HashSet<u16> = {
let mut set = HashSet::new();
for id in components::Apid::iter() {
for id in crate::ids::Apid::iter() {
set.insert(id as u16);
}
set
@@ -122,92 +123,11 @@ pub mod mode_err {
}
pub mod components {
use satrs::{request::UniqueApidTargetId, ComponentId};
use strum::EnumIter;
use satrs::ComponentId;
#[derive(Copy, Clone, PartialEq, Eq, EnumIter)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
Eps = 6,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum EpsId {
Pcdu = 0,
Subsystem = 1,
}
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum TmtcId {
UdpServer = 0,
TcpServer = 1,
}
pub const EPS_SUBSYSTEM: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Eps as u16, EpsId::Subsystem as u32);
pub const PCDU_HANDLER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Eps as u16, EpsId::Pcdu as u32);
pub const UDP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::UdpServer as u32);
pub const TCP_SERVER: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Tmtc as u16, TmtcId::TcpServer as u32);
pub const NO_SENDER: ComponentId = ComponentId::MAX;
}
pub mod pus {
use super::components::Apid;
use satrs::request::UniqueApidTargetId;
// Component IDs for components with the PUS APID.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum PusId {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
pub const PUS_ACTION_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusAction as u32);
pub const PUS_EVENT_MANAGEMENT: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, 0);
pub const PUS_ROUTING_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusRouting as u32);
pub const PUS_TEST_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusTest as u32);
pub const PUS_MODE_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusMode as u32);
pub const PUS_HK_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::GenericPus as u16, PusId::PusHk as u32);
pub const PUS_SCHED_SERVICE: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Sched as u16, 0);
}
pub mod acs {
use super::components::Apid;
use satrs::request::UniqueApidTargetId;
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum AcsId {
Subsystem = 1,
Assembly = 2,
Mgm0 = 3,
Mgm1 = 4,
}
pub const MGM_ASSEMBLY: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Assembly as u32);
pub const MGM_HANDLER_0: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
pub const MGM_HANDLER_1: UniqueApidTargetId =
UniqueApidTargetId::new(Apid::Acs as u16, AcsId::Mgm0 as u32);
}
pub mod pool {
use super::*;
pub fn create_static_pools() -> (StaticMemoryPool, StaticMemoryPool) {

View File

@@ -20,10 +20,8 @@ use satrs::{
spacepackets::ByteConversionError,
};
use satrs_example::{
config::{
components::{NO_SENDER, PCDU_HANDLER},
pus::PUS_MODE_SERVICE,
},
config::components::NO_SENDER,
ids::{eps::PCDU, generic_pus::PUS_MODE},
DeviceMode, TimestampHelper,
};
use satrs_minisim::{
@@ -403,7 +401,7 @@ impl<ComInterface: SerialInterface> PcduHandler<ComInterface> {
}
}
}) {
log::warn!("receiving PCDU replies failed: {:?}", e);
log::warn!("receiving PCDU replies failed: {e:?}");
}
}
}
@@ -452,7 +450,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
if requestor.sender_id() == NO_SENDER {
return Ok(());
}
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -469,7 +467,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
requestor: MessageMetadata,
reply: ModeReply,
) -> Result<(), Self::Error> {
if requestor.sender_id() != PUS_MODE_SERVICE.id() {
if requestor.sender_id() != PUS_MODE.id() {
log::warn!(
"can not send back mode reply to sender {}",
requestor.sender_id()
@@ -492,7 +490,7 @@ impl<ComInterface: SerialInterface> ModeRequestHandler for PcduHandler<ComInterf
impl<ComInterface: SerialInterface> ModeNode for PcduHandler<ComInterface> {
fn id(&self) -> satrs::ComponentId {
PCDU_HANDLER.into()
PCDU.into()
}
}
@@ -508,13 +506,11 @@ impl<ComInterface: SerialInterface> ModeChild for PcduHandler<ComInterface> {
mod tests {
use std::sync::mpsc;
use arbitrary_int::u21;
use satrs::{
mode::ModeRequest, power::SwitchStateBinary, request::GenericMessage, tmtc::PacketAsVec,
};
use satrs_example::config::{
acs::MGM_HANDLER_0,
components::{Apid, EPS_SUBSYSTEM, PCDU_HANDLER},
};
use satrs_example::ids::{self, Apid};
use satrs_minisim::eps::SwitchMapBinary;
use super::*;
@@ -570,15 +566,14 @@ mod tests {
let (mode_request_tx, mode_request_rx) = mpsc::sync_channel(5);
let (mode_reply_tx_to_pus, mode_reply_rx_to_pus) = mpsc::sync_channel(5);
let (mode_reply_tx_to_parent, mode_reply_rx_to_parent) = mpsc::sync_channel(5);
let mode_node =
ModeRequestHandlerMpscBounded::new(PCDU_HANDLER.into(), mode_request_rx);
let mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), mode_request_rx);
let (composite_request_tx, composite_request_rx) = mpsc::channel();
let (hk_reply_tx, hk_reply_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel::<PacketAsVec>(5);
let (switch_request_tx, switch_reqest_rx) = mpsc::channel();
let shared_switch_map = Arc::new(Mutex::new(SwitchSet::default()));
let mut handler = PcduHandler::new(
UniqueApidTargetId::new(Apid::Eps as u16, 0),
UniqueApidTargetId::new(Apid::Eps.raw_value(), u21::new(0)),
"TEST_PCDU",
mode_node,
composite_request_rx,
@@ -588,8 +583,8 @@ mod tests {
SerialInterfaceTest::default(),
shared_switch_map,
);
handler.add_mode_parent(EPS_SUBSYSTEM.into(), mode_reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE_SERVICE.into(), mode_reply_tx_to_pus);
handler.add_mode_parent(ids::eps::SUBSYSTEM.into(), mode_reply_tx_to_parent);
handler.add_mode_parent(PUS_MODE.into(), mode_reply_tx_to_pus);
Self {
mode_request_tx,
mode_reply_rx_to_pus,
@@ -684,7 +679,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -719,7 +714,7 @@ mod tests {
testbench
.mode_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, PUS_MODE_SERVICE.id()),
MessageMetadata::new(0, PUS_MODE.id()),
ModeRequest::SetMode {
mode_and_submode: ModeAndSubmode::new(DeviceMode::Normal as u32, 0),
forced: false,
@@ -729,7 +724,7 @@ mod tests {
testbench
.switch_request_tx
.send(GenericMessage::new(
MessageMetadata::new(0, MGM_HANDLER_0.id()),
MessageMetadata::new(0, ids::acs::MGM0.id()),
SwitchRequest::new(0, SwitchStateBinary::On),
))
.expect("failed to send switch request");

View File

@@ -1,13 +1,15 @@
use std::sync::mpsc::{self};
use crate::pus::create_verification_reporter;
use satrs::event_man::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHookProvider;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u11;
use satrs::event_man_legacy::{EventMessageU32, EventRoutingError};
use satrs::pus::event::EventTmHook;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::EcssTmSender;
use satrs::request::UniqueApidTargetId;
use satrs::{
event_man::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
event_man_legacy::{EventManagerWithBoundedMpsc, EventSendProvider, EventU32SenderMpscBounded},
pus::{
event_man::{
DefaultPusEventU32TmCreator, EventReporter, EventRequest, EventRequestWithToken,
@@ -16,17 +18,17 @@ use satrs::{
},
spacepackets::time::cds::CdsTime,
};
use satrs_example::config::pus::PUS_EVENT_MANAGEMENT;
use satrs_example::ids::generic_pus::PUS_EVENT_MANAGEMENT;
use crate::update_time;
// This helper sets the APID of the event sender for the PUS telemetry.
#[derive(Default)]
pub struct EventApidSetter {
pub next_apid: u16,
pub next_apid: u11,
}
impl EventTmHookProvider for EventApidSetter {
impl EventTmHook for EventApidSetter {
fn modify_tm(&self, tm: &mut satrs::spacepackets::ecss::tm::PusTmCreator) {
tm.set_apid(self.next_apid);
}
@@ -59,12 +61,11 @@ impl<TmSender: EcssTmSender> PusEventHandler<TmSender> {
// telemetry for each event.
let event_reporter = EventReporter::new_with_hook(
PUS_EVENT_MANAGEMENT.raw(),
0,
u11::ZERO,
0,
128,
EventApidSetter::default(),
)
.unwrap();
);
let pus_event_dispatcher =
DefaultPusEventU32TmCreator::new_with_default_backend(event_reporter);
let pus_event_man_send_provider = EventU32SenderMpscBounded::new(
@@ -217,20 +218,18 @@ impl<TmSender: EcssTmSender> EventHandler<TmSender> {
#[cfg(test)]
mod tests {
use arbitrary_int::u21;
use satrs::{
events::EventU32,
pus::verification::VerificationReporterCfg,
spacepackets::{
ecss::{tm::PusTmReader, PusPacket},
CcsdsPacket,
},
events_legacy::EventU32,
pus::verification::VerificationReporterConfig,
spacepackets::ecss::{tm::PusTmReader, PusPacket},
tmtc::PacketAsVec,
};
use super::*;
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(1, 2);
const TEST_EVENT: EventU32 = EventU32::new(satrs::events::Severity::Info, 1, 1);
const TEST_CREATOR_ID: UniqueApidTargetId = UniqueApidTargetId::new(u11::new(1), u21::new(2));
const TEST_EVENT: EventU32 = EventU32::new(satrs::events_legacy::Severity::Info, 1, 1);
pub struct EventManagementTestbench {
pub event_tx: mpsc::SyncSender<EventMessageU32>,
@@ -244,7 +243,7 @@ mod tests {
let (event_tx, event_rx) = mpsc::sync_channel(10);
let (_event_req_tx, event_req_rx) = mpsc::sync_channel(10);
let (tm_sender, tm_receiver) = mpsc::channel();
let verif_reporter_cfg = VerificationReporterCfg::new(0x05, 2, 2, 128).unwrap();
let verif_reporter_cfg = VerificationReporterConfig::new(u11::new(0x05), 2, 2, 128);
let verif_reporter =
VerificationReporter::new(PUS_EVENT_MANAGEMENT.id(), &verif_reporter_cfg);
let mut event_manager = EventManagerWithBoundedMpsc::new(event_rx);
@@ -270,7 +269,7 @@ mod tests {
.event_tx
.send(EventMessageU32::new(
TEST_CREATOR_ID.id(),
EventU32::new(satrs::events::Severity::Info, 1, 1),
EventU32::new(satrs::events_legacy::Severity::Info, 1, 1),
))
.expect("failed to send event");
testbench.pus_event_handler.handle_event_requests();
@@ -281,9 +280,7 @@ mod tests {
.try_recv()
.expect("failed to receive TM packet");
assert_eq!(tm_packet.sender_id, PUS_EVENT_MANAGEMENT.id());
let tm_reader = PusTmReader::new(&tm_packet.packet, 7)
.expect("failed to create TM reader")
.0;
let tm_reader = PusTmReader::new(&tm_packet.packet, 7).expect("failed to create TM reader");
assert_eq!(tm_reader.apid(), TEST_CREATOR_ID.apid);
assert_eq!(tm_reader.user_data().len(), 4);
let event_read_back = EventU32::from_be_bytes(tm_reader.user_data().try_into().unwrap());

View File

@@ -1,8 +1,9 @@
use arbitrary_int::traits::Integer as _;
use derive_new::new;
use satrs::hk::UniqueId;
use satrs::request::UniqueApidTargetId;
use satrs::spacepackets::ecss::hk;
use satrs::spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use satrs::spacepackets::ecss::{hk, CreatorConfig, MessageTypeId};
use satrs::spacepackets::{ByteConversionError, SpHeader};
#[derive(Debug, new, Copy, Clone)]
@@ -29,7 +30,7 @@ impl HkUniqueId {
expected: 8,
});
}
buf[0..4].copy_from_slice(&self.target_id.unique_id.to_be_bytes());
buf[0..4].copy_from_slice(&self.target_id.unique_id.as_u32().to_be_bytes());
buf[4..8].copy_from_slice(&self.set_id.to_be_bytes());
Ok(8)
@@ -53,9 +54,13 @@ impl PusHkHelper {
hk_data_writer: &mut HkWriter,
buf: &'b mut [u8],
) -> Result<PusTmCreator<'a, 'b>, ByteConversionError> {
let sec_header =
PusTmSecondaryHeader::new(3, hk::Subservice::TmHkPacket as u8, 0, 0, timestamp);
buf[0..4].copy_from_slice(&self.component_id.unique_id.to_be_bytes());
let sec_header = PusTmSecondaryHeader::new(
MessageTypeId::new(3, hk::MessageSubtypeId::TmHkPacket as u8),
0,
0,
timestamp,
);
buf[0..4].copy_from_slice(&self.component_id.unique_id.as_u32().to_be_bytes());
buf[4..8].copy_from_slice(&set_id.to_be_bytes());
let (_, second_half) = buf.split_at_mut(8);
let hk_data_len = hk_data_writer(second_half)?;
@@ -63,7 +68,7 @@ impl PusHkHelper {
SpHeader::new_from_apid(self.component_id.apid),
sec_header,
&buf[0..8 + hk_data_len],
true,
CreatorConfig::default(),
))
}
}

109
satrs-example/src/ids.rs Normal file
View File

@@ -0,0 +1,109 @@
//! This is an auto-generated configuration module.
use satrs::request::UniqueApidTargetId;
#[derive(Debug, PartialEq, Eq, strum::EnumIter)]
#[bitbybit::bitenum(u11)]
pub enum Apid {
Sched = 1,
GenericPus = 2,
Acs = 3,
Cfdp = 4,
Tmtc = 5,
Eps = 6,
}
pub mod acs {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
Subsystem = 1,
Assembly = 2,
Mgm0 = 3,
Mgm1 = 4,
}
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Subsystem.raw_value());
pub const ASSEMBLY: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Assembly.raw_value());
pub const MGM0: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm0.raw_value());
pub const MGM1: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Acs.raw_value(), Id::Mgm1.raw_value());
}
pub mod eps {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
Pcdu = 0,
Subsystem = 1,
}
pub const PCDU: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Pcdu.raw_value());
pub const SUBSYSTEM: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Eps.raw_value(), Id::Subsystem.raw_value());
}
pub mod generic_pus {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
PusEventManagement = 0,
PusRouting = 1,
PusTest = 2,
PusAction = 3,
PusMode = 4,
PusHk = 5,
}
pub const PUS_EVENT_MANAGEMENT: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusEventManagement.raw_value(),
);
pub const PUS_ROUTING: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusRouting.raw_value(),
);
pub const PUS_TEST: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusTest.raw_value(),
);
pub const PUS_ACTION: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusAction.raw_value(),
);
pub const PUS_MODE: super::UniqueApidTargetId = super::UniqueApidTargetId::new(
super::Apid::GenericPus.raw_value(),
Id::PusMode.raw_value(),
);
pub const PUS_HK: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::GenericPus.raw_value(), Id::PusHk.raw_value());
}
pub mod sched {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
PusSched = 0,
}
pub const PUS_SCHED: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Sched.raw_value(), Id::PusSched.raw_value());
}
pub mod tmtc {
#[derive(Debug, PartialEq, Eq)]
#[bitbybit::bitenum(u21, exhaustive = false)]
pub enum Id {
UdpServer = 0,
TcpServer = 1,
}
pub const UDP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::UdpServer.raw_value());
pub const TCP_SERVER: super::UniqueApidTargetId =
super::UniqueApidTargetId::new(super::Apid::Tmtc.raw_value(), Id::TcpServer.raw_value());
}
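A small usage sketch for the generated module above; the accessors are taken from their uses elsewhere in this diff, and the printed formatting is only illustrative.

use satrs_example::ids;

fn main() {
    // Each generated constant bundles the APID and the component-unique ID.
    let mgm0 = ids::acs::MGM0;
    // The combined value is what other components use to address this handler.
    println!("MGM0 component ID: {:#x}", mgm0.id());
}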

View File

@@ -24,7 +24,7 @@ pub fn create_sim_client(sim_request_rx: mpsc::Receiver<SimRequest>) -> Option<S
return Some(sim_client);
}
Err(e) => {
log::warn!("sim client creation error: {}", e);
log::warn!("sim client creation error: {e}");
}
}
None
@@ -116,7 +116,7 @@ impl SimClientUdp {
.udp_client
.send_to(request_json.as_bytes(), self.simulator_addr)
{
log::error!("error sending data to UDP SIM server: {}", e);
log::error!("error sending data to UDP SIM server: {e}");
break;
} else {
no_sim_requests_handled = false;
@@ -151,7 +151,7 @@ impl SimClientUdp {
}
}
Err(e) => {
log::warn!("failed to deserialize SIM reply: {}", e);
log::warn!("failed to deserialize SIM reply: {e}");
}
}
}
@@ -161,7 +161,7 @@ impl SimClientUdp {
{
break;
}
log::error!("error receiving data from UDP SIM server: {}", e);
log::error!("error receiving data from UDP SIM server: {e}");
break;
}
}

View File

@@ -5,7 +5,7 @@ use std::{
};
use log::{info, warn};
use satrs::tmtc::StoreAndSendError;
use satrs::hal::std::tcp_spacepackets_server::CcsdsPacketParser;
use satrs::{
encoding::ccsds::{SpValidity, SpacePacketValidator},
hal::std::tcp_server::{HandledConnectionHandler, ServerConfig, TcpSpacepacketsServer},
@@ -31,7 +31,7 @@ impl SpacePacketValidator for SimplePacketValidator {
if self.valid_ids.contains(&sp_header.packet_id()) {
return SpValidity::Valid;
}
log::warn!("ignoring space packet with header {:?}", sp_header);
log::warn!("ignoring space packet with header {sp_header:?}");
// We could perform a CRC check, but let's keep this simple and assume that TCP ensures
// data integrity.
SpValidity::Skip
@@ -103,16 +103,14 @@ impl PacketSource for SyncTcpTmSource {
}
}
pub type TcpServer<ReceivesTc, SendError> = TcpSpacepacketsServer<
pub type TcpServer<ReceivesTc> = TcpSpacepacketsServer<
SyncTcpTmSource,
ReceivesTc,
SimplePacketValidator,
ConnectionFinishedHandler,
(),
SendError,
>;
pub struct TcpTask(pub TcpServer<TmTcSender, StoreAndSendError>);
pub struct TcpTask(pub TcpServer<TmTcSender>);
impl TcpTask {
pub fn new(
@@ -124,8 +122,7 @@ impl TcpTask {
Ok(Self(TcpSpacepacketsServer::new(
cfg,
tm_source,
tc_sender,
SimplePacketValidator { valid_ids },
CcsdsPacketParser::new(cfg.id, 2048, tc_sender, SimplePacketValidator { valid_ids }),
ConnectionFinishedHandler::default(),
None,
)?))

View File

@@ -1,13 +1,15 @@
#![allow(dead_code)]
use std::net::{SocketAddr, UdpSocket};
use std::sync::mpsc;
use log::{info, warn};
use satrs::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use satrs::pus::HandlingStatus;
use satrs::tmtc::{PacketAsVec, PacketInPool, StoreAndSendError};
use satrs::{
hal::std::udp_server::{ReceiveResult, UdpTcServer},
pool::{PoolProviderWithGuards, SharedStaticMemoryPool},
};
use satrs::queue::GenericSendError;
use satrs::tmtc::PacketAsVec;
use satrs::pool::{PoolProviderWithGuards, SharedStaticMemoryPool};
use satrs::tmtc::PacketInPool;
use crate::tmtc::sender::TmTcSender;
@@ -67,7 +69,7 @@ impl UdpTmHandler for DynamicUdpTmHandler {
}
pub struct UdpTmtcServer<TmHandler: UdpTmHandler> {
pub udp_tc_server: UdpTcServer<TmTcSender, StoreAndSendError>,
pub udp_tc_server: UdpTcServer<TmTcSender, GenericSendError>,
pub tm_handler: TmHandler,
}
@@ -112,6 +114,9 @@ mod tests {
sync::{Arc, Mutex},
};
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::{
spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
@@ -119,7 +124,8 @@ mod tests {
},
ComponentId,
};
use satrs_example::config::{components, OBSW_SERVER_ADDR};
use satrs_example::config::OBSW_SERVER_ADDR;
use satrs_example::ids;
use crate::tmtc::sender::{MockSender, TmTcSender};
@@ -175,10 +181,15 @@ mod tests {
udp_tc_server,
tm_handler,
};
let sph = SpHeader::new_for_unseg_tc(components::Apid::GenericPus as u16, 0, 0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true)
.to_vec()
.unwrap();
let sph = SpHeader::new_for_unseg_tc(ids::Apid::GenericPus.raw_value(), u14::ZERO, 0);
let ping_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
)
.to_vec()
.unwrap();
let client = UdpSocket::bind("127.0.0.1:0").expect("Connecting to UDP server failed");
let client_addr = client.local_addr().unwrap();
println!("{}", server_addr);

View File

@@ -9,15 +9,16 @@ use satrs::pus::verification::{
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConverter, EcssTmSender, EcssTmtcError,
ActiveRequest, EcssTcAndToken, EcssTcCacher, EcssTmSender, EcssTmtcError,
GenericConversionError, MpscTcReceiver, PusPacketHandlingError, PusReplyHandler,
PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{EcssEnumU16, PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_ACTION_SERVICE;
use satrs_example::config::tmtc_err;
use satrs_example::ids;
use satrs_example::ids::generic_pus::PUS_ACTION;
use std::sync::mpsc;
use std::time::Duration;
@@ -160,7 +161,7 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusActionRequestStd, ActionRequest), Self::Error> {
let subservice = tc.subservice();
let subservice = tc.message_subtype_id();
let user_data = tc.user_data();
if user_data.len() < 8 {
verif_reporter
@@ -207,17 +208,17 @@ impl PusTcToRequestConverter<ActivePusActionRequestStd, ActionRequest> for Actio
pub fn create_action_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
action_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ActionReplyPus>>,
) -> ActionServiceWrapper {
let action_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_ACTION_SERVICE.id(),
ids::generic_pus::PUS_ACTION.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_ACTION_SERVICE.id(), PUS_ACTION_SERVICE.apid),
create_verification_reporter(PUS_ACTION.id(), PUS_ACTION.apid),
tc_in_mem_converter,
),
ActionRequestConverter::default(),
@@ -269,12 +270,14 @@ impl TargetedPusService for ActionServiceWrapper {
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer as _;
use satrs::pus::test_util::{
TEST_APID, TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::pus::verification::test_util::TestVerificationReporter;
use satrs::pus::{verification, EcssTcInVecConverter};
use satrs::pus::{verification, EcssTcVecCacher};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::tmtc::PacketAsVec;
use satrs::ComponentId;
use satrs::{
@@ -324,7 +327,7 @@ mod tests {
pus_action_rx,
TmTcSender::Heap(tm_funnel_tx.clone()),
verif_reporter,
EcssTcInMemConverter::Heap(EcssTcInVecConverter::default()),
EcssTcCacher::Heap(EcssTcVecCacher::default()),
),
ActionRequestConverter::default(),
DefaultActiveActionRequestMap::default(),
@@ -367,7 +370,7 @@ mod tests {
if let Err(mpsc::TryRecvError::Empty) = packet {
} else {
let tm = packet.unwrap();
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap().0;
let unexpected_tm = PusTmReader::new(&tm.packet, 7).unwrap();
panic!("unexpected TM packet {unexpected_tm:?}");
}
}
@@ -410,7 +413,11 @@ mod tests {
pub fn add_tc(&mut self, tc: &PusTcCreator) {
self.request_id = Some(verification::RequestId::new(tc).into());
let token = self.service.service_helper.verif_reporter_mut().add_tc(tc);
let token = self
.service
.service_helper
.verif_reporter_mut()
.start_verification(tc);
let accepted_token = self
.service
.service_helper
@@ -443,12 +450,13 @@ mod tests {
);
// Create a basic action request and verify forwarding.
let sp_header = SpHeader::new_from_apid(TEST_APID);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(8, 128));
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.to_be_bytes());
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_1.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let pus8_packet =
PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
testbench.verify_next_tc_is_handled_properly(&time_stamp);
@@ -484,7 +492,7 @@ mod tests {
TEST_COMPONENT_ID_1.id(),
);
// Create a basic action request and verify forwarding.
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(8, 128));
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
@@ -494,7 +502,7 @@ mod tests {
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
testbench.add_tc(&pus8_packet);
let time_stamp: [u8; 7] = [0; 7];
@@ -510,17 +518,17 @@ mod tests {
TEST_COMPONENT_ID_0.raw(),
ActionRequestConverter::default(),
);
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(8, 128));
let action_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
let pus8_packet = PusTcCreator::new(
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
@@ -546,11 +554,11 @@ mod tests {
fn converter_action_req_with_data() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ActionRequestConverter::default());
let sec_header = PusTcSecondaryHeader::new_simple(8, 128);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(8, 128));
let action_id = 5_u32;
let mut app_data: [u8; 16] = [0; 16];
// Invalid ID, routing should fail.
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&action_id.to_be_bytes());
for i in 0..8 {
app_data[i + 8] = i as u8;
@@ -559,7 +567,7 @@ mod tests {
SpHeader::new_from_apid(TEST_APID),
sec_header,
&app_data,
true,
CreatorConfig::default(),
);
let token = testbench.add_tc(&pus8_packet);
let result = testbench.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0);
@@ -689,7 +697,7 @@ mod tests {
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), ActionReplyHandler::default());
let action_reply = ActionReplyPus::new(5_u32, ActionReplyVariant::Completed);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
GenericMessage::new(MessageMetadata::new(10_u32, 15_u32), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);

View File

@@ -6,17 +6,17 @@ use satrs::pus::event_man::EventRequestWithToken;
use satrs::pus::event_srv::PusEventServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs_example::config::pus::PUS_EVENT_MANAGEMENT;
use satrs_example::ids::generic_pus::PUS_EVENT_MANAGEMENT;
use super::{DirectPusService, HandlingStatus};
pub fn create_event_service(
tm_sender: TmTcSender,
tm_in_pool_converter: EcssTcInMemConverter,
tm_in_pool_converter: EcssTcCacher,
pus_event_rx: mpsc::Receiver<EcssTcAndToken>,
event_request_tx: mpsc::Sender<EventRequestWithToken>,
) -> EventServiceWrapper {
@@ -36,12 +36,8 @@ pub fn create_event_service(
}
pub struct EventServiceWrapper {
pub handler: PusEventServiceHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
VerificationReporter,
>,
pub handler:
PusEventServiceHandler<MpscTcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
}
impl DirectPusService for EventServiceWrapper {

View File

@@ -5,16 +5,16 @@ use satrs::pus::verification::{
VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap, EcssTcAndToken,
EcssTcInMemConverter, EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver,
PusPacketHandlingError, PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
ActivePusRequestStd, ActiveRequest, DefaultActiveRequestMap, EcssTcAndToken, EcssTcCacher,
EcssTmSender, EcssTmtcError, GenericConversionError, MpscTcReceiver, PusPacketHandlingError,
PusReplyHandler, PusServiceHelper, PusTcToRequestConverter,
};
use satrs::request::{GenericMessage, UniqueApidTargetId};
use satrs::res_code::ResultU16;
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{hk, PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_HK_SERVICE;
use satrs_example::config::{hk_err, tmtc_err};
use satrs_example::ids::generic_pus::PUS_HK;
use std::sync::mpsc;
use std::time::Duration;
@@ -164,11 +164,11 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
found: 4,
});
}
let subservice = tc.subservice();
let subservice = tc.message_subtype_id();
let target_id_and_apid = UniqueApidTargetId::from_pus_tc(tc).expect("invalid tc format");
let unique_id = u32::from_be_bytes(tc.user_data()[4..8].try_into().unwrap());
let standard_subservice = hk::Subservice::try_from(subservice);
let standard_subservice = hk::MessageSubtypeId::try_from(subservice);
if standard_subservice.is_err() {
verif_reporter
.start_failure(
@@ -180,19 +180,22 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
return Err(GenericConversionError::InvalidSubservice(subservice));
}
let request = match standard_subservice.unwrap() {
hk::Subservice::TcEnableHkGeneration | hk::Subservice::TcEnableDiagGeneration => {
hk::MessageSubtypeId::TcEnableHkGeneration
| hk::MessageSubtypeId::TcEnableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::EnablePeriodic)
}
hk::Subservice::TcDisableHkGeneration | hk::Subservice::TcDisableDiagGeneration => {
hk::MessageSubtypeId::TcDisableHkGeneration
| hk::MessageSubtypeId::TcDisableDiagGeneration => {
HkRequest::new(unique_id, HkRequestVariant::DisablePeriodic)
}
hk::Subservice::TcReportHkReportStructures => todo!(),
hk::Subservice::TmHkPacket => todo!(),
hk::Subservice::TcGenerateOneShotHk | hk::Subservice::TcGenerateOneShotDiag => {
hk::MessageSubtypeId::TcReportHkReportStructures => todo!(),
hk::MessageSubtypeId::TmHkPacket => todo!(),
hk::MessageSubtypeId::TcGenerateOneShotHk
| hk::MessageSubtypeId::TcGenerateOneShotDiag => {
HkRequest::new(unique_id, HkRequestVariant::OneShot)
}
hk::Subservice::TcModifyDiagCollectionInterval
| hk::Subservice::TcModifyHkCollectionInterval => {
hk::MessageSubtypeId::TcModifyDiagCollectionInterval
| hk::MessageSubtypeId::TcModifyHkCollectionInterval => {
if user_data.len() < 12 {
verif_reporter
.start_failure(
@@ -242,17 +245,17 @@ impl PusTcToRequestConverter<ActivePusRequestStd, HkRequest> for HkRequestConver
pub fn create_hk_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_hk_rx: mpsc::Receiver<EcssTcAndToken>,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<HkReply>>,
) -> HkServiceWrapper {
let pus_3_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_HK_SERVICE.id(),
PUS_HK.id(),
pus_hk_rx,
tm_sender,
create_verification_reporter(PUS_HK_SERVICE.id(), PUS_HK_SERVICE.apid),
create_verification_reporter(PUS_HK.id(), PUS_HK.apid),
tc_in_mem_converter,
),
HkRequestConverter::default(),
@@ -302,16 +305,19 @@ impl TargetedPusService for HkServiceWrapper {
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer as _;
use arbitrary_int::{u14, u21};
use satrs::pus::test_util::{
TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1, TEST_UNIQUE_ID_0, TEST_UNIQUE_ID_1,
};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::{
hk::HkRequestVariant,
pus::test_util::TEST_APID,
request::GenericMessage,
spacepackets::{
ecss::{hk::Subservice, tc::PusTcCreator},
ecss::{hk::MessageSubtypeId, tc::PusTcCreator},
SpHeader,
},
};
@@ -328,19 +334,18 @@ mod tests {
fn hk_converter_one_shot_req() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[0..4].copy_from_slice(&target_id.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let hk_req = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcGenerateOneShotHk as u8,
MessageTypeId::new(3, MessageSubtypeId::TcGenerateOneShotHk as u8),
&app_data,
true,
CreatorConfig::default(),
);
let accepted_token = hk_bench.add_tc(&hk_req);
let (_active_req, req) = hk_bench
@@ -358,11 +363,11 @@ mod tests {
fn hk_converter_enable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[0..4].copy_from_slice(&target_id.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
@@ -377,18 +382,16 @@ mod tests {
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableHkGeneration as u8,
MessageTypeId::new(3, MessageSubtypeId::TcEnableHkGeneration as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcEnableDiagGeneration as u8,
MessageTypeId::new(3, MessageSubtypeId::TcEnableDiagGeneration as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}
@@ -397,11 +400,11 @@ mod tests {
fn hk_conversion_disable_periodic_generation() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 8] = [0; 8];
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[0..4].copy_from_slice(&target_id.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
let mut generic_check = |tc: &PusTcCreator| {
let accepted_token = hk_bench.add_tc(tc);
@@ -416,18 +419,16 @@ mod tests {
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableHkGeneration as u8,
MessageTypeId::new(3, MessageSubtypeId::TcDisableHkGeneration as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcDisableDiagGeneration as u8,
MessageTypeId::new(3, MessageSubtypeId::TcDisableDiagGeneration as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}
@@ -436,12 +437,12 @@ mod tests {
fn hk_conversion_modify_interval() {
let mut hk_bench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), HkRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let target_id = TEST_UNIQUE_ID_0;
let unique_id = 5_u32;
let mut app_data: [u8; 12] = [0; 12];
let collection_interval_factor = 5_u32;
app_data[0..4].copy_from_slice(&target_id.to_be_bytes());
app_data[0..4].copy_from_slice(&target_id.as_u32().to_be_bytes());
app_data[4..8].copy_from_slice(&unique_id.to_be_bytes());
app_data[8..12].copy_from_slice(&collection_interval_factor.to_be_bytes());
@@ -459,18 +460,16 @@ mod tests {
};
let tc0 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyHkCollectionInterval as u8,
MessageTypeId::new(3, MessageSubtypeId::TcModifyHkCollectionInterval as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc0);
let tc1 = PusTcCreator::new_simple(
sp_header,
3,
Subservice::TcModifyDiagCollectionInterval as u8,
MessageTypeId::new(3, MessageSubtypeId::TcModifyDiagCollectionInterval as u8),
&app_data,
true,
CreatorConfig::default(),
);
generic_check(&tc1);
}
@@ -479,8 +478,8 @@ mod tests {
fn hk_reply_handler() {
let mut reply_testbench =
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_0.id(), HkReplyHandler::default());
let sender_id = 2_u64;
let apid_target_id = 3_u32;
let sender_id = 2_u32;
let apid_target_id = u21::new(3);
let unique_id = 5_u32;
let (req_id, active_req) = reply_testbench.add_tc(TEST_APID, apid_target_id, &[]);
let reply = GenericMessage::new(
@@ -501,7 +500,7 @@ mod tests {
ReplyHandlerTestbench::new(TEST_COMPONENT_ID_1.id(), HkReplyHandler::default());
let action_reply = HkReply::new(5_u32, HkReplyVariant::Ack);
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), action_reply);
GenericMessage::new(MessageMetadata::new(10_u32, 15_u32), action_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);

View File

@@ -4,13 +4,13 @@ use log::warn;
use satrs::pool::PoolAddr;
use satrs::pus::verification::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReporter,
VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
VerificationReporterConfig, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{
ActiveRequestMapProvider, ActiveRequestProvider, EcssTcAndToken, EcssTcInMemConversionProvider,
EcssTcInMemConverter, EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError,
GenericRoutingError, HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter,
PusServiceHelper, PusTcToRequestConverter, TcInMemory,
ActiveRequest, ActiveRequestStore, CacheAndReadRawEcssTc, EcssTcAndToken, EcssTcCacher,
EcssTcReceiver, EcssTmSender, EcssTmtcError, GenericConversionError, GenericRoutingError,
HandlingStatus, PusPacketHandlingError, PusReplyHandler, PusRequestRouter, PusServiceHelper,
PusTcToRequestConverter, TcInMemory,
};
use satrs::queue::{GenericReceiveError, GenericSendError};
use satrs::request::{Apid, GenericMessage, MessageMetadata};
@@ -18,8 +18,8 @@ use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs::tmtc::{PacketAsVec, PacketInPool};
use satrs::ComponentId;
use satrs_example::config::pus::PUS_ROUTING_SERVICE;
use satrs_example::config::{tmtc_err, CustomPusServiceId};
use satrs_example::ids::generic_pus::PUS_ROUTING;
use satrs_example::TimestampHelper;
use std::fmt::Debug;
use std::sync::mpsc;
@@ -33,12 +33,13 @@ pub mod stack;
pub mod test;
pub fn create_verification_reporter(owner_id: ComponentId, apid: Apid) -> VerificationReporter {
let verif_cfg = VerificationReporterCfg::new(apid, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(apid, 1, 2, 8);
// Every software component which needs to generate verification telemetry gets a cloned
// verification reporter.
VerificationReporter::new(owner_id, &verif_cfg)
}
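A short usage sketch for this helper (the `MY_COMPONENT` constant is hypothetical and stands in for one of the ids used at the real call sites, e.g. `ids::generic_pus::PUS_TEST`):

// Hypothetical component id constant with the `id()` and `apid` accessors used elsewhere in
// this example. Each software component builds (or clones) its own reporter via the helper.
let verif_reporter = create_verification_reporter(MY_COMPONENT.id(), MY_COMPONENT.apid);
let reporter_for_second_task = verif_reporter.clone();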
/*
/// Simple router structure which forwards PUS telecommands to dedicated handlers.
pub struct PusTcMpscRouter {
pub test_tc_sender: mpsc::SyncSender<EcssTcAndToken>,
@@ -62,12 +63,9 @@ pub struct PusTcDistributor {
impl PusTcDistributor {
pub fn new(tm_sender: TmTcSender, pus_router: PusTcMpscRouter) -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
id: PUS_ROUTING.raw(),
tm_sender,
verif_reporter: create_verification_reporter(
PUS_ROUTING_SERVICE.id(),
PUS_ROUTING_SERVICE.apid,
),
verif_reporter: create_verification_reporter(PUS_ROUTING.id(), PUS_ROUTING.apid),
pus_router,
stamp_helper: TimestampHelper::default(),
}
@@ -105,18 +103,18 @@ impl PusTcDistributor {
sender_id,
pus_tc_result.unwrap_err()
);
log::warn!("raw data: {:x?}", raw_tc);
log::warn!("raw data: {raw_tc:x?}");
// TODO: Shouldn't this be an error?
return Ok(HandlingStatus::HandledOne);
}
let pus_tc = pus_tc_result.unwrap().0;
let init_token = self.verif_reporter.add_tc(&pus_tc);
let pus_tc = pus_tc_result.unwrap();
let init_token = self.verif_reporter.start_verification(&pus_tc);
self.stamp_helper.update_from_now();
let accepted_token = self
.verif_reporter
.acceptance_success(&self.tm_sender, init_token, self.stamp_helper.stamp())
.expect("Acceptance success failure");
let service = PusServiceId::try_from(pus_tc.service());
let service = PusServiceId::try_from(pus_tc.service_type_id());
let tc_in_memory: TcInMemory = if let Some(store_addr) = addr_opt {
PacketInPool::new(sender_id, store_addr).into()
} else {
@@ -190,6 +188,7 @@ impl PusTcDistributor {
Ok(HandlingStatus::HandledOne)
}
}
*/
pub trait TargetedPusService {
const SERVICE_ID: u8;
@@ -272,16 +271,16 @@ pub struct PusTargetedRequestService<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
> {
pub service_helper:
PusServiceHelper<TcReceiver, TmTcSender, EcssTcInMemConverter, VerificationReporter>,
PusServiceHelper<TcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
pub request_router: GenericRequestRouter,
pub request_converter: RequestConverter,
pub active_request_map: ActiveRequestMap,
pub active_request_map: ActiveRequestMapInstance,
pub reply_handler: ReplyHandler,
pub reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
phantom: std::marker::PhantomData<(RequestType, ActiveRequestInfo, ReplyType)>,
@@ -292,8 +291,8 @@ impl<
VerificationReporter: VerificationReportingProvider,
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
>
@@ -302,7 +301,7 @@ impl<
VerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestMapInstance,
ActiveRequestInfo,
RequestType,
ReplyType,
@@ -314,11 +313,11 @@ where
service_helper: PusServiceHelper<
TcReceiver,
TmTcSender,
EcssTcInMemConverter,
EcssTcCacher,
VerificationReporter,
>,
request_converter: RequestConverter,
active_request_map: ActiveRequestMap,
active_request_map: ActiveRequestMapInstance,
reply_hook: ReplyHandler,
request_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ReplyType>>,
@@ -512,7 +511,7 @@ where
/// and also log the error.
pub fn generic_pus_request_timeout_handler(
sender: &(impl EcssTmSender + ?Sized),
active_request: &(impl ActiveRequestProvider + Debug),
active_request: &(impl ActiveRequest + Debug),
verification_handler: &impl VerificationReportingProvider,
time_stamp: &[u8],
service_str: &'static str,
@@ -534,13 +533,15 @@ pub fn generic_pus_request_timeout_handler(
pub(crate) mod tests {
use std::time::Duration;
use arbitrary_int::{u11, u21};
use satrs::pus::test_util::TEST_COMPONENT_ID_0;
use satrs::pus::{MpscTmAsVecSender, PusTmVariant};
use satrs::request::RequestId;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::{
pus::{
verification::test_util::TestVerificationReporter, ActivePusRequestStd,
ActiveRequestMapProvider, MpscTcReceiver,
ActiveRequestStore, MpscTcReceiver,
},
request::UniqueApidTargetId,
spacepackets::{
@@ -559,7 +560,7 @@ pub(crate) mod tests {
// Testbench dedicated to the testing of [PusReplyHandler]s
pub struct ReplyHandlerTestbench<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Reply,
> {
pub id: ComponentId,
@@ -573,7 +574,7 @@ pub(crate) mod tests {
impl<
ReplyHandler: PusReplyHandler<ActiveRequestInfo, Reply, Error = EcssTmtcError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Reply,
> ReplyHandlerTestbench<ReplyHandler, ActiveRequestInfo, Reply>
{
@@ -593,17 +594,17 @@ pub(crate) mod tests {
pub fn add_tc(
&mut self,
apid: u16,
apid_target: u32,
apid: u11,
apid_target: u21,
time_stamp: &[u8],
) -> (verification::RequestId, ActivePusRequestStd) {
let sp_header = SpHeader::new_from_apid(apid);
let sec_header_dummy = PusTcSecondaryHeader::new_simple(0, 0);
let init = self.verif_reporter.add_tc(&PusTcCreator::new(
let sec_header_dummy = PusTcSecondaryHeader::new_simple(MessageTypeId::new(0, 0));
let init = self.verif_reporter.start_verification(&PusTcCreator::new(
sp_header,
sec_header_dummy,
&[],
true,
CreatorConfig::default(),
));
let accepted = self
.verif_reporter
@@ -674,7 +675,7 @@ pub(crate) mod tests {
// Testbench dedicated to the testing of [PusTcToRequestConverter]s
pub struct PusConverterTestbench<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Request,
> {
pub id: ComponentId,
@@ -688,7 +689,7 @@ pub(crate) mod tests {
impl<
Converter: PusTcToRequestConverter<ActiveRequestInfo, Request, Error = GenericConversionError>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestInfo: ActiveRequest,
Request,
> PusConverterTestbench<Converter, ActiveRequestInfo, Request>
{
@@ -706,7 +707,7 @@ pub(crate) mod tests {
}
pub fn add_tc(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let token = self.verif_reporter.add_tc(tc);
let token = self.verif_reporter.start_verification(tc);
self.current_request_id = Some(verification::RequestId::new(tc));
self.current_packet = Some(tc.to_vec().unwrap());
self.verif_reporter
@@ -722,8 +723,8 @@ pub(crate) mod tests {
&mut self,
token: VerificationToken<TcStateAccepted>,
time_stamp: &[u8],
expected_apid: u16,
expected_apid_target: u32,
expected_apid: u11,
expected_apid_target: u21,
) -> Result<(ActiveRequestInfo, Request), Converter::Error> {
if self.current_packet.is_none() {
return Err(GenericConversionError::InvalidAppData(
@@ -734,7 +735,7 @@ pub(crate) mod tests {
let tc_reader = PusTcReader::new(&current_packet).unwrap();
let (active_info, request) = self.converter.convert(
token,
&tc_reader.0,
&tc_reader,
&self.dummy_sender,
&self.verif_reporter,
time_stamp,
@@ -754,8 +755,8 @@ pub(crate) mod tests {
pub struct TargetedPusRequestTestbench<
RequestConverter: PusTcToRequestConverter<ActiveRequestInfo, RequestType, Error = GenericConversionError>,
ReplyHandler: PusReplyHandler<ActiveRequestInfo, ReplyType, Error = EcssTmtcError>,
ActiveRequestMap: ActiveRequestMapProvider<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequestProvider,
ActiveRequestMapInstance: ActiveRequestStore<ActiveRequestInfo>,
ActiveRequestInfo: ActiveRequest,
RequestType,
ReplyType,
> {
@@ -764,7 +765,7 @@ pub(crate) mod tests {
TestVerificationReporter,
RequestConverter,
ReplyHandler,
ActiveRequestMap,
ActiveRequestMapInstance,
ActiveRequestInfo,
RequestType,
ReplyType,

View File

@@ -1,6 +1,9 @@
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use derive_new::new;
use satrs::mode_tree::{ModeNode, ModeParent};
use satrs_example::config::pus::PUS_MODE_SERVICE;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs_example::ids;
use std::sync::mpsc;
use std::time::Duration;
@@ -8,8 +11,8 @@ use crate::requests::GenericRequestRouter;
use crate::tmtc::sender::TmTcSender;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DefaultActiveRequestMap, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
PusPacketHandlingError, PusServiceHelper,
DefaultActiveRequestMap, EcssTcAndToken, EcssTcCacher, MpscTcReceiver, PusPacketHandlingError,
PusServiceHelper,
};
use satrs::request::GenericMessage;
use satrs::{
@@ -20,8 +23,8 @@ use satrs::{
self, FailParams, TcStateAccepted, TcStateStarted, VerificationReportingProvider,
VerificationToken,
},
ActivePusRequestStd, ActiveRequestProvider, EcssTmSender, EcssTmtcError,
GenericConversionError, PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
ActivePusRequestStd, ActiveRequest, EcssTmSender, EcssTmtcError, GenericConversionError,
PusReplyHandler, PusTcToRequestConverter, PusTmVariant,
},
request::UniqueApidTargetId,
spacepackets::{
@@ -77,10 +80,19 @@ impl PusReplyHandler<ActivePusRequestStd, ModeReply> for ModeReplyHandler {
.write_to_be_bytes(&mut source_data)
.expect("writing mode reply failed");
let req_id = verification::RequestId::from(reply.request_id());
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), 0, 0);
let sec_header =
PusTmSecondaryHeader::new(200, Subservice::TmModeReply as u8, 0, 0, time_stamp);
let pus_tm = PusTmCreator::new(sp_header, sec_header, &source_data, true);
let sp_header = SpHeader::new_for_unseg_tm(req_id.packet_id().apid(), u14::ZERO, 0);
let sec_header = PusTmSecondaryHeader::new(
MessageTypeId::new(200, Subservice::TmModeReply as u8),
0,
0,
time_stamp,
);
let pus_tm = PusTmCreator::new(
sp_header,
sec_header,
&source_data,
CreatorConfig::default(),
);
tm_sender.send_tm(self.owner_id, PusTmVariant::Direct(pus_tm))?;
verification_handler.completion_success(tm_sender, started_token, time_stamp)?;
}
@@ -146,7 +158,7 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
verif_reporter: &impl VerificationReportingProvider,
time_stamp: &[u8],
) -> Result<(ActivePusRequestStd, ModeRequest), Self::Error> {
let subservice = tc.subservice();
let subservice = tc.message_subtype_id();
let user_data = tc.user_data();
let not_enough_app_data = |expected: usize| {
verif_reporter
@@ -210,22 +222,25 @@ impl PusTcToRequestConverter<ActivePusRequestStd, ModeRequest> for ModeRequestCo
pub fn create_mode_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
pus_action_rx: mpsc::Receiver<EcssTcAndToken>,
mode_router: GenericRequestRouter,
reply_receiver: mpsc::Receiver<GenericMessage<ModeReply>>,
) -> ModeServiceWrapper {
let mode_request_handler = PusTargetedRequestService::new(
PusServiceHelper::new(
PUS_MODE_SERVICE.id(),
ids::generic_pus::PUS_MODE.id(),
pus_action_rx,
tm_sender,
create_verification_reporter(PUS_MODE_SERVICE.id(), PUS_MODE_SERVICE.apid),
create_verification_reporter(
ids::generic_pus::PUS_MODE.id(),
ids::generic_pus::PUS_MODE.apid,
),
tc_in_mem_converter,
),
ModeRequestConverter::default(),
DefaultActiveRequestMap::default(),
ModeReplyHandler::new(PUS_MODE_SERVICE.id()),
ModeReplyHandler::new(ids::generic_pus::PUS_MODE.id()),
mode_router,
reply_receiver,
);
@@ -287,8 +302,11 @@ impl TargetedPusService for ModeServiceWrapper {
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer;
use arbitrary_int::u14;
use satrs::pus::test_util::{TEST_APID, TEST_COMPONENT_ID_0, TEST_UNIQUE_ID_0};
use satrs::request::MessageMetadata;
use satrs::spacepackets::ecss::{CreatorConfig, MessageTypeId};
use satrs::{
mode::{ModeAndSubmode, ModeReply, ModeRequest},
pus::mode::Subservice,
@@ -311,11 +329,12 @@ mod tests {
fn mode_converter_read_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcReadMode as u8);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(MessageTypeId::new(200, Subservice::TcReadMode as u8));
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -327,15 +346,16 @@ mod tests {
fn mode_converter_set_mode_request() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcSetMode as u8);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(MessageTypeId::new(200, Subservice::TcSetMode as u8));
let mut app_data: [u8; 4 + ModeAndSubmode::RAW_LEN] = [0; 4 + ModeAndSubmode::RAW_LEN];
let mode_and_submode = ModeAndSubmode::new(2, 1);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
mode_and_submode
.write_to_be_bytes(&mut app_data[4..])
.unwrap();
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -353,11 +373,14 @@ mod tests {
fn mode_converter_announce_mode() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceMode as u8);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(
200,
Subservice::TcAnnounceMode as u8,
));
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -369,12 +392,14 @@ mod tests {
fn mode_converter_announce_mode_recursively() {
let mut testbench =
PusConverterTestbench::new(TEST_COMPONENT_ID_0.id(), ModeRequestConverter::default());
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(200, Subservice::TcAnnounceModeRecursive as u8);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(
200,
Subservice::TcAnnounceModeRecursive as u8,
));
let mut app_data: [u8; 4] = [0; 4];
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
app_data[0..4].copy_from_slice(&TEST_UNIQUE_ID_0.as_u32().to_be_bytes());
let tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = testbench.add_tc(&tc);
let (_active_req, req) = testbench
.convert(token, &[], TEST_APID, TEST_UNIQUE_ID_0)
@@ -390,7 +415,7 @@ mod tests {
);
let mode_reply = ModeReply::ModeReply(ModeAndSubmode::new(5, 1));
let unrequested_reply =
GenericMessage::new(MessageMetadata::new(10_u32, 15_u64), mode_reply);
GenericMessage::new(MessageMetadata::new(10_u32, 15_u32), mode_reply);
// Right now this function does not do a lot. We simply check that it does not panic or do
// weird stuff.
let result = testbench.handle_unrequested_reply(&unrequested_reply);

View File

@@ -9,13 +9,13 @@ use satrs::pus::scheduler::{PusScheduler, TcInfo};
use satrs::pus::scheduler_srv::PusSchedServiceHandler;
use satrs::pus::verification::VerificationReporter;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConverter, MpscTcReceiver,
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
};
use satrs::spacepackets::ecss::PusServiceId;
use satrs::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool};
use satrs::ComponentId;
use satrs_example::config::pus::PUS_SCHED_SERVICE;
use satrs_example::ids::sched::PUS_SCHED;
use super::{DirectPusService, HandlingStatus};
@@ -84,7 +84,7 @@ pub struct SchedulingServiceWrapper {
pub pus_11_handler: PusSchedServiceHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
EcssTcCacher,
VerificationReporter,
PusScheduler,
>,
@@ -174,7 +174,7 @@ impl SchedulingServiceWrapper {
pub fn create_scheduler_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
tc_releaser: TcReleaser,
pus_sched_rx: mpsc::Receiver<EcssTcAndToken>,
sched_tc_pool: StaticMemoryPool,
@@ -183,10 +183,10 @@ pub fn create_scheduler_service(
.expect("Creating PUS Scheduler failed");
let pus_11_handler = PusSchedServiceHandler::new(
PusServiceHelper::new(
PUS_SCHED_SERVICE.id(),
PUS_SCHED.id(),
pus_sched_rx,
tm_sender,
create_verification_reporter(PUS_SCHED_SERVICE.id(), PUS_SCHED_SERVICE.apid),
create_verification_reporter(PUS_SCHED.id(), PUS_SCHED.apid),
tc_in_mem_converter,
),
scheduler,

View File

@@ -1,33 +1,33 @@
use crate::pus::create_verification_reporter;
use crate::tmtc::sender::TmTcSender;
use log::info;
use satrs::event_man::{EventMessage, EventMessageU32};
use satrs::event_man_legacy::{EventMessage, EventMessageU32};
use satrs::pus::test::PusService17TestHandler;
use satrs::pus::verification::{FailParams, VerificationReporter, VerificationReportingProvider};
use satrs::pus::PartialPusHandlingError;
use satrs::pus::{
DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcInMemConversionProvider,
EcssTcInMemConverter, MpscTcReceiver, PusServiceHelper,
CacheAndReadRawEcssTc, DirectPusPacketHandlerResult, EcssTcAndToken, EcssTcCacher,
MpscTcReceiver, PusServiceHelper,
};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::{PusPacket, PusServiceId};
use satrs_example::config::pus::PUS_TEST_SERVICE;
use satrs_example::config::{tmtc_err, TEST_EVENT};
use satrs_example::ids::generic_pus::PUS_TEST;
use std::sync::mpsc;
use super::{DirectPusService, HandlingStatus};
pub fn create_test_service(
tm_sender: TmTcSender,
tc_in_mem_converter: EcssTcInMemConverter,
tc_in_mem_converter: EcssTcCacher,
event_sender: mpsc::SyncSender<EventMessageU32>,
pus_test_rx: mpsc::Receiver<EcssTcAndToken>,
) -> TestCustomServiceWrapper {
let pus17_handler = PusService17TestHandler::new(PusServiceHelper::new(
PUS_TEST_SERVICE.id(),
PUS_TEST.id(),
pus_test_rx,
tm_sender,
create_verification_reporter(PUS_TEST_SERVICE.id(), PUS_TEST_SERVICE.apid),
create_verification_reporter(PUS_TEST.id(), PUS_TEST.apid),
tc_in_mem_converter,
));
TestCustomServiceWrapper {
@@ -37,12 +37,8 @@ pub fn create_test_service(
}
pub struct TestCustomServiceWrapper {
pub handler: PusService17TestHandler<
MpscTcReceiver,
TmTcSender,
EcssTcInMemConverter,
VerificationReporter,
>,
pub handler:
PusService17TestHandler<MpscTcReceiver, TmTcSender, EcssTcCacher, VerificationReporter>,
pub event_tx: mpsc::SyncSender<EventMessageU32>,
}
@@ -90,7 +86,7 @@ impl DirectPusService for TestCustomServiceWrapper {
);
}
DirectPusPacketHandlerResult::CustomSubservice(subservice, token) => {
let (tc, _) = PusTcReader::new(
let tc = PusTcReader::new(
self.handler
.service_helper
.tc_in_mem_converter
@@ -100,7 +96,7 @@ impl DirectPusService for TestCustomServiceWrapper {
if subservice == 128 {
info!("generating test event");
self.event_tx
.send(EventMessage::new(PUS_TEST_SERVICE.id(), TEST_EVENT.into()))
.send(EventMessage::new(PUS_TEST.id(), TEST_EVENT.into()))
.expect("Sending test event failed");
match self.handler.service_helper.verif_reporter().start_success(
self.handler.service_helper.tm_sender(),
@@ -126,7 +122,7 @@ impl DirectPusService for TestCustomServiceWrapper {
}
}
} else {
let fail_data = [tc.subservice()];
let fail_data = [tc.message_subtype_id()];
self.handler
.service_helper
.verif_reporter()

View File

@@ -1,6 +1,7 @@
use satrs::spacepackets::time::{cds::CdsTime, TimeWriter};
use satrs::spacepackets::time::cds::CdsTime;
pub mod config;
pub mod ids;
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum DeviceMode {

View File

@@ -18,6 +18,7 @@ use interface::{
};
use log::info;
use logger::setup_logger;
/*
use pus::{
action::create_action_service,
event::create_event_service,
@@ -28,23 +29,28 @@ use pus::{
test::create_test_service,
PusTcDistributor, PusTcMpscRouter,
};
*/
use requests::GenericRequestRouter;
use satrs::{
hal::std::{tcp_server::ServerConfig, udp_server::UdpTcServer},
mode::{Mode, ModeAndSubmode, ModeRequest, ModeRequestHandlerMpscBounded},
mode_tree::connect_mode_nodes,
pus::{event_man::EventRequestWithToken, EcssTcInMemConverter, HandlingStatus},
pus::{event_man::EventRequestWithToken, EcssTcCacher, HandlingStatus},
request::{GenericMessage, MessageMetadata},
spacepackets::time::{cds::CdsTime, TimeWriter},
spacepackets::time::cds::CdsTime,
};
use satrs_example::{
config::{
acs::{MGM_HANDLER_0, MGM_HANDLER_1},
components::{NO_SENDER, PCDU_HANDLER, TCP_SERVER, UDP_SERVER},
components::NO_SENDER,
pool::create_sched_tc_pool,
tasks::{FREQ_MS_AOCS, FREQ_MS_PUS_STACK, FREQ_MS_UDP_TMTC, SIM_CLIENT_IDLE_DELAY_MS},
OBSW_SERVER_ADDR, PACKET_ID_VALIDATOR, SERVER_PORT,
},
ids::{
acs::*,
eps::*,
tmtc::{TCP_SERVER, UDP_SERVER},
},
DeviceMode,
};
use tmtc::sender::TmTcSender;
@@ -53,12 +59,12 @@ use tmtc::{tc_source::TcSourceTask, tm_sink::TmSink};
cfg_if::cfg_if! {
if #[cfg(feature = "heap_tmtc")] {
use interface::udp::DynamicUdpTmHandler;
use satrs::pus::EcssTcInVecConverter;
use satrs::pus::EcssTcVecCacher;
use tmtc::{tc_source::TcSourceTaskDynamic, tm_sink::TmSinkDynamic};
} else {
use std::sync::RwLock;
use interface::udp::StaticUdpTmHandler;
use satrs::pus::EcssTcInSharedPoolConverter;
use satrs::pus::EcssTcInSharedPoolCacher;
use satrs::tmtc::{PacketSenderWithSharedPool, SharedPacketPool};
use satrs_example::config::pool::create_static_pools;
use tmtc::{
@@ -74,7 +80,7 @@ mod events;
mod hk;
mod interface;
mod logger;
mod pus;
//mod pus;
mod requests;
mod spi;
mod tmtc;
@@ -124,13 +130,13 @@ fn main() {
let mut request_map = GenericRequestRouter::default();
request_map
.composite_router_map
.insert(MGM_HANDLER_0.id(), mgm_0_handler_composite_tx);
.insert(MGM0.id(), mgm_0_handler_composite_tx);
request_map
.composite_router_map
.insert(MGM_HANDLER_0.id(), mgm_1_handler_composite_tx);
.insert(MGM1.id(), mgm_1_handler_composite_tx);
request_map
.composite_router_map
.insert(PCDU_HANDLER.id(), pcdu_handler_composite_tx);
.insert(PCDU.id(), pcdu_handler_composite_tx);
// This helper structure is used by all telecommand providers which need to send telecommands
// to the TC source.
@@ -139,9 +145,9 @@ fn main() {
let tc_sender_with_shared_pool =
PacketSenderWithSharedPool::new(tc_source_tx, shared_tc_pool_wrapper.clone());
let tc_in_mem_converter =
EcssTcInMemConverter::Static(EcssTcInSharedPoolConverter::new(shared_tc_pool, 4096));
EcssTcCacher::Static(EcssTcInSharedPoolCacher::new(shared_tc_pool, 4096));
} else if #[cfg(feature = "heap_tmtc")] {
let tc_in_mem_converter = EcssTcInMemConverter::Heap(EcssTcInVecConverter::default());
let tc_in_mem_converter = EcssTcCacher::Heap(EcssTcVecCacher::default());
}
}
@@ -173,6 +179,7 @@ fn main() {
let tc_releaser = TcReleaser::Heap(tc_source_tx.clone());
}
}
/*
let pus_router = PusTcMpscRouter {
test_tc_sender: pus_test_tx,
event_tc_sender: pus_event_tx,
@@ -229,6 +236,7 @@ fn main() {
pus_scheduler_service,
pus_mode_service,
);
*/
cfg_if::cfg_if! {
if #[cfg(not(feature = "heap_tmtc"))] {
@@ -301,10 +309,8 @@ fn main() {
let shared_mgm_0_set = Arc::default();
let shared_mgm_1_set = Arc::default();
let mgm_0_mode_node =
ModeRequestHandlerMpscBounded::new(MGM_HANDLER_0.into(), mgm_0_handler_mode_rx);
let mgm_1_mode_node =
ModeRequestHandlerMpscBounded::new(MGM_HANDLER_1.into(), mgm_1_handler_mode_rx);
let mgm_0_mode_node = ModeRequestHandlerMpscBounded::new(MGM0.into(), mgm_0_handler_mode_rx);
let mgm_1_mode_node = ModeRequestHandlerMpscBounded::new(MGM1.into(), mgm_1_handler_mode_rx);
let (mgm_0_spi_interface, mgm_1_spi_interface) =
if let Some(sim_client) = opt_sim_client.as_mut() {
sim_client
@@ -328,7 +334,7 @@ fn main() {
)
};
let mut mgm_0_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_0,
MGM0,
"MGM_0",
mgm_0_mode_node,
mgm_0_handler_composite_rx,
@@ -339,7 +345,7 @@ fn main() {
shared_mgm_0_set,
);
let mut mgm_1_handler = MgmHandlerLis3Mdl::new(
MGM_HANDLER_1,
MGM1,
"MGM_1",
mgm_1_mode_node,
mgm_1_handler_composite_rx,
@@ -372,10 +378,9 @@ fn main() {
} else {
SerialSimInterfaceWrapper::Dummy(SerialInterfaceDummy::default())
};
let pcdu_mode_node =
ModeRequestHandlerMpscBounded::new(PCDU_HANDLER.into(), pcdu_handler_mode_rx);
let pcdu_mode_node = ModeRequestHandlerMpscBounded::new(PCDU.into(), pcdu_handler_mode_rx);
let mut pcdu_handler = PcduHandler::new(
PCDU_HANDLER,
PCDU,
"PCDU",
pcdu_mode_node,
pcdu_handler_composite_rx,

View File

@@ -8,14 +8,14 @@ use satrs::mode::ModeRequest;
use satrs::pus::verification::{
FailParams, TcStateAccepted, VerificationReportingProvider, VerificationToken,
};
use satrs::pus::{ActiveRequestProvider, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::pus::{ActiveRequest, EcssTmSender, GenericRoutingError, PusRequestRouter};
use satrs::queue::GenericSendError;
use satrs::request::{GenericMessage, MessageMetadata, UniqueApidTargetId};
use satrs::spacepackets::ecss::tc::PusTcReader;
use satrs::spacepackets::ecss::PusPacket;
use satrs::ComponentId;
use satrs_example::config::pus::PUS_ROUTING_SERVICE;
use satrs_example::config::tmtc_err;
use satrs_example::ids;
#[derive(Clone, Debug)]
#[non_exhaustive]
@@ -37,7 +37,7 @@ pub struct GenericRequestRouter {
impl Default for GenericRequestRouter {
fn default() -> Self {
Self {
id: PUS_ROUTING_SERVICE.raw(),
id: ids::generic_pus::PUS_ROUTING.raw(),
composite_router_map: Default::default(),
mode_router_map: Default::default(),
}
@@ -46,7 +46,7 @@ impl Default for GenericRequestRouter {
impl GenericRequestRouter {
pub(crate) fn handle_error_generic(
&self,
active_request: &impl ActiveRequestProvider,
active_request: &impl ActiveRequest,
tc: &PusTcReader,
error: GenericRoutingError,
tm_sender: &(impl EcssTmSender + ?Sized),
@@ -55,7 +55,7 @@ impl GenericRequestRouter {
) {
warn!(
"Routing request for service {} failed: {error:?}",
tc.service()
tc.service_type_id()
);
let accepted_token: VerificationToken<TcStateAccepted> = active_request
.token()
@@ -66,7 +66,8 @@ impl GenericRequestRouter {
let apid_target_id = UniqueApidTargetId::from(id);
warn!("Target APID for request: {}", apid_target_id.apid);
warn!("Target Unique ID for request: {}", apid_target_id.unique_id);
let mut fail_data: [u8; 8] = [0; 8];
let mut fail_data: [u8; core::mem::size_of::<ComponentId>()] =
[0; core::mem::size_of::<ComponentId>()];
fail_data.copy_from_slice(&id.to_be_bytes());
verif_reporter
.completion_failure(

View File

@@ -4,7 +4,7 @@ use satrs::{
pus::EcssTmSender,
queue::GenericSendError,
spacepackets::ecss::WritablePusPacket,
tmtc::{PacketAsVec, PacketSenderRaw, PacketSenderWithSharedPool, StoreAndSendError},
tmtc::{PacketAsVec, PacketHandler, PacketSenderWithSharedPool},
ComponentId,
};
@@ -48,26 +48,33 @@ impl EcssTmSender for TmTcSender {
}
}
impl PacketSenderRaw for TmTcSender {
type Error = StoreAndSendError;
impl PacketHandler for TmTcSender {
type Error = GenericSendError;
fn send_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
fn handle_packet(&self, sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
match self {
TmTcSender::Static(packet_sender_with_shared_pool) => {
packet_sender_with_shared_pool.send_packet(sender_id, packet)
if let Err(e) = packet_sender_with_shared_pool.handle_packet(sender_id, packet) {
log::error!("Error sending packet via Static TM/TC sender: {:?}", e);
}
}
TmTcSender::Heap(sync_sender) => {
if let Err(e) = sync_sender.handle_packet(sender_id, packet) {
log::error!("Error sending packet via Heap TM/TC sender: {:?}", e);
}
}
TmTcSender::Mock(sender) => {
sender.handle_packet(sender_id, packet).unwrap();
}
TmTcSender::Heap(sync_sender) => sync_sender
.send_packet(sender_id, packet)
.map_err(StoreAndSendError::Send),
TmTcSender::Mock(sender) => sender.send_packet(sender_id, packet),
}
Ok(())
}
}
impl PacketSenderRaw for MockSender {
type Error = StoreAndSendError;
impl PacketHandler for MockSender {
type Error = GenericSendError;
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
fn handle_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut mut_queue = self.0.borrow_mut();
mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
Ok(())

View File

@@ -2,8 +2,12 @@ use satrs::{
pool::PoolProvider,
pus::HandlingStatus,
tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
ComponentId,
};
use std::{
collections::HashMap,
sync::mpsc::{self, TryRecvError},
};
use std::sync::mpsc::{self, TryRecvError};
use crate::pus::PusTcDistributor;
@@ -11,7 +15,9 @@ use crate::pus::PusTcDistributor;
pub struct TcSourceTaskStatic {
shared_tc_pool: SharedPacketPool,
tc_receiver: mpsc::Receiver<PacketInPool>,
tc_buf: [u8; 4096],
/// We allocate this buffer on the heap to avoid a clippy warning about a large size
/// difference between enum variants.
tc_buf: Box<[u8; 4096]>,
pus_distributor: PusTcDistributor,
}
@@ -25,7 +31,7 @@ impl TcSourceTaskStatic {
Self {
shared_tc_pool,
tc_receiver,
tc_buf: [0; 4096],
tc_buf: Box::new([0; 4096]),
pus_distributor: pus_receiver,
}
}
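For context on the boxed buffer above: a large inline array makes the task struct, and any enum wrapping it, very large, which clippy reports as `large_enum_variant`. A standalone sketch of the effect (names are illustrative, not taken from this crate):

// Illustrative only: compare the size of an enum holding the buffer inline vs. boxed.
enum TaskInline {
    WithBuffer([u8; 4096]),
    Other(u8),
}
enum TaskBoxed {
    WithBuffer(Box<[u8; 4096]>),
    Other(u8),
}

fn main() {
    // The inline variant forces the whole enum to be at least 4096 bytes large,
    // while the boxed variant only stores a pointer.
    println!("inline: {}", core::mem::size_of::<TaskInline>());
    println!("boxed:  {}", core::mem::size_of::<TaskBoxed>());
}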
@@ -44,11 +50,11 @@ impl TcSourceTaskStatic {
.0
.read()
.expect("locking tc pool failed");
pool.read(&packet_in_pool.store_addr, &mut self.tc_buf)
pool.read(&packet_in_pool.store_addr, self.tc_buf.as_mut_slice())
.expect("reading pool failed");
drop(pool);
self.pus_distributor
.handle_tc_packet_in_store(packet_in_pool, &self.tc_buf)
.handle_tc_packet_in_store(packet_in_pool, self.tc_buf.as_slice())
.ok();
HandlingStatus::HandledOne
}
@@ -63,18 +69,24 @@ impl TcSourceTaskStatic {
}
}
pub type CcsdsDistributorDyn = HashMap<ComponentId, std::sync::mpsc::SyncSender<PacketAsVec>>;
pub type CcsdsDistributorStatic = HashMap<ComponentId, std::sync::mpsc::SyncSender<PacketInPool>>;
// TC source components where the heap is the backing memory of the received telecommands.
pub struct TcSourceTaskDynamic {
pub tc_receiver: mpsc::Receiver<PacketAsVec>,
pus_distributor: PusTcDistributor,
ccsds_distributor: CcsdsDistributorDyn,
}
#[allow(dead_code)]
impl TcSourceTaskDynamic {
pub fn new(tc_receiver: mpsc::Receiver<PacketAsVec>, pus_receiver: PusTcDistributor) -> Self {
pub fn new(
tc_receiver: mpsc::Receiver<PacketAsVec>,
ccsds_distributor: CcsdsDistributorDyn,
) -> Self {
Self {
tc_receiver,
pus_distributor: pus_receiver,
ccsds_distributor,
}
}

View File

@@ -3,18 +3,17 @@ use std::{
sync::mpsc::{self},
};
use arbitrary_int::{u11, u14};
use log::info;
use satrs::{
pool::PoolProvider,
spacepackets::{
ecss::{tm::PusTmZeroCopyWriter, PusPacket},
seq_count::CcsdsSimpleSeqCountProvider,
seq_count::SequenceCounter,
seq_count::SequenceCounterCcsdsSimple,
time::cds::MIN_CDS_FIELD_LEN,
CcsdsPacket,
},
};
use satrs::{
spacepackets::seq_count::SequenceCountProvider,
tmtc::{PacketAsVec, PacketInPool, SharedPacketPool},
};
@@ -22,14 +21,16 @@ use crate::interface::tcp::SyncTcpTmSource;
#[derive(Default)]
pub struct CcsdsSeqCounterMap {
apid_seq_counter_map: HashMap<u16, CcsdsSimpleSeqCountProvider>,
apid_seq_counter_map: HashMap<u11, SequenceCounterCcsdsSimple>,
}
impl CcsdsSeqCounterMap {
pub fn get_and_increment(&mut self, apid: u16) -> u16 {
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment()
pub fn get_and_increment(&mut self, apid: u11) -> u14 {
u14::new(
self.apid_seq_counter_map
.entry(apid)
.or_default()
.get_and_increment(),
)
}
}
@@ -58,7 +59,7 @@ impl TmFunnelCommon {
);
let entry = self
.msg_counter_map
.entry(zero_copy_writer.service())
.entry(zero_copy_writer.service_type_id())
.or_insert(0);
zero_copy_writer.set_msg_count(*entry);
if *entry == u16::MAX {
@@ -75,8 +76,8 @@ impl TmFunnelCommon {
fn packet_printout(tm: &PusTmZeroCopyWriter) {
info!(
"Sending PUS TM[{},{}] with APID {}",
tm.service(),
tm.subservice(),
tm.service_type_id(),
tm.message_subtype_id(),
tm.apid()
);
}
@@ -114,7 +115,7 @@ impl TmSinkStatic {
let mut tm_copy = Vec::new();
pool_guard
.modify(&pus_tm_in_pool.store_addr, |buf| {
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN)
let zero_copy_writer = PusTmZeroCopyWriter::new(buf, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
tm_copy = buf.to_vec()
@@ -154,8 +155,9 @@ impl TmSinkDynamic {
if let Ok(mut tm) = self.tm_funnel_rx.recv() {
// Read the TM, set sequence counter and message counter, and finally update
// the CRC.
let zero_copy_writer = PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN)
.expect("Creating TM zero copy writer failed");
let zero_copy_writer =
PusTmZeroCopyWriter::new(&mut tm.packet, MIN_CDS_FIELD_LEN, true)
.expect("Creating TM zero copy writer failed");
self.common.apply_packet_processing(zero_copy_writer);
self.common.sync_tm_tcp_source.add_tm(&tm.packet);
self.tm_server_tx

View File

@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.1.3] 2024-08-26
Bump `satrs-shared`.
# [v0.1.2] 2024-04-17
Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
@@ -19,3 +23,6 @@ Allow `satrs-shared` from `v0.1.3` to `<v0.2`.
# [v0.1.0] 2024-02-12
Initial release containing the `resultcode` macro.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-mib-v0.1.3...HEAD
[v0.1.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-mib-v0.1.2...satrs-mib-v0.1.3

View File

@@ -1,6 +1,6 @@
[package]
name = "satrs-mib"
version = "0.1.2"
version = "0.1.3"
edition = "2021"
rust-version = "1.61"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
@@ -23,7 +23,8 @@ version = "1"
optional = true
[dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = "0.2"
path = "../satrs-shared"
features = ["serde"]
[dependencies.satrs-mib-codegen]

View File

@@ -28,7 +28,8 @@ features = ["full"]
trybuild = { version = "1", features = ["diff"] }
[dev-dependencies.satrs-shared]
version = ">=0.1.3, <=0.2"
version = "0.2"
path = "../../satrs-shared"
[dev-dependencies.satrs-mib]
path = ".."

View File

@@ -11,7 +11,7 @@ serde_json = "1"
log = "0.4"
thiserror = "2"
fern = "0.7"
strum = { version = "0.26", features = ["derive"] }
strum = { version = "0.27", features = ["derive"] }
num_enum = "0.7"
humantime = "2"
tai-time = { version = "0.3", features = ["serde"] }

View File

@@ -120,7 +120,7 @@ impl SimController {
fn handle_ctrl_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let sim_ctrl_request = SimCtrlRequest::from_sim_message(request)?;
if SIM_CTRL_REQ_WIRETAPPING {
log::info!("received sim ctrl request: {:?}", sim_ctrl_request);
log::info!("received sim ctrl request: {sim_ctrl_request:?}");
}
match sim_ctrl_request {
SimCtrlRequest::Ping => {
@@ -139,7 +139,7 @@ impl SimController {
) -> Result<(), SimRequestError> {
let mgm_request = MgmRequestLis3Mdl::from_sim_message(request)?;
if MGM_REQ_WIRETAPPING {
log::info!("received MGM request: {:?}", mgm_request);
log::info!("received MGM request: {mgm_request:?}");
}
match mgm_request {
MgmRequestLis3Mdl::RequestSensorData => {
@@ -160,7 +160,7 @@ impl SimController {
fn handle_pcdu_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let pcdu_request = PcduRequest::from_sim_message(request)?;
if PCDU_REQ_WIRETAPPING {
log::info!("received PCDU request: {:?}", pcdu_request);
log::info!("received PCDU request: {pcdu_request:?}");
}
match pcdu_request {
PcduRequest::RequestSwitchInfo => {
@@ -188,7 +188,7 @@ impl SimController {
fn handle_mgt_request(&mut self, request: &SimRequest) -> Result<(), SimRequestError> {
let mgt_request = MgtRequest::from_sim_message(request)?;
if MGT_REQ_WIRETAPPING {
log::info!("received MGT request: {:?}", mgt_request);
log::info!("received MGT request: {mgt_request:?}");
}
match mgt_request {
MgtRequest::ApplyTorque { duration, dipole } => self

View File

@@ -130,7 +130,7 @@ fn main() {
let mut udp_server =
SimUdpServer::new(SIM_CTRL_PORT, request_sender, reply_receiver, 200, None)
.expect("could not create UDP request server");
log::info!("starting UDP server on port {}", SIM_CTRL_PORT);
log::info!("starting UDP server on port {SIM_CTRL_PORT}");
// This thread manages the simulator UDP server.
let udp_tc_thread = thread::spawn(move || {
udp_server.run();

View File

@@ -8,6 +8,18 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.2.4] 2025-11-06
`spacepackets` v0.17.0
# [v0.2.3] 2025-07-22
`spacepackets` range v0.14 to v0.15
# [v0.2.2] 2025-05-10
- Bump to `spacepackets` v0.14
# [v0.2.1] 2024-11-15
Increased allowed spacepackets to v0.13
@@ -41,3 +53,8 @@ Allow `spacepackets` range starting with v0.10 and v0.11.
# [v0.1.0] 2024-02-12
Initial release.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.4...HEAD
[v0.2.4]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.3...satrs-shared-v0.2.4
[v0.2.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.1...satrs-shared-v0.2.3
[v0.2.2]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-shared-v0.2.1...satrs-shared-v0.2.2

View File

@@ -1,7 +1,7 @@
[package]
name = "satrs-shared"
description = "Components shared by multiple sat-rs crates"
version = "0.2.1"
version = "0.2.4"
edition = "2021"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
@@ -11,19 +11,9 @@ license = "Apache-2.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
[dependencies.serde]
version = "1"
default-features = false
optional = true
[dependencies.defmt]
version = "0.3"
optional = true
[dependencies.spacepackets]
version = ">0.9, <=0.13"
default-features = false
spacepackets = { version = "0.17", default-features = false }
serde = { version = "1", default-features = false, optional = true }
defmt = {version = "1", optional = true }
[features]
serde = ["dep:serde", "spacepackets/serde"]

View File

@@ -1,4 +1,4 @@
//! This crates contains modules shared among other sat-rs framework crates.
#![no_std]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(docsrs, feature(doc_cfg))]
pub mod res_code;

View File

@@ -14,6 +14,7 @@ pub struct ResultU16 {
}
impl ResultU16 {
#[inline]
pub const fn new(group_id: u8, unique_id: u8) -> Self {
Self {
group_id,
@@ -21,18 +22,22 @@ impl ResultU16 {
}
}
pub fn raw(&self) -> u16 {
#[inline]
pub const fn raw(&self) -> u16 {
((self.group_id as u16) << 8) | self.unique_id as u16
}
pub fn group_id(&self) -> u8 {
#[inline]
pub const fn group_id(&self) -> u8 {
self.group_id
}
pub fn unique_id(&self) -> u8 {
#[inline]
pub const fn unique_id(&self) -> u8 {
self.unique_id
}
#[inline]
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
@@ -51,6 +56,7 @@ impl From<ResultU16> for EcssEnumU16 {
}
impl UnsignedEnum for ResultU16 {
#[inline]
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
@@ -67,12 +73,14 @@ impl UnsignedEnum for ResultU16 {
Ok(self.size())
}
fn value(&self) -> u64 {
#[inline]
fn value_raw(&self) -> u64 {
self.raw() as u64
}
}
impl EcssEnumeration for ResultU16 {
#[inline]
fn pfc(&self) -> u8 {
16
}

View File

@@ -8,6 +8,31 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
# [unreleased]
# [v0.3.0-alpha.3] 2025-11-06
- Bump `sat-rs` edition to 2024.
- Bumped `spacepackets` to v0.17
- `ComponentId` is u32 now
- Simplified TCP servers
## Changed
Some trait renaming to be more in line with Rust naming conventions; a short before/after sketch follows the list below.
- `EventTmHookProvider` -> `EventTmHook`
- `ActiveRequestProvider` -> `ActiveRequest`
- `EcssTcInMemConversionProvider` -> `CacheAndReadRawEcssTc`
- `ActiveRequestMapProvider` -> `ActiveRequestStore`
- `CountdownProvider` -> `Countdown`
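As a rough before/after illustration of what the renaming means for downstream code (trait bounds abbreviated, not taken from the example application):

use satrs::pus::{ActiveRequest, ActiveRequestStore};

// Before this release the bounds were spelled with the old names:
// fn route<Info: ActiveRequestProvider>(store: &impl ActiveRequestMapProvider<Info>) { ... }

// After the rename only the trait names in the bounds change; implementations stay the same.
fn route<Info: ActiveRequest>(store: &impl ActiveRequestStore<Info>) {
    let _ = store;
}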
# [v0.3.0-alpha.2] 2025-07-22
`satrs-shared` update
# [v0.3.0-alpha.1] 2025-07-22
`spacepackets` range v0.14 to v0.15
# [v0.3.0-alpha.0] 2025-02-18
`spacepackets` v0.13
@@ -198,3 +223,9 @@ docs-rs hotfix
# [v0.1.0] 2024-02-12
Initial release.
[unreleased]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.3...HEAD
[v0.3.0-alpha.3]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.2...satrs-v0.3.0-alpha.3
[v0.3.0-alpha.2]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.1...satrs-v0.3.0-alpha.2
[v0.3.0-alpha.1]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.3.0-alpha.0...satrs-v0.3.0-alpha.1
[v0.3.0-alpha.0]: https://egit.irs.uni-stuttgart.de/rust/sat-rs/compare/satrs-v0.2.1...satrs-v0.3.0-alpha.0

View File

@@ -1,11 +1,11 @@
[package]
name = "satrs"
version = "0.3.0-alpha.0"
edition = "2021"
rust-version = "1.82.0"
version = "0.3.0-alpha.3"
edition = "2024"
rust-version = "1.85.0"
authors = ["Robin Mueller <muellerr@irs.uni-stuttgart.de>"]
description = "A framework to build software for remote systems"
homepage = "https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/"
description = "A library collection to build software for remote systems"
homepage = "https://github.com/us-irs/sat-rs"
repository = "https://egit.irs.uni-stuttgart.de/rust/sat-rs"
license = "Apache-2.0"
keywords = ["no-std", "space", "aerospace"]
@@ -13,29 +13,29 @@ keywords = ["no-std", "space", "aerospace"]
categories = ["aerospace", "aerospace::space-protocols", "no-std", "hardware-support", "embedded"]
[dependencies]
satrs-shared = ">=0.1.3, <=0.2"
delegate = ">0.7, <=0.13"
satrs-shared = { version = "0.2", path = "../satrs-shared" }
spacepackets = { version = "0.17", default-features = false }
delegate = "0.13"
paste = "1"
derive-new = ">=0.6, <=0.7"
smallvec = "1"
crc = "3"
num_enum = { version = ">0.5, <=0.7", default-features = false }
spacepackets = { version = "0.13", default-features = false }
cobs = { version = "0.3", default-features = false }
num-traits = { version = "0.2", default-features = false }
derive-new = "0.7"
num_enum = { version = "0.7", default-features = false }
cobs = { version = "0.5", default-features = false }
thiserror = { version = "2", default-features = false }
hashbrown = { version = ">=0.14, <=0.15", optional = true }
static_cell = { version = "2", optional = true }
hashbrown = { version = "0.16", optional = true }
static_cell = { version = "2" }
heapless = { version = "0.9", optional = true }
dyn-clone = { version = "1", optional = true }
heapless = { version = "0.8", optional = true }
downcast-rs = { version = "2", default-features = false, optional = true }
bus = { version = "2.2", optional = true }
crossbeam-channel = { version = "0.5", default-features = false, optional = true }
postcard = { version = "1", features = ["alloc"] }
serde = { version = "1", default-features = false, optional = true }
socket2 = { version = "0.5", features = ["all"], optional = true }
socket2 = { version = "0.6", features = ["all"], optional = true }
arbitrary-int = "2"
mio = { version = "1", features = ["os-poll", "net"], optional = true }
defmt = { version = "0.3", optional = true }
defmt = { version = "1", optional = true }
[dev-dependencies]
serde = "1"
@@ -49,7 +49,7 @@ tempfile = "3"
version = "1"
[features]
default = ["std"]
default = ["std", "heapless"]
std = [
"downcast-rs/std",
"alloc",
@@ -64,6 +64,7 @@ std = [
]
alloc = [
"serde/alloc",
"cobs/alloc",
"spacepackets/alloc",
"hashbrown",
"dyn-clone",
@@ -71,7 +72,6 @@ alloc = [
]
serde = ["dep:serde", "spacepackets/serde", "satrs-shared/serde"]
crossbeam = ["crossbeam-channel"]
heapless = ["dep:heapless", "static_cell"]
defmt = ["dep:defmt", "spacepackets/defmt"]
test_util = []

View File

@@ -4,5 +4,5 @@
sat-rs
======
This crate contains the primary components of the sat-rs framework.
This crate contains the primary components of the sat-rs library collection.
You can find more information on the [homepage](https://egit.irs.uni-stuttgart.de/rust/sat-rs).

View File

@@ -1,9 +1,9 @@
use crate::{
ComponentId,
mode::{ModeAndSubmode, ModeReply, ModeRequest, ModeRequestSender},
mode_tree::{ModeStoreProvider, ModeStoreVec},
queue::{GenericSendError, GenericTargetedMessagingError},
request::{GenericMessage, RequestId},
ComponentId,
};
use core::fmt::Debug;
@@ -270,7 +270,7 @@ impl<UserHook: DevManagerUserHook> DevManagerCommandingHelper<UserHook> {
#[cfg(test)]
mod tests {
use crate::{
mode::{tests::ModeReqSenderMock, UNKNOWN_MODE},
mode::{UNKNOWN_MODE, tests::ModeReqSenderMock},
request::MessageMetadata,
};
@@ -296,18 +296,18 @@ mod tests {
fn test_mode_announce() {
let mut assy_helper = DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
let mode_req_sender = ModeReqSenderMock::default();
assy_helper.add_mode_child(ExampleId::Id1 as u64, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id2 as u64, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
assy_helper
.send_announce_mode_cmd_to_children(1, &mode_req_sender, false)
.unwrap();
assert_eq!(mode_req_sender.requests.borrow().len(), 2);
let mut req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id1 as u64);
assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(req.request, ModeRequest::AnnounceMode);
req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id2 as u64);
assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(req.request, ModeRequest::AnnounceMode);
}
@@ -316,18 +316,18 @@ mod tests {
fn test_mode_announce_recursive() {
let mut assy_helper = DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
let mode_req_sender = ModeReqSenderMock::default();
assy_helper.add_mode_child(ExampleId::Id1 as u64, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id2 as u64, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
assy_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
assy_helper
.send_announce_mode_cmd_to_children(1, &mode_req_sender, true)
.unwrap();
assert_eq!(mode_req_sender.requests.borrow().len(), 2);
let mut req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id1 as u64);
assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(req.request, ModeRequest::AnnounceModeRecursive);
req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id2 as u64);
assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(req.request, ModeRequest::AnnounceModeRecursive);
}
@@ -337,12 +337,12 @@ mod tests {
let mut dev_mgmt_helper =
DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
let mode_req_sender = ModeReqSenderMock::default();
dev_mgmt_helper.add_mode_child(ExampleId::Id1 as u64, UNKNOWN_MODE);
dev_mgmt_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
let expected_mode = ModeAndSubmode::new(ExampleMode::Mode1 as u32, 0);
dev_mgmt_helper
.send_mode_cmd_to_one_child(
1,
ExampleId::Id1 as u64,
ExampleId::Id1 as ComponentId,
expected_mode,
false,
&mode_req_sender,
@@ -350,7 +350,7 @@ mod tests {
.unwrap();
assert_eq!(mode_req_sender.requests.borrow().len(), 1);
let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id1 as u64);
assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(
req.request,
@@ -368,7 +368,7 @@ mod tests {
assert_eq!(ctx.active_request_id, 1);
}
let reply = GenericMessage::new(
MessageMetadata::new(1, ExampleId::Id1 as u64),
MessageMetadata::new(1, ExampleId::Id1 as ComponentId),
ModeReply::ModeReply(expected_mode),
);
if let DevManagerHelperResult::ModeCommandingDone(ActiveModeCommandContext {
@@ -387,15 +387,15 @@ mod tests {
let mut dev_mgmt_helper =
DevManagerCommandingHelper::new(TransparentDevManagerHook::default());
let mode_req_sender = ModeReqSenderMock::default();
dev_mgmt_helper.add_mode_child(ExampleId::Id1 as u64, UNKNOWN_MODE);
dev_mgmt_helper.add_mode_child(ExampleId::Id2 as u64, UNKNOWN_MODE);
dev_mgmt_helper.add_mode_child(ExampleId::Id1 as ComponentId, UNKNOWN_MODE);
dev_mgmt_helper.add_mode_child(ExampleId::Id2 as ComponentId, UNKNOWN_MODE);
let expected_mode = ModeAndSubmode::new(ExampleMode::Mode2 as u32, 0);
dev_mgmt_helper
.send_mode_cmd_to_all_children(1, expected_mode, false, &mode_req_sender)
.unwrap();
assert_eq!(mode_req_sender.requests.borrow().len(), 2);
let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id1 as u64);
assert_eq!(req.target_id, ExampleId::Id1 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(
req.request,
@@ -405,7 +405,7 @@ mod tests {
}
);
let req = mode_req_sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req.target_id, ExampleId::Id2 as u64);
assert_eq!(req.target_id, ExampleId::Id2 as ComponentId);
assert_eq!(req.request_id, 1);
assert_eq!(
req.request,
@@ -424,7 +424,7 @@ mod tests {
}
let reply = GenericMessage::new(
MessageMetadata::new(1, ExampleId::Id1 as u64),
MessageMetadata::new(1, ExampleId::Id1 as ComponentId),
ModeReply::ModeReply(expected_mode),
);
assert_eq!(
@@ -432,7 +432,7 @@ mod tests {
DevManagerHelperResult::Busy
);
let reply = GenericMessage::new(
MessageMetadata::new(1, ExampleId::Id2 as u64),
MessageMetadata::new(1, ExampleId::Id2 as ComponentId),
ModeReply::ModeReply(expected_mode),
);
if let DevManagerHelperResult::ModeCommandingDone(ActiveModeCommandContext {

View File

@@ -1,6 +1,6 @@
use spacepackets::{CcsdsPacket, SpHeader};
use spacepackets::SpHeader;
use crate::{tmtc::PacketSenderRaw, ComponentId};
use crate::{ComponentId, tmtc::PacketHandler};
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum SpValidity {
@@ -24,13 +24,17 @@ pub trait SpacePacketValidator {
#[derive(Default, Debug, PartialEq, Eq)]
pub struct ParseResult {
pub packets_found: u32,
/// If an incomplete space packet was found, its start index is indicated by this value.
pub incomplete_tail_start: Option<usize>,
pub parsed_bytes: usize,
// If an incomplete space packet was found, its start index is indicated by this value.
//pub incomplete_packet_start: Option<usize>,
}
/// This function parses a given buffer for tightly packed CCSDS space packets. It uses the
/// [spacepackets::SpHeader] of the CCSDS packets and a user provided [SpacePacketValidator]
/// to check whether a received space packet is relevant for processing.
/// This function parses a given buffer for tightly packed CCSDS space packets.
///
/// Please note that it is recommended to use a proper data link layer instead, which provides
/// packet framing and allows more reliable recovery from packet loss.
/// It uses the [spacepackets::SpHeader] of the CCSDS packets and a user provided
/// [SpacePacketValidator] to check whether a received space packet is relevant for processing.
///
/// This function is also able to deal with broken tail packets at the end as long as the parser
/// can read the full 7 bytes which constitute a space packet header plus one byte of minimal packet data.
@@ -41,17 +45,18 @@ pub struct ParseResult {
/// [SpacePacketValidator]:
///
/// 1. [SpValidity::Valid]: The parser will forward all packets to the given `packet_sender` and
/// return the number of packets found.If the [PacketSenderRaw::send_packet] calls fails, the
/// return the number of packets found. If the [PacketHandler::handle_packet] call fails, the
/// error will be returned.
/// 2. [SpValidity::Invalid]: The parser assumes that the synchronization is lost and tries to
/// find the start of a new space packet header by scanning all the following bytes.
/// 3. [SpValidity::Skip]: The parser skips the packet using the packet length determined from the
/// space packet header.
///
pub fn parse_buffer_for_ccsds_space_packets<SendError>(
buf: &[u8],
packet_validator: &(impl SpacePacketValidator + ?Sized),
sender_id: ComponentId,
packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
packet_sender: &(impl PacketHandler<Error = SendError> + ?Sized),
) -> Result<ParseResult, SendError> {
let mut parse_result = ParseResult::default();
let mut current_idx = 0;
@@ -63,20 +68,21 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
let sp_header = SpHeader::from_be_bytes(&buf[current_idx..]).unwrap().0;
match packet_validator.validate(&sp_header, &buf[current_idx..]) {
SpValidity::Valid => {
let packet_size = sp_header.total_len();
let packet_size = sp_header.packet_len();
if (current_idx + packet_size) <= buf_len {
packet_sender
.send_packet(sender_id, &buf[current_idx..current_idx + packet_size])?;
.handle_packet(sender_id, &buf[current_idx..current_idx + packet_size])?;
parse_result.packets_found += 1;
} else {
// Move packet to start of buffer if applicable.
parse_result.incomplete_tail_start = Some(current_idx);
//parse_result.incomplete_packet_start = Some(current_idx);
break;
}
current_idx += packet_size;
continue;
}
SpValidity::Skip => {
current_idx += sp_header.total_len();
current_idx += sp_header.packet_len();
}
// We might have lost sync. Try to find the start of a new space packet header.
SpValidity::Invalid => {
@@ -84,23 +90,25 @@ pub fn parse_buffer_for_ccsds_space_packets<SendError>(
}
}
}
parse_result.parsed_bytes = current_idx;
Ok(parse_result)
}
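The following is a minimal usage sketch wiring the pieces above together: a permissive validator combined with a handler which only counts forwarded packets. AcceptAll, CountingHandler and count_space_packets are hypothetical illustration names, and the validate signature is assumed to match the call site in the parser loop above; because the validator accepts everything, the buffer must only contain well formed, tightly packed packets.

struct AcceptAll;

impl SpacePacketValidator for AcceptAll {
    fn validate(&self, _sp_header: &SpHeader, _raw_buf: &[u8]) -> SpValidity {
        SpValidity::Valid
    }
}

struct CountingHandler {
    count: core::cell::Cell<u32>,
}

impl PacketHandler for CountingHandler {
    type Error = ();

    fn handle_packet(&self, _sender_id: ComponentId, _packet: &[u8]) -> Result<(), Self::Error> {
        // Simply count forwarded packets instead of processing them.
        self.count.set(self.count.get() + 1);
        Ok(())
    }
}

fn count_space_packets(buf: &[u8]) -> u32 {
    let handler = CountingHandler {
        count: core::cell::Cell::new(0),
    };
    // The sender ID 0 is an arbitrary example component ID which is forwarded to the handler.
    parse_buffer_for_ccsds_space_packets(buf, &AcceptAll, 0, &handler)
        .expect("counting handler is infallible")
        .packets_found
}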
#[cfg(test)]
mod tests {
use arbitrary_int::{u11, u14};
use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
CcsdsPacket, PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader,
CcsdsPacket, PacketId, PacketSequenceControl, PacketType, SequenceFlags, SpHeader,
ecss::{CreatorConfig, MessageTypeId, tc::PusTcCreator},
};
use crate::{encoding::tests::TcCacher, ComponentId};
use crate::{ComponentId, encoding::tests::TcCacher};
use super::{parse_buffer_for_ccsds_space_packets, SpValidity, SpacePacketValidator};
use super::{SpValidity, SpacePacketValidator, parse_buffer_for_ccsds_space_packets};
const PARSER_ID: ComponentId = 0x05;
const TEST_APID_0: u16 = 0x02;
const TEST_APID_1: u16 = 0x10;
const TEST_APID_0: u11 = u11::new(0x02);
const TEST_APID_1: u11 = u11::new(0x10);
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
@@ -131,7 +139,12 @@ mod tests {
#[test]
fn test_basic() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let mut buffer: [u8; 32] = [0; 32];
let packet_len = ping_tc
.write_to_bytes(&mut buffer)
@@ -156,8 +169,14 @@ mod tests {
#[test]
fn test_multi_packet() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let ping_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let action_tc =
PusTcCreator::new_simple(sph, MessageTypeId::new(8, 0), &[], CreatorConfig::default());
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -191,9 +210,15 @@ mod tests {
#[test]
fn test_multi_apid() {
let sph = SpHeader::new_from_apid(TEST_APID_0);
let ping_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let sph = SpHeader::new_from_apid(TEST_APID_1);
let action_tc = PusTcCreator::new_simple(sph, 8, 0, &[], true);
let action_tc =
PusTcCreator::new_simple(sph, MessageTypeId::new(8, 0), &[], CreatorConfig::default());
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -221,10 +246,18 @@ mod tests {
#[test]
fn test_split_packet_multi() {
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let action_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let action_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
MessageTypeId::new(8, 0),
&[],
CreatorConfig::default(),
);
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -243,8 +276,7 @@ mod tests {
assert!(parse_result.is_ok());
let parse_result = parse_result.unwrap();
assert_eq!(parse_result.packets_found, 1);
assert!(parse_result.incomplete_tail_start.is_some());
let incomplete_tail_idx = parse_result.incomplete_tail_start.unwrap();
let incomplete_tail_idx = parse_result.parsed_bytes;
assert_eq!(incomplete_tail_idx, packet_len_ping);
let queue = tc_cacher.tc_queue.borrow();
@@ -255,8 +287,12 @@ mod tests {
#[test]
fn test_one_split_packet() {
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let mut buffer: [u8; 32] = [0; 32];
let packet_len_ping = ping_tc
.write_to_bytes(&mut buffer)
@@ -281,7 +317,7 @@ mod tests {
fn test_smallest_packet() {
let ccsds_header_only = SpHeader::new(
PacketId::new(PacketType::Tc, true, TEST_APID_0),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 0),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(0)),
0,
);
let mut buf: [u8; 7] = [0; 7];

View File

@@ -1,5 +1,5 @@
use crate::{tmtc::PacketSenderRaw, ComponentId};
use cobs::{decode_in_place, encode, max_encoding_length};
use crate::{ComponentId, tmtc::PacketHandler};
use cobs::{decode_in_place, encode_including_sentinels, max_encoding_length};
/// This function encodes the given packet with COBS and also wraps the encoded packet with
/// the sentinel value 0. It can be used repeatedly on the same encoded buffer by expecting
@@ -24,8 +24,8 @@ use cobs::{decode_in_place, encode, max_encoding_length};
/// assert!(encode_packet_with_cobs(&INVERTED_PACKET, &mut encoding_buf, &mut current_idx));
/// assert_eq!(encoding_buf[0], 0);
/// let dec_report = decode_in_place_report(&mut encoding_buf[1..]).expect("decoding failed");
/// assert_eq!(encoding_buf[1 + dec_report.src_used], 0);
/// assert_eq!(dec_report.dst_used, 5);
/// assert_eq!(encoding_buf[1 + dec_report.parsed_size()], 0);
/// assert_eq!(dec_report.frame_size(), 5);
/// assert_eq!(current_idx, 16);
/// ```
pub fn encode_packet_with_cobs(
@@ -37,15 +37,16 @@ pub fn encode_packet_with_cobs(
if *current_idx + max_encoding_len + 2 > encoded_buf.len() {
return false;
}
encoded_buf[*current_idx] = 0;
*current_idx += 1;
*current_idx += encode(packet, &mut encoded_buf[*current_idx..]);
encoded_buf[*current_idx] = 0;
*current_idx += 1;
*current_idx += encode_including_sentinels(packet, &mut encoded_buf[*current_idx..]);
true
}
/// This function parses a given buffer for COBS encoded packets. The packet structure is
/// This function parses a given buffer for COBS encoded packets.
///
/// Please note that it is recommended to use [cobs::CobsDecoderOwned] or [cobs::CobsDecoder]
/// instead.
///
/// The packet structure is
/// expected to be like this, assuming a sentinel value of 0 as the packet delimiter:
///
/// 0 | ... Encoded Packet Data ... | 0 | 0 | ... Encoded Packet Data ... | 0
@@ -58,7 +59,7 @@ pub fn encode_packet_with_cobs(
pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
buf: &mut [u8],
sender_id: ComponentId,
packet_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
packet_sender: &(impl PacketHandler<Error = SendError> + ?Sized),
next_write_idx: &mut usize,
) -> Result<u32, SendError> {
let mut start_index_packet = 0;
@@ -79,7 +80,7 @@ pub fn parse_buffer_for_cobs_encoded_packets<SendError>(
let decode_result = decode_in_place(&mut buf[start_index_packet..i]);
if let Ok(packet_len) = decode_result {
packets_found += 1;
packet_sender.send_packet(
packet_sender.handle_packet(
sender_id,
&buf[start_index_packet..start_index_packet + packet_len],
)?;
@@ -104,8 +105,8 @@ pub(crate) mod tests {
use cobs::encode;
use crate::{
encoding::tests::{encode_simple_packet, TcCacher, INVERTED_PACKET, SIMPLE_PACKET},
ComponentId,
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET, TcCacher, encode_simple_packet},
};
use super::parse_buffer_for_cobs_encoded_packets;
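A round-trip sketch for the two functions above: two packets are COBS encoded back to back with encode_packet_with_cobs and then recovered again with parse_buffer_for_cobs_encoded_packets. VecHandler and cobs_round_trip_sketch are hypothetical illustration names; the sketch assumes the functions of this module are in scope and uses the alloc feature for the vector-based handler.

#[cfg(feature = "alloc")]
fn cobs_round_trip_sketch() {
    use core::cell::RefCell;

    use alloc::vec::Vec;

    use crate::{ComponentId, tmtc::PacketHandler};

    // Collects every decoded packet into an owned vector.
    #[derive(Default)]
    struct VecHandler {
        packets: RefCell<Vec<Vec<u8>>>,
    }

    impl PacketHandler for VecHandler {
        type Error = ();

        fn handle_packet(&self, _sender_id: ComponentId, packet: &[u8]) -> Result<(), Self::Error> {
            self.packets.borrow_mut().push(packet.to_vec());
            Ok(())
        }
    }

    // Encode two packets back to back, each delimited by the sentinel value 0.
    let mut encoded = [0u8; 64];
    let mut current_idx = 0;
    assert!(encode_packet_with_cobs(&[1, 2, 3], &mut encoded, &mut current_idx));
    assert!(encode_packet_with_cobs(&[4, 0, 5], &mut encoded, &mut current_idx));

    // Parse the buffer again; both packets are forwarded to the handler.
    let handler = VecHandler::default();
    let mut next_write_idx = 0;
    let num_packets = parse_buffer_for_cobs_encoded_packets(
        &mut encoded[..current_idx],
        0, // arbitrary sender component ID
        &handler,
        &mut next_write_idx,
    )
    .expect("handler is infallible");
    assert_eq!(num_packets, 2);
    assert_eq!(handler.packets.borrow()[0], [1, 2, 3]);
    assert_eq!(handler.packets.borrow()[1], [4, 0, 5]);
}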

View File

@@ -11,8 +11,8 @@ pub(crate) mod tests {
use alloc::collections::VecDeque;
use crate::{
tmtc::{PacketAsVec, PacketSenderRaw},
ComponentId,
tmtc::{PacketAsVec, PacketHandler},
};
use super::cobs::encode_packet_with_cobs;
@@ -25,10 +25,10 @@ pub(crate) mod tests {
pub(crate) tc_queue: RefCell<VecDeque<PacketAsVec>>,
}
impl PacketSenderRaw for TcCacher {
impl PacketHandler for TcCacher {
type Error = ();
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
fn handle_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
let mut mut_queue = self.tc_queue.borrow_mut();
mut_queue.push_back(PacketAsVec::new(sender_id, tc_raw.to_vec()));
Ok(())

File diff suppressed because it is too large

View File

@@ -0,0 +1,850 @@
//! # Event management and forwarding
//!
//! This is a legacy module. It is recommended to use [super::event_man] instead.
//!
//! It is recommended to read the
//! [sat-rs book chapter](https://absatsw.irs.uni-stuttgart.de/projects/sat-rs/book/events.html)
//! about events first.
//!
//! This module provides components to perform event routing. The most important component for this
//! task is the [EventManager]. It receives all events and then routes them to event subscribers
//! where appropriate.
//!
//! The event manager has a listener table abstracted by the [ListenerMapProvider], which maps
//! listener groups identified by [ListenerKey]s to a [listener ID][ComponentId].
//! It also contains a sender table abstracted by the [SenderMapProvider] which maps these sender
//! IDs to concrete [EventSendProvider]s. A simple approach would be to use one send event provider
//! for each OBSW thread and then subscribe for all interesting events for a particular thread
//! using the send event provider ID.
//!
//! This can be done with the [EventManager] like this:
//!
//! 1. Provide a concrete [EventReceiveProvider] implementation. This abstraction allows using different
//! message queue backends. A straightforward implementation where dynamic memory allocation is
//! not a big concern would be to use the [std::sync::mpsc::Receiver] handle. The trait is
//! already implemented for this type.
//! 2. To set up event creators, create channel pairs using some message queue implementation.
//! Each event creator gets a (cloned) sender component which allows it to send events to the
//! manager.
//! 3. The event manager receives the receiver component as part of an [EventReceiveProvider]
//! implementation so all events are routed to the manager.
//! 4. Create the [event sender map][SenderMapProvider] which allows routing events to
//! subscribers. You can now use the subscriber component IDs to subscribe
//! for single events or event groups, for example by using the [EventManager::subscribe_single] method.
//! 5. Add the send provider as well using the [EventManager::add_sender] call so the event
//! manager can route listener groups to the send provider. A minimal setup sketch following
//! these steps is shown right after this module documentation.
//!
//! Some components like a PUS Event Service or PUS Event Action Service might require all
//! events, for example to package them as telemetry or to start actions where applicable.
//! Other components might only be interested in certain events. For example, a thermal system
//! handler might only be interested in temperature events generated by a thermal sensor component.
//!
//! # Examples
//!
//! You can check the [integration test](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/pus_events.rs)
//! for a concrete example using multi-threading where events are routed to
//! different threads.
//!
//! The [satrs-example](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example)
//! also contains a full event manager instance and exposes a test event via the PUS test service.
//! The [PUS event](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/pus/event.rs)
//! module and the generic [events module](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/events.rs)
//! show how the event management modules can be integrated into a more complex software.
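Below is a minimal, self-contained setup sketch following steps 1 to 5 above, using the std mpsc backend via the EventManagerWithMpsc and EventU32SenderMpsc helpers defined further down in this file. The function name and the component IDs 0 and 1 are arbitrary example values; the sketch assumes both the std and alloc crate features are enabled.

#[cfg(all(feature = "std", feature = "alloc"))]
fn event_manager_setup_sketch() {
    use std::sync::mpsc;

    use crate::events_legacy::Severity;

    // Steps 1 and 3: the mpsc receiver acts as the EventReceiveProvider of the manager.
    let (event_tx, event_rx) = mpsc::channel();
    let mut event_man = EventManagerWithMpsc::new(event_rx);

    // Step 2: event creators keep (clones of) `event_tx` to report events to the manager.
    let interesting_event = EventU32::new(Severity::Info, 0, 0);

    // Steps 4 and 5: register a subscriber (component ID 0) for one specific event.
    let (sub_tx, sub_rx) = mpsc::channel();
    let subscriber = EventU32SenderMpsc::new(0, sub_tx);
    event_man.subscribe_single(&interesting_event, subscriber.target_id());
    event_man.add_sender(subscriber);

    // An event creator (component ID 1) reports the event ...
    event_tx
        .send(EventMessage::new(1, interesting_event))
        .expect("sending event failed");
    // ... and the manager routes it to the subscribed listener.
    event_man.try_event_handling(|event_msg, error| {
        panic!("routing error for {event_msg:?}: {error:?}");
    });
    assert!(sub_rx.try_recv().is_ok());
}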
use crate::events_legacy::{EventU16, EventU32, GenericEvent, LargestEventRaw, LargestGroupIdRaw};
use crate::params::Params;
use crate::queue::GenericSendError;
use core::fmt::Debug;
use core::marker::PhantomData;
use core::slice::Iter;
use crate::ComponentId;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "std")]
pub use std_mod::*;
#[derive(PartialEq, Eq, Hash, Copy, Clone, Debug)]
pub enum ListenerKey {
Single(LargestEventRaw),
Group(LargestGroupIdRaw),
All,
}
#[derive(Debug)]
pub struct EventMessage<Event: GenericEvent, Parameters: Debug = Params> {
sender_id: ComponentId,
event: Event,
params: Option<Parameters>,
}
impl<Event: GenericEvent, Parameters: Debug + Clone> EventMessage<Event, Parameters> {
pub fn new_generic(sender_id: ComponentId, event: Event, params: Option<&Parameters>) -> Self {
Self {
sender_id,
event,
params: params.cloned(),
}
}
pub fn sender_id(&self) -> ComponentId {
self.sender_id
}
pub fn event(&self) -> Event {
self.event
}
pub fn params(&self) -> Option<&Parameters> {
self.params.as_ref()
}
pub fn new(sender_id: ComponentId, event: Event) -> Self {
Self::new_generic(sender_id, event, None)
}
pub fn new_with_params(sender_id: ComponentId, event: Event, params: &Parameters) -> Self {
Self::new_generic(sender_id, event, Some(params))
}
}
pub type EventMessageU32 = EventMessage<EventU32, Params>;
pub type EventMessageU16 = EventMessage<EventU16, Params>;
/// Generic abstraction for an event sender.
pub trait EventSendProvider<Event: GenericEvent, ParamProvider: Debug = Params> {
type Error;
fn target_id(&self) -> ComponentId;
fn send(&self, message: EventMessage<Event, ParamProvider>) -> Result<(), Self::Error>;
}
/// Generic abstraction for an event receiver.
pub trait EventReceiveProvider<Event: GenericEvent, ParamsProvider: Debug = Params> {
type Error;
/// This function has to be provided by any event receiver. A call may or may not return
/// an event and optional auxiliary data.
fn try_recv_event(&self) -> Result<Option<EventMessage<Event, ParamsProvider>>, Self::Error>;
}
pub trait ListenerMapProvider {
#[cfg(feature = "alloc")]
fn get_listeners(&self) -> alloc::vec::Vec<ListenerKey>;
fn contains_listener(&self, key: &ListenerKey) -> bool;
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<'_, ComponentId>>;
fn add_listener(&mut self, key: ListenerKey, listener_id: ComponentId) -> bool;
fn remove_duplicates(&mut self, key: &ListenerKey);
}
pub trait SenderMapProvider<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
>
{
fn contains_send_event_provider(&self, target_id: &ComponentId) -> bool;
fn get_send_event_provider(&self, target_id: &ComponentId) -> Option<&EventSender>;
fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool;
}
/// Generic event manager implementation.
///
/// # Generics
///
/// * `EventReceiver`: [EventReceiveProvider] used to receive all events.
/// * `SenderMap`: [SenderMapProvider] which maps channel IDs to send providers.
/// * `ListenerMap`: [ListenerMapProvider] which maps listener keys to channel IDs.
/// * `EventSender`: [EventSendProvider] contained within the sender map which sends the events.
/// * `Event`: The event type. This type must implement the [GenericEvent]. Currently only [EventU32]
/// and [EventU16] are supported.
/// * `ParamProvider`: Auxiliary data which is sent with the event to provide optional context
/// information
pub struct EventManager<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
> {
event_receiver: EventReceiver,
sender_map: SenderMap,
listener_map: ListenerMap,
phantom: core::marker::PhantomData<(EventSender, Event, ParamProvider)>,
}
#[derive(Debug)]
pub enum EventRoutingResult<Event: GenericEvent, ParamProvider: Debug> {
/// No event was received
Empty,
/// An event was received and routed to listeners.
Handled {
num_recipients: u32,
event_msg: EventMessage<Event, ParamProvider>,
},
}
#[derive(Debug)]
pub enum EventRoutingError {
Send(GenericSendError),
NoSendersForKey(ListenerKey),
NoSenderForId(ComponentId),
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSender, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSender, Event, ParamProvider>
{
pub fn remove_duplicates(&mut self, key: &ListenerKey) {
self.listener_map.remove_duplicates(key)
}
/// Subscribe for a unique event.
pub fn subscribe_single(&mut self, event: &Event, sender_id: ComponentId) {
self.update_listeners(ListenerKey::Single(event.raw_as_largest_type()), sender_id);
}
/// Subscribe for an event group.
pub fn subscribe_group(&mut self, group_id: LargestGroupIdRaw, sender_id: ComponentId) {
self.update_listeners(ListenerKey::Group(group_id), sender_id);
}
/// Subscribe for all events received by the manager.
///
/// For example, this can be useful for a handler component which sends every event as
/// a telemetry packet.
pub fn subscribe_all(&mut self, sender_id: ComponentId) {
self.update_listeners(ListenerKey::All, sender_id);
}
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSenderMap: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
{
pub fn new_with_custom_maps(
event_receiver: EventReceiver,
sender_map: SenderMap,
listener_map: ListenerMap,
) -> Self {
EventManager {
listener_map,
sender_map,
event_receiver,
phantom: PhantomData,
}
}
/// Add a new sender component which can be used to send events to subscribers.
pub fn add_sender(&mut self, send_provider: EventSenderMap) {
if !self
.sender_map
.contains_send_event_provider(&send_provider.target_id())
{
self.sender_map.add_send_event_provider(send_provider);
}
}
/// Generic function to update the event subscribers.
fn update_listeners(&mut self, key: ListenerKey, sender_id: ComponentId) {
self.listener_map.add_listener(key, sender_id);
}
}
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
SenderMap: SenderMapProvider<EventSenderMap, Event, ParamProvider>,
ListenerMap: ListenerMapProvider,
EventSenderMap: EventSendProvider<Event, ParamProvider, Error = GenericSendError>,
Event: GenericEvent + Copy,
ParamProvider: Clone + Debug,
> EventManager<EventReceiver, SenderMap, ListenerMap, EventSenderMap, Event, ParamProvider>
{
/// This function will use the cached event receiver and try to receive one event.
/// If an event was received, it will try to route that event to all subscribed event listeners.
/// If this works without any issues, the [EventRoutingResult] will contain context information
/// about the routed event.
///
/// If an error occurs during the routing, the error handler will be called. The error handler
/// should take a reference to the event message as the first argument, and the routing error
/// as the second argument.
pub fn try_event_handling<E: FnMut(&EventMessage<Event, ParamProvider>, EventRoutingError)>(
&self,
mut error_handler: E,
) -> EventRoutingResult<Event, ParamProvider> {
let mut num_recipients = 0;
let mut send_handler =
|key: &ListenerKey, event_msg: &EventMessage<Event, ParamProvider>| {
if self.listener_map.contains_listener(key) {
if let Some(ids) = self.listener_map.get_listener_ids(key) {
for id in ids {
if let Some(sender) = self.sender_map.get_send_event_provider(id) {
if let Err(e) = sender.send(EventMessage::new_generic(
event_msg.sender_id,
event_msg.event,
event_msg.params.as_ref(),
)) {
error_handler(event_msg, EventRoutingError::Send(e));
} else {
num_recipients += 1;
}
} else {
error_handler(event_msg, EventRoutingError::NoSenderForId(*id));
}
}
} else {
error_handler(event_msg, EventRoutingError::NoSendersForKey(*key));
}
}
};
if let Ok(Some(event_msg)) = self.event_receiver.try_recv_event() {
let single_key = ListenerKey::Single(event_msg.event.raw_as_largest_type());
send_handler(&single_key, &event_msg);
let group_key = ListenerKey::Group(event_msg.event.group_id_as_largest_type());
send_handler(&group_key, &event_msg);
send_handler(&ListenerKey::All, &event_msg);
return EventRoutingResult::Handled {
num_recipients,
event_msg,
};
}
EventRoutingResult::Empty
}
}
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use alloc::vec::Vec;
use hashbrown::HashMap;
use super::*;
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses regular mpsc channels as the message queue backend.
pub type EventManagerWithMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
EventU32ReceiverMpsc<ParamProvider>,
DefaultSenderMap<EventSenderMpsc<Event>, Event, ParamProvider>,
DefaultListenerMap,
EventSenderMpsc<Event>,
>;
/// Helper type which constrains the sender map and listener map generics to the [DefaultSenderMap]
/// and the [DefaultListenerMap]. It uses
/// [bounded mpsc senders](https://doc.rust-lang.org/std/sync/mpsc/struct.SyncSender.html) as the
/// message queue backend.
pub type EventManagerWithBoundedMpsc<Event = EventU32, ParamProvider = Params> = EventManager<
EventU32ReceiverMpsc<ParamProvider>,
DefaultSenderMap<EventSenderMpscBounded<Event>, Event, ParamProvider>,
DefaultListenerMap,
EventSenderMpscBounded<Event>,
>;
impl<
EventReceiver: EventReceiveProvider<Event, ParamProvider>,
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent + Copy,
ParamProvider: 'static + Debug,
>
EventManager<
EventReceiver,
DefaultSenderMap<EventSender, Event, ParamProvider>,
DefaultListenerMap,
EventSender,
Event,
ParamProvider,
>
{
/// Create an event manager where the sender table will be the [DefaultSenderMap]
/// and the listener table will be the [DefaultListenerMap].
pub fn new(event_receiver: EventReceiver) -> Self {
Self {
listener_map: DefaultListenerMap::default(),
sender_map: DefaultSenderMap::default(),
event_receiver,
phantom: PhantomData,
}
}
}
/// Default listener map.
///
/// Simple implementation which uses a [HashMap] and a [Vec] internally.
#[derive(Default)]
pub struct DefaultListenerMap {
listeners: HashMap<ListenerKey, Vec<ComponentId>>,
}
impl ListenerMapProvider for DefaultListenerMap {
fn get_listeners(&self) -> Vec<ListenerKey> {
let mut key_list = Vec::new();
for key in self.listeners.keys() {
key_list.push(*key);
}
key_list
}
fn contains_listener(&self, key: &ListenerKey) -> bool {
self.listeners.contains_key(key)
}
fn get_listener_ids(&self, key: &ListenerKey) -> Option<Iter<'_, ComponentId>> {
self.listeners.get(key).map(|vec| vec.iter())
}
fn add_listener(&mut self, key: ListenerKey, sender_id: ComponentId) -> bool {
if let Some(existing_list) = self.listeners.get_mut(&key) {
existing_list.push(sender_id);
} else {
let new_list = alloc::vec![sender_id];
self.listeners.insert(key, new_list);
}
true
}
fn remove_duplicates(&mut self, key: &ListenerKey) {
if let Some(list) = self.listeners.get_mut(key) {
list.sort_unstable();
list.dedup();
}
}
}
/// Default sender map.
///
/// Simple implementation which uses a [HashMap] internally.
pub struct DefaultSenderMap<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent = EventU32,
ParamProvider: Debug = Params,
> {
senders: HashMap<ComponentId, EventSender>,
phantom: PhantomData<(Event, ParamProvider)>,
}
impl<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent,
ParamProvider: Debug,
> Default for DefaultSenderMap<EventSender, Event, ParamProvider>
{
fn default() -> Self {
Self {
senders: Default::default(),
phantom: Default::default(),
}
}
}
impl<
EventSender: EventSendProvider<Event, ParamProvider>,
Event: GenericEvent,
ParamProvider: Debug,
> SenderMapProvider<EventSender, Event, ParamProvider>
for DefaultSenderMap<EventSender, Event, ParamProvider>
{
fn contains_send_event_provider(&self, id: &ComponentId) -> bool {
self.senders.contains_key(id)
}
fn get_send_event_provider(&self, id: &ComponentId) -> Option<&EventSender> {
self.senders
.get(id)
.filter(|sender| sender.target_id() == *id)
}
fn add_send_event_provider(&mut self, send_provider: EventSender) -> bool {
let id = send_provider.target_id();
if self.senders.contains_key(&id) {
return false;
}
self.senders.insert(id, send_provider).is_none()
}
}
}
#[cfg(feature = "std")]
pub mod std_mod {
use crate::queue::GenericReceiveError;
use super::*;
use std::sync::mpsc;
impl<Event: GenericEvent + Send, ParamProvider: Debug>
EventReceiveProvider<Event, ParamProvider>
for mpsc::Receiver<EventMessage<Event, ParamProvider>>
{
type Error = GenericReceiveError;
fn try_recv_event(
&self,
) -> Result<Option<EventMessage<Event, ParamProvider>>, Self::Error> {
match self.try_recv() {
Ok(msg) => Ok(Some(msg)),
Err(e) => match e {
mpsc::TryRecvError::Empty => Ok(None),
mpsc::TryRecvError::Disconnected => {
Err(GenericReceiveError::TxDisconnected(None))
}
},
}
}
}
pub type EventU32ReceiverMpsc<ParamProvider = Params> =
mpsc::Receiver<EventMessage<EventU32, ParamProvider>>;
pub type EventU16ReceiverMpsc<ParamProvider = Params> =
mpsc::Receiver<EventMessage<EventU16, ParamProvider>>;
/// Generic event sender which uses a regular [mpsc::Sender] as the messaging backend to
/// send events.
#[derive(Clone)]
pub struct EventSenderMpsc<Event: GenericEvent + Send> {
target_id: ComponentId,
sender: mpsc::Sender<EventMessage<Event>>,
}
impl<Event: GenericEvent + Send> EventSenderMpsc<Event> {
pub fn new(target_id: ComponentId, sender: mpsc::Sender<EventMessage<Event>>) -> Self {
Self { target_id, sender }
}
}
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpsc<Event> {
type Error = GenericSendError;
fn target_id(&self) -> ComponentId {
self.target_id
}
fn send(&self, event_msg: EventMessage<Event>) -> Result<(), GenericSendError> {
self.sender
.send(event_msg)
.map_err(|_| GenericSendError::RxDisconnected)
}
}
/// Generic event sender which uses the [mpsc::SyncSender] as the messaging backend to send
/// events. This has the advantage that the channel is bounded and thus more deterministic.
#[derive(Clone)]
pub struct EventSenderMpscBounded<Event: GenericEvent + Send> {
target_id: ComponentId,
sender: mpsc::SyncSender<EventMessage<Event>>,
capacity: usize,
}
impl<Event: GenericEvent + Send> EventSenderMpscBounded<Event> {
pub fn new(
target_id: ComponentId,
sender: mpsc::SyncSender<EventMessage<Event>>,
capacity: usize,
) -> Self {
Self {
target_id,
sender,
capacity,
}
}
}
impl<Event: GenericEvent + Send> EventSendProvider<Event> for EventSenderMpscBounded<Event> {
type Error = GenericSendError;
fn target_id(&self) -> ComponentId {
self.target_id
}
fn send(&self, event_msg: EventMessage<Event>) -> Result<(), Self::Error> {
if let Err(e) = self.sender.try_send(event_msg) {
return match e {
mpsc::TrySendError::Full(_) => {
Err(GenericSendError::QueueFull(Some(self.capacity as u32)))
}
mpsc::TrySendError::Disconnected(_) => Err(GenericSendError::RxDisconnected),
};
}
Ok(())
}
}
pub type EventU32SenderMpsc = EventSenderMpsc<EventU32>;
pub type EventU16SenderMpsc = EventSenderMpsc<EventU16>;
pub type EventU32SenderMpscBounded = EventSenderMpscBounded<EventU32>;
pub type EventU16SenderMpscBounded = EventSenderMpscBounded<EventU16>;
}
#[cfg(test)]
mod tests {
use super::*;
use crate::event_man_legacy::EventManager;
use crate::events_legacy::{EventU32, GenericEvent, Severity};
use crate::params::{ParamsHeapless, ParamsRaw};
use crate::pus::test_util::{TEST_COMPONENT_ID_0, TEST_COMPONENT_ID_1};
use std::format;
use std::sync::mpsc::{self};
const TEST_EVENT: EventU32 = EventU32::new(Severity::Info, 0, 5);
fn check_next_event(
expected: EventU32,
receiver: &mpsc::Receiver<EventMessageU32>,
) -> Option<Params> {
if let Ok(event_msg) = receiver.try_recv() {
assert_eq!(event_msg.event, expected);
return event_msg.params;
}
None
}
fn check_handled_event(
res: EventRoutingResult<EventU32, Params>,
expected: EventU32,
expected_num_sent: u32,
expected_sender_id: ComponentId,
) {
assert!(matches!(res, EventRoutingResult::Handled { .. }));
if let EventRoutingResult::Handled {
num_recipients,
event_msg,
} = res
{
assert_eq!(event_msg.event, expected);
assert_eq!(event_msg.sender_id, expected_sender_id);
assert_eq!(num_recipients, expected_num_sent);
}
}
fn generic_event_man() -> (mpsc::Sender<EventMessageU32>, EventManagerWithMpsc) {
let (event_sender, event_receiver) = mpsc::channel();
(event_sender, EventManager::new(event_receiver))
}
#[test]
fn test_basic() {
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener);
let (group_event_sender_0, group_event_receiver_0) = mpsc::channel();
let group_event_listener = EventU32SenderMpsc::new(1, group_event_sender_0);
event_man.subscribe_group(event_grp_1_0.group_id(), group_event_listener.target_id());
event_man.add_sender(group_event_listener);
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
// Test event with one listener
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending single error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
check_next_event(event_grp_0, &single_event_receiver);
// Test event which is sent to all group listeners
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_1_0, &group_event_receiver_0);
}
#[test]
fn test_with_basic_params() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let (single_event_sender, single_event_receiver) = mpsc::channel();
let single_event_listener = EventSenderMpsc::new(0, single_event_sender);
event_man.subscribe_single(&event_grp_0, single_event_listener.target_id());
event_man.add_sender(single_event_listener);
event_sender
.send(EventMessage::new_with_params(
TEST_COMPONENT_ID_0.id(),
event_grp_0,
&Params::Heapless((2_u32, 3_u32).into()),
))
.expect("Sending group error failed");
let res = event_man.try_event_handling(&error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let aux = check_next_event(event_grp_0, &single_event_receiver);
assert!(aux.is_some());
let aux = aux.unwrap();
if let Params::Heapless(ParamsHeapless::Raw(ParamsRaw::U32Pair(pair))) = aux {
assert_eq!(pair.0, 2);
assert_eq!(pair.1, 3);
} else {
panic!("{}", format!("Unexpected auxiliary value type {:?}", aux));
}
}
/// Test listening for multiple groups
#[test]
fn test_multi_group() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let res = event_man.try_event_handling(error_handler);
assert!(matches!(res, EventRoutingResult::Empty));
let event_grp_0 = EventU32::new(Severity::Info, 0, 0);
let event_grp_1_0 = EventU32::new(Severity::High, 1, 0);
let (event_grp_0_sender, event_grp_0_receiver) = mpsc::channel();
let event_grp_0_and_1_listener = EventU32SenderMpsc::new(0, event_grp_0_sender);
event_man.subscribe_group(
event_grp_0.group_id(),
event_grp_0_and_1_listener.target_id(),
);
event_man.subscribe_group(
event_grp_1_0.group_id(),
event_grp_0_and_1_listener.target_id(),
);
event_man.add_sender(event_grp_0_and_1_listener);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_grp_0))
.expect("Sending Event Group 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_grp_1_0))
.expect("Sendign Event Group 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_grp_1_0, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_grp_0, &event_grp_0_receiver);
check_next_event(event_grp_1_0, &event_grp_0_receiver);
}
/// Test listening to the same event from multiple listeners. Also test listening
/// to both group and single events from one listener
#[test]
fn test_listening_to_same_event_and_multi_type() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, mut event_man) = generic_event_man();
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, event_0_rx_0) = mpsc::channel();
let (event_0_tx_1, event_0_rx_1) = mpsc::channel();
let event_listener_0 = EventU32SenderMpsc::new(0, event_0_tx_0);
let event_listener_1 = EventU32SenderMpsc::new(1, event_0_tx_1);
let event_listener_0_sender_id = event_listener_0.target_id();
event_man.subscribe_single(&event_0, event_listener_0_sender_id);
event_man.add_sender(event_listener_0);
let event_listener_1_sender_id = event_listener_1.target_id();
event_man.subscribe_single(&event_0, event_listener_1_sender_id);
event_man.add_sender(event_listener_1);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering Event 0 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_0, &event_0_rx_1);
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering Event 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
.expect("Triggering Event 1 failed");
// 3 event messages will be sent now
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 2, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
// Both the single event and the group event should arrive now
check_next_event(event_0, &event_0_rx_0);
check_next_event(event_1, &event_0_rx_0);
// Do double insertion and then remove duplicates
event_man.subscribe_group(event_1.group_id(), event_listener_0_sender_id);
event_man.remove_duplicates(&ListenerKey::Group(event_1.group_id()));
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_1))
.expect("Triggering Event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_0.id());
}
#[test]
fn test_all_events_listener() {
let error_handler = |event_msg: &EventMessageU32, e: EventRoutingError| {
panic!("routing error occurred for event {:?}: {:?}", event_msg, e);
};
let (event_sender, event_receiver) = mpsc::channel();
let mut event_man = EventManagerWithMpsc::new(event_receiver);
let event_0 = EventU32::new(Severity::Info, 0, 5);
let event_1 = EventU32::new(Severity::High, 1, 0);
let (event_0_tx_0, all_events_rx) = mpsc::channel();
let all_events_listener = EventU32SenderMpsc::new(0, event_0_tx_0);
event_man.subscribe_all(all_events_listener.target_id());
event_man.add_sender(all_events_listener);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), event_0))
.expect("Triggering event 0 failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_1.id(), event_1))
.expect("Triggering event 1 failed");
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_0, 1, TEST_COMPONENT_ID_0.id());
let res = event_man.try_event_handling(error_handler);
check_handled_event(res, event_1, 1, TEST_COMPONENT_ID_1.id());
check_next_event(event_0, &all_events_rx);
check_next_event(event_1, &all_events_rx);
}
#[test]
fn test_bounded_event_sender_queue_full() {
let (event_sender, _event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
event_sender
.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT))
.expect("sending test event failed");
let error = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT));
if let Err(e) = error {
assert!(matches!(e, GenericSendError::QueueFull(Some(3))));
} else {
panic!("unexpected error {error:?}");
}
}
#[test]
fn test_bounded_event_sender_rx_dropped() {
let (event_sender, event_receiver) = mpsc::sync_channel(3);
let event_sender = EventU32SenderMpscBounded::new(1, event_sender, 3);
drop(event_receiver);
if let Err(e) = event_sender.send(EventMessage::new(TEST_COMPONENT_ID_0.id(), TEST_EVENT)) {
assert!(matches!(e, GenericSendError::RxDisconnected));
} else {
panic!("Expected error");
}
}
}

View File

@@ -1,8 +1,13 @@
//! Event support module
//! # Event support module
//!
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
//! The user can define events as custom structs or enumerations.
//! The event structures defined here are type erased and rely on some properties which
//! should be provided by the user through the [Event] and [serde::Serialize] traits.
//!
//! This in turn allows using higher-level abstractions like the event manager.
//!
//! This module includes the basic type erased event structs [EventErasedAlloc] and
//! [EventErasedHeapless].
//! The abstraction also allows grouping related events using a group ID, and the severity
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
//!
@@ -10,29 +15,11 @@
//! - LOW
//! - MEDIUM
//! - HIGH
//!
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
//! This allows to easily create a static list of constant events which can then be used to generate
//! event telemetry using the PUS event manager modules.
//!
//! # Examples
//!
//! ```
//! use satrs::events::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//!
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! ```
use core::fmt::Debug;
use core::hash::Hash;
use core::marker::PhantomData;
use delegate::delegate;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::util::{ToBeBytes, UnsignedEnum};
use arbitrary_int::{prelude::*, u14};
#[cfg(feature = "heapless")]
use spacepackets::ByteConversionError;
/// Using a type definition allows to change this to u64 in the future more easily
@@ -40,12 +27,14 @@ pub type LargestEventRaw = u32;
/// Using a type definition allows to change this to u32 in the future more easily
pub type LargestGroupIdRaw = u16;
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
pub const MAX_GROUP_ID_U32_EVENT: u16 = u14::MAX.value();
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[derive(
Copy, Clone, PartialEq, Eq, Debug, Hash, num_enum::TryFromPrimitive, num_enum::IntoPrimitive,
)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Severity {
Info = 0,
Low = 1,
@@ -57,801 +46,203 @@ pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
const SEVERITY: Severity;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::Info;
pub trait Event: Clone {
fn id(&self) -> EventId;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::Low;
}
pub type GroupId = u14;
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::Medium;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::High;
}
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::Info as u8 => Ok(Severity::Info),
x if x == Severity::Low as u8 => Ok(Severity::Low),
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
x if x == Severity::High as u8 => Ok(Severity::High),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
/// Unique event identifier.
///
/// Consists of a group ID, a unique ID and the severity.
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct EventBase<Raw, GroupId, UniqueId> {
severity: Severity,
pub struct EventId {
group_id: GroupId,
unique_id: UniqueId,
phantom: PhantomData<Raw>,
unique_id: u16,
severity: Severity,
}
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
fn write_to_bytes(
&self,
raw: Raw,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: width,
});
impl EventId {
pub fn new(severity: Severity, group_id: u14, unique_id: u16) -> Self {
Self {
severity,
group_id,
unique_id,
}
buf.copy_from_slice(raw.to_be_bytes().as_ref());
Ok(raw.written_len())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
pub fn group_id(&self) -> u14 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
pub fn raw(&self) -> u32 {
((self.severity as u32) << 30)
| ((self.group_id.as_u16() as u32) << 16)
| (self.unique_id as u32)
}
}
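A small sketch of the raw layout produced by EventId::raw: the two severity bits occupy bits 30 to 31, the 14-bit group ID occupies bits 16 to 29 and the unique ID the lower 16 bits. The function name and the concrete values are arbitrary example choices.

fn event_id_raw_layout_sketch() {
    let id = EventId::new(Severity::Low, u14::new(2), 5);
    // Severity::Low == 1 -> 0x4000_0000, group ID 2 -> 0x0002_0000, unique ID 5.
    assert_eq!(id.raw(), 0x4002_0005);
    // Converting the raw value back restores the same identifier.
    assert_eq!(EventId::from(0x4002_0005_u32), id);
}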
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event. Returns None if that severity bit field of the raw event
/// ID is invalid
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
macro_rules! impl_event_provider {
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
impl GenericEvent for $BaseIdent {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
delegate!(to self.event {
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
});
}
}
}
macro_rules! try_from_impls {
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
type Error = Severity;
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
Self::try_from_generic($severity, raw)
}
}
};
}
macro_rules! const_from_fn {
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
Self {
base: event.event.base,
}
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32TypedSev<SEVERITY> {
event: EventU32,
phantom: PhantomData<SEVERITY>,
}
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
Self { base: e.event.base }
}
}
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
fn as_ref(&self) -> &EventU32 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU32 {
&mut self.event
}
}
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
impl EventU32 {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), None will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, the size is limited by dec 16383 hex 0x3FFF.
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > MAX_GROUP_ID_U32_EVENT {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > MAX_GROUP_ID_U32_EVENT {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
Self::from(u32::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl From<u32> for EventU32 {
impl From<u32> for EventId {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let group_id = u14::new(((raw >> 16) & 0x3FFF) as u16);
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
/// Event which was type erased and serialized into a [alloc::vec::Vec].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg(feature = "alloc")]
pub struct EventErasedAlloc {
id: EventId,
event_raw: alloc::vec::Vec<u8>,
}
#[cfg(feature = "alloc")]
impl EventErasedAlloc {
#[cfg(feature = "serde")]
/// Creates a new event by serializing the given event using [postcard].
pub fn new(event: &(impl serde::Serialize + Event)) -> Self {
Self {
id: event.id(),
event_raw: postcard::to_allocvec(event).unwrap(),
}
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
pub fn new_with_raw_event(id: EventId, event_raw: &[u8]) -> Self {
Self {
id,
event_raw: event_raw.to_vec(),
}
}
fn value(&self) -> u64 {
self.raw().into()
#[inline]
pub fn raw(&self) -> &[u8] {
&self.event_raw
}
}
impl EcssEnumeration for EventU32 {
fn pfc(&self) -> u8 {
u32::BITS as u8
#[cfg(feature = "serde")]
#[cfg(feature = "alloc")]
impl<T: serde::Serialize + Event> From<T> for EventErasedAlloc {
fn from(event: T) -> Self {
Self::new(&event)
}
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new] but the severity is a type generic, which allows to
/// have distinct types for events with different severities
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
#[cfg(feature = "alloc")]
impl Event for EventErasedAlloc {
fn id(&self) -> EventId {
self.id
}
}
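A usage sketch for the type erased events: a user-defined event type implements Event (and serde::Serialize) and can then be wrapped into an EventErasedAlloc for generic handling. ThermalEvent and the sketch function are hypothetical illustration names with arbitrary group and unique IDs; the sketch assumes the alloc and serde features are enabled.

#[cfg(all(feature = "alloc", feature = "serde"))]
fn erased_event_sketch() {
    #[derive(Debug, Clone, serde::Serialize)]
    enum ThermalEvent {
        TemperatureHigh { sensor_idx: u8 },
    }

    impl Event for ThermalEvent {
        fn id(&self) -> EventId {
            match self {
                // Arbitrary example ID: severity HIGH, group ID 2, unique ID 0.
                ThermalEvent::TemperatureHigh { .. } => EventId::new(Severity::High, u14::new(2), 0),
            }
        }
    }

    let event = ThermalEvent::TemperatureHigh { sensor_idx: 1 };
    // The erased event keeps the EventId and the postcard serialized payload.
    let erased = EventErasedAlloc::new(&event);
    assert_eq!(erased.id().severity(), Severity::High);
    assert!(!erased.raw().is_empty());
}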
/// Event which was type erased and serialized into a [heapless::vec::Vec].
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg(feature = "heapless")]
pub struct EventErasedHeapless<const N: usize> {
id: EventId,
event_raw: heapless::vec::Vec<u8, N>,
}
#[cfg(feature = "heapless")]
impl<const N: usize> Event for EventErasedHeapless<N> {
fn id(&self) -> EventId {
self.id
}
}
#[cfg(feature = "heapless")]
impl<const N: usize> EventErasedHeapless<N> {
#[cfg(feature = "serde")]
/// Creates a new event by serializing the given event using [postcard].
pub fn new(event: &(impl serde::Serialize + Event)) -> Result<Self, ByteConversionError> {
let ser_size = postcard::experimental::serialized_size(event).unwrap();
if ser_size > N {
return Err(ByteConversionError::ToSliceTooSmall {
found: N,
expected: ser_size,
});
}
let mut vec = heapless::Vec::<u8, N>::new();
vec.resize(N, 0).unwrap();
postcard::to_slice(event, vec.as_mut_slice()).unwrap();
Ok(Self {
id: event.id(),
event_raw: vec,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
pub fn new_with_raw_event(id: EventId, event_raw: heapless::Vec<u8, N>) -> Self {
Self { id, event_raw }
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16TypedSev<SEVERITY> {
event: EventU16,
phantom: PhantomData<SEVERITY>,
}
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
fn as_ref(&self) -> &EventU16 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU16 {
&mut self.event
}
}
impl EventU16 {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned
///
/// # Parameter
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, the size is limited by dec 63 hex 0x3F.
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU16 {
#[inline]
fn pfc(&self) -> u8 {
u16::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new], but the severity is a type generic, which allows
/// distinct types for events with different severities.
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU32) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
#[inline]
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU16) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
#[inline]
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
self.raw() == other.raw()
pub fn raw(&self) -> &[u8] {
&self.event_raw
}
}
#[cfg(test)]
mod tests {
use super::EventU32TypedSev;
use super::*;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub enum TestEvent {
Info,
ErrorOtherGroup,
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
/// The fact that this works at compile time is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
#[test]
fn test_normal_from_raw_conversion() {
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
.expect("Creating typed EventU32 failed");
assert_eq!(conv_from_raw, INFO_EVENT);
}
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
.expect("Creating typed EventU16 failed");
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}
impl Event for TestEvent {
fn id(&self) -> EventId {
match self {
TestEvent::Info => EventId::new(Severity::Info, u14::new(0), 0),
TestEvent::ErrorOtherGroup => EventId::new(Severity::High, u14::new(1), 1),
}
}
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::Info);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(TestEvent::Info.id().severity(), Severity::Info);
assert_eq!(TestEvent::Info.id().unique_id(), 0);
assert_eq!(TestEvent::Info.id().group_id().value(), 0);
assert_eq!(TestEvent::ErrorOtherGroup.id().group_id().value(), 1);
assert_eq!(TestEvent::ErrorOtherGroup.id().unique_id(), 1);
let raw_event = TestEvent::Info.id().raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}
#[cfg(feature = "serde")]
fn test_basic_erased_alloc_event() {
let event = EventErasedAlloc::new(&TestEvent::Info);
let test_event: TestEvent = postcard::from_bytes(event.raw()).unwrap();
assert_eq!(test_event, TestEvent::Info);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
let event_read_back = EventU32::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
let event_read_back = EventU16::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::High as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);
assert_eq!(reduced.unique_id(), 1);
assert_eq!(raw, reduced.raw());
}
#[test]
fn const_reducation() {
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
}
#[cfg(all(feature = "serde", feature = "alloc"))]
fn test_basic_erased_heapless_event() {
let event = EventErasedHeapless::<8>::new(&TestEvent::Info).unwrap();
let test_event: TestEvent = postcard::from_bytes(event.raw()).unwrap();
assert_eq!(test_event, TestEvent::Info);
}
}

satrs/src/events_legacy.rs (new file, 859 lines)

@@ -0,0 +1,859 @@
//! # Event support module
//!
//! This is a legacy module. It is recommended to use [super::events] instead.
//!
//! This module includes the basic event structs [EventU32] and [EventU16] and versions with the
//! ECSS severity levels as a type parameter. These structs are simple abstractions on top of the
//! [u32] and [u16] types where the raw value is the unique identifier for a particular event.
//! The abstraction also allows grouping related events using a group ID, and the severity
//! of an event is encoded inside the raw value itself with four possible [Severity] levels:
//!
//! - INFO
//! - LOW
//! - MEDIUM
//! - HIGH
//!
//! All event structs implement the [EcssEnumeration] trait and can be created as constants.
//! This makes it easy to create a static list of constant events which can then be used to generate
//! event telemetry using the PUS event manager modules.
//!
//! # Examples
//!
//! ```
//! use satrs::events_legacy::{EventU16, EventU32, EventU32TypedSev, Severity, SeverityHigh, SeverityInfo};
//!
//! const MSG_RECVD: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(1, 0);
//! const MSG_FAILED: EventU32 = EventU32::new(Severity::Low, 1, 1);
//!
//! const TEMPERATURE_HIGH: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(2, 0);
//!
//! let small_event = EventU16::new(Severity::Info, 3, 0);
//! ```
use core::fmt::Debug;
use core::hash::Hash;
use core::marker::PhantomData;
use delegate::delegate;
use spacepackets::ByteConversionError;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::util::{ToBeBytes, UnsignedEnum};
/// Using a type definition makes it easier to change this to u64 in the future
pub type LargestEventRaw = u32;
/// Using a type definition makes it easier to change this to u32 in the future
pub type LargestGroupIdRaw = u16;
pub const MAX_GROUP_ID_U32_EVENT: u16 = 2_u16.pow(14) - 1;
pub const MAX_GROUP_ID_U16_EVENT: u16 = 2_u16.pow(6) - 1;
#[derive(Copy, Clone, PartialEq, Eq, Debug, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Severity {
Info = 0,
Low = 1,
Medium = 2,
High = 3,
}
pub trait HasSeverity: Debug + PartialEq + Eq + Copy + Clone {
const SEVERITY: Severity;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityInfo {}
impl HasSeverity for SeverityInfo {
const SEVERITY: Severity = Severity::Info;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityLow {}
impl HasSeverity for SeverityLow {
const SEVERITY: Severity = Severity::Low;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityMedium {}
impl HasSeverity for SeverityMedium {
const SEVERITY: Severity = Severity::Medium;
}
/// Type level support struct
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub struct SeverityHigh {}
impl HasSeverity for SeverityHigh {
const SEVERITY: Severity = Severity::High;
}
pub trait GenericEvent: EcssEnumeration + Copy + Clone {
type Raw;
type GroupId;
type UniqueId;
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
}
impl TryFrom<u8> for Severity {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
x if x == Severity::Info as u8 => Ok(Severity::Info),
x if x == Severity::Low as u8 => Ok(Severity::Low),
x if x == Severity::Medium as u8 => Ok(Severity::Medium),
x if x == Severity::High as u8 => Ok(Severity::High),
_ => Err(()),
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct EventBase<Raw, GroupId, UniqueId> {
severity: Severity,
group_id: GroupId,
unique_id: UniqueId,
phantom: PhantomData<Raw>,
}
impl<Raw: ToBeBytes, GroupId, UniqueId> EventBase<Raw, GroupId, UniqueId> {
fn write_to_bytes(
&self,
raw: Raw,
buf: &mut [u8],
width: usize,
) -> Result<usize, ByteConversionError> {
if buf.len() < width {
return Err(ByteConversionError::ToSliceTooSmall {
found: buf.len(),
expected: width,
});
}
buf[..width].copy_from_slice(raw.to_be_bytes().as_ref());
Ok(raw.written_len())
}
}
impl EventBase<u32, u16, u16> {
#[inline]
fn raw(&self) -> u32 {
((self.severity as u32) << 30) | ((self.group_id as u32) << 16) | self.unique_id as u32
}
}
impl EventBase<u16, u8, u8> {
#[inline]
fn raw(&self) -> u16 {
((self.severity as u16) << 14) | ((self.group_id as u16) << 8) | self.unique_id as u16
}
}
impl<RAW, GID, UID> EventBase<RAW, GID, UID> {
#[inline]
pub fn severity(&self) -> Severity {
self.severity
}
}
impl<RAW, GID> EventBase<RAW, GID, u16> {
#[inline]
pub fn unique_id(&self) -> u16 {
self.unique_id
}
}
impl<RAW, GID> EventBase<RAW, GID, u8> {
#[inline]
pub fn unique_id(&self) -> u8 {
self.unique_id
}
}
impl<RAW, UID> EventBase<RAW, u16, UID> {
#[inline]
pub fn group_id(&self) -> u16 {
self.group_id
}
}
impl<RAW, UID> EventBase<RAW, u8, UID> {
#[inline]
pub fn group_id(&self) -> u8 {
self.group_id
}
}
macro_rules! event_provider_impl {
() => {
#[inline]
fn raw(&self) -> Self::Raw {
self.base.raw()
}
/// Retrieve the severity of an event.
#[inline]
fn severity(&self) -> Severity {
self.base.severity()
}
#[inline]
fn group_id(&self) -> Self::GroupId {
self.base.group_id()
}
#[inline]
fn unique_id(&self) -> Self::UniqueId {
self.base.unique_id()
}
};
}
macro_rules! impl_event_provider {
($BaseIdent: ident, $TypedIdent: ident, $raw: ty, $gid: ty, $uid: ty) => {
impl GenericEvent for $BaseIdent {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
event_provider_impl!();
fn raw_as_largest_type(&self) -> LargestEventRaw {
self.raw().into()
}
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw {
self.group_id().into()
}
}
impl<SEVERITY: HasSeverity> GenericEvent for $TypedIdent<SEVERITY> {
type Raw = $raw;
type GroupId = $gid;
type UniqueId = $uid;
delegate!(to self.event {
fn raw(&self) -> Self::Raw;
fn severity(&self) -> Severity;
fn group_id(&self) -> Self::GroupId;
fn unique_id(&self) -> Self::UniqueId;
fn raw_as_largest_type(&self) -> LargestEventRaw;
fn group_id_as_largest_type(&self) -> LargestGroupIdRaw;
});
}
}
}
macro_rules! try_from_impls {
($SevIdent: ident, $severity: path, $raw: ty, $TypedSevIdent: ident) => {
impl TryFrom<$raw> for $TypedSevIdent<$SevIdent> {
type Error = Severity;
fn try_from(raw: $raw) -> Result<Self, Self::Error> {
Self::try_from_generic($severity, raw)
}
}
};
}
macro_rules! const_from_fn {
($from_fn_name: ident, $TypedIdent: ident, $SevIdent: ident) => {
pub const fn $from_fn_name(event: $TypedIdent<$SevIdent>) -> Self {
Self {
base: event.event.base,
}
}
};
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub struct EventU32 {
base: EventBase<u32, u16, u16>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU32TypedSev<SEVERITY> {
event: EventU32,
phantom: PhantomData<SEVERITY>,
}
impl<SEVERITY: HasSeverity> From<EventU32TypedSev<SEVERITY>> for EventU32 {
fn from(e: EventU32TypedSev<SEVERITY>) -> Self {
Self { base: e.event.base }
}
}
impl<Severity: HasSeverity> AsRef<EventU32> for EventU32TypedSev<Severity> {
fn as_ref(&self) -> &EventU32 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU32> for EventU32TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU32 {
&mut self.event
}
}
impl_event_provider!(EventU32, EventU32TypedSev, u32, u16, u16);
impl EventU32 {
/// Generate an event. The raw representation of an event has 32 bits.
/// If the passed group ID is invalid (too large), [None] will be returned.
///
/// # Parameters
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID.
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 14 bits after the severity. Therefore, it is limited to a maximum value of 16383 (0x3FFF).
/// * `unique_id`: Each event has a unique 16 bit ID occupying the last 16 bits of the
/// raw event ID.
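///
/// # Example
///
/// A small sketch of the resulting raw layout (the concrete values are arbitrary):
///
/// ```
/// use satrs::events_legacy::{EventU32, GenericEvent, Severity};
///
/// // Severity occupies the two uppermost bits, the group ID the next 14 bits and the
/// // unique ID the lowest 16 bits.
/// let event = EventU32::new_checked(Severity::Low, 1, 2).unwrap();
/// assert_eq!(event.raw(), (1 << 30) | (1 << 16) | 2);
/// ```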
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > MAX_GROUP_ID_U32_EVENT {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
})
}
/// This constructor will panic if the passed group ID is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > MAX_GROUP_ID_U32_EVENT {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 4]) -> Self {
Self::from(u32::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU32TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU32TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU32TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU32TypedSev, SeverityHigh);
}
impl From<u32> for EventU32 {
fn from(raw: u32) -> Self {
// Severity conversion from u8 should never fail
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
let group_id = ((raw >> 16) & 0x3FFF) as u16;
let unique_id = (raw & 0xFFFF) as u16;
// Sanitized input, should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU32 {
fn size(&self) -> usize {
core::mem::size_of::<u32>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value_raw(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU32 {
fn pfc(&self) -> u8 {
u32::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU32TypedSev<SEVERITY> {
/// This is similar to [EventU32::new], but the severity is a type generic, which allows
/// distinct types for events with different severities.
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU32::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U32_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU32::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u32) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 30) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(
((raw >> 16) & 0x3FFF) as u16,
(raw & 0xFFFF) as u16,
))
}
}
try_from_impls!(SeverityInfo, Severity::Info, u32, EventU32TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u32, EventU32TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u32, EventU32TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u32, EventU32TypedSev);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value_raw(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU32TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct EventU16 {
base: EventBase<u16, u8, u8>,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct EventU16TypedSev<SEVERITY> {
event: EventU16,
phantom: PhantomData<SEVERITY>,
}
impl<Severity: HasSeverity> AsRef<EventU16> for EventU16TypedSev<Severity> {
fn as_ref(&self) -> &EventU16 {
&self.event
}
}
impl<Severity: HasSeverity> AsMut<EventU16> for EventU16TypedSev<Severity> {
fn as_mut(&mut self) -> &mut EventU16 {
&mut self.event
}
}
impl EventU16 {
/// Generate a small event. The raw representation of a small event has 16 bits.
/// If the passed group ID is invalid (too large), [None] will be returned.
///
/// # Parameters
///
/// * `severity`: Each event has a [severity][Severity]. The raw value of the severity will
/// be stored inside the uppermost 2 bits of the raw event ID.
/// * `group_id`: Related events can be grouped using a group ID. The group ID will occupy the
/// next 6 bits after the severity. Therefore, it is limited to a maximum value of 63 (0x3F).
/// * `unique_id`: Each event has a unique 8 bit ID occupying the last 8 bits of the
/// raw event ID.
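///
/// # Example
///
/// A small sketch of the 16-bit layout (the concrete values are arbitrary):
///
/// ```
/// use satrs::events_legacy::{EventU16, GenericEvent, Severity};
///
/// // Severity occupies the two uppermost bits, the group ID the next 6 bits and the
/// // unique ID the lowest 8 bits.
/// let event = EventU16::new_checked(Severity::High, 3, 4).unwrap();
/// assert_eq!(event.raw(), (3 << 14) | (3 << 8) | 4);
/// ```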
pub fn new_checked(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
if group_id > (2u8.pow(6) - 1) {
return None;
}
Some(Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: Default::default(),
},
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
severity: Severity,
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
if group_id > (2u8.pow(6) - 1) {
panic!("Group ID too large");
}
Self {
base: EventBase {
severity,
group_id,
unique_id,
phantom: PhantomData,
},
}
}
pub fn from_be_bytes(bytes: [u8; 2]) -> Self {
Self::from(u16::from_be_bytes(bytes))
}
const_from_fn!(const_from_info, EventU16TypedSev, SeverityInfo);
const_from_fn!(const_from_low, EventU16TypedSev, SeverityLow);
const_from_fn!(const_from_medium, EventU16TypedSev, SeverityMedium);
const_from_fn!(const_from_high, EventU16TypedSev, SeverityHigh);
}
impl From<u16> for EventU16 {
fn from(raw: <Self as GenericEvent>::Raw) -> Self {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
let group_id = ((raw >> 8) & 0x3F) as u8;
let unique_id = (raw & 0xFF) as u8;
// Sanitized input, new call should never fail
Self::new(severity, group_id, unique_id)
}
}
impl UnsignedEnum for EventU16 {
fn size(&self) -> usize {
core::mem::size_of::<u16>()
}
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError> {
self.base.write_to_bytes(self.raw(), buf, self.size())
}
fn value_raw(&self) -> u64 {
self.raw().into()
}
}
impl EcssEnumeration for EventU16 {
#[inline]
fn pfc(&self) -> u8 {
u16::BITS as u8
}
}
impl<SEVERITY: HasSeverity> EventU16TypedSev<SEVERITY> {
/// This is similar to [EventU16::new], but the severity is a type generic, which allows
/// distinct types for events with different severities.
pub fn new_checked(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Option<Self> {
let event = EventU16::new_checked(SEVERITY::SEVERITY, group_id, unique_id)?;
Some(Self {
event,
phantom: PhantomData,
})
}
/// This constructor will panic if the `group_id` is larger than [MAX_GROUP_ID_U16_EVENT].
pub const fn new(
group_id: <Self as GenericEvent>::GroupId,
unique_id: <Self as GenericEvent>::UniqueId,
) -> Self {
let event = EventU16::new(SEVERITY::SEVERITY, group_id, unique_id);
Self {
event,
phantom: PhantomData,
}
}
fn try_from_generic(expected: Severity, raw: u16) -> Result<Self, Severity> {
let severity = Severity::try_from(((raw >> 14) & 0b11) as u8).unwrap();
if severity != expected {
return Err(severity);
}
Ok(Self::new(((raw >> 8) & 0x3F) as u8, (raw & 0xFF) as u8))
}
}
impl_event_provider!(EventU16, EventU16TypedSev, u16, u8, u8);
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> UnsignedEnum for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn size(&self) -> usize;
fn write_to_be_bytes(&self, buf: &mut [u8]) -> Result<usize, ByteConversionError>;
fn value_raw(&self) -> u64;
});
}
//noinspection RsTraitImplementation
impl<SEVERITY: HasSeverity> EcssEnumeration for EventU16TypedSev<SEVERITY> {
delegate!(to self.event {
fn pfc(&self) -> u8;
});
}
try_from_impls!(SeverityInfo, Severity::Info, u16, EventU16TypedSev);
try_from_impls!(SeverityLow, Severity::Low, u16, EventU16TypedSev);
try_from_impls!(SeverityMedium, Severity::Medium, u16, EventU16TypedSev);
try_from_impls!(SeverityHigh, Severity::High, u16, EventU16TypedSev);
impl<Severity: HasSeverity> PartialEq<EventU32> for EventU32TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU32) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU32TypedSev<Severity>> for EventU32 {
#[inline]
fn eq(&self, other: &EventU32TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16> for EventU16TypedSev<Severity> {
#[inline]
fn eq(&self, other: &EventU16) -> bool {
self.raw() == other.raw()
}
}
impl<Severity: HasSeverity> PartialEq<EventU16TypedSev<Severity>> for EventU16 {
#[inline]
fn eq(&self, other: &EventU16TypedSev<Severity>) -> bool {
self.raw() == other.raw()
}
}
#[cfg(test)]
mod tests {
use super::EventU32TypedSev;
use super::*;
use spacepackets::ByteConversionError;
use std::mem::size_of;
fn assert_size<T>(_: T, val: usize) {
assert_eq!(size_of::<T>(), val);
}
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::new(0, 0);
const INFO_EVENT_SMALL: EventU16TypedSev<SeverityInfo> = EventU16TypedSev::new(0, 0);
const HIGH_SEV_EVENT: EventU32TypedSev<SeverityHigh> = EventU32TypedSev::new(0x3FFF, 0xFFFF);
const HIGH_SEV_EVENT_SMALL: EventU16TypedSev<SeverityHigh> = EventU16TypedSev::new(0x3F, 0xff);
/// The fact that this works at compile time is a test in itself.
const INFO_REDUCED: EventU32 = EventU32::const_from_info(INFO_EVENT);
#[test]
fn test_normal_from_raw_conversion() {
let conv_from_raw = EventU32TypedSev::<SeverityInfo>::try_from(INFO_EVENT.raw())
.expect("Creating typed EventU32 failed");
assert_eq!(conv_from_raw, INFO_EVENT);
}
#[test]
fn test_small_from_raw_conversion() {
let conv_from_raw = EventU16TypedSev::<SeverityInfo>::try_from(INFO_EVENT_SMALL.raw())
.expect("Creating typed EventU16 failed");
assert_eq!(conv_from_raw, INFO_EVENT_SMALL);
}
#[test]
fn verify_normal_size() {
assert_size(INFO_EVENT.raw(), 4)
}
#[test]
fn verify_small_size() {
assert_size(INFO_EVENT_SMALL.raw(), 2)
}
#[test]
fn test_normal_event_getters() {
assert_eq!(INFO_EVENT.severity(), Severity::Info);
assert_eq!(INFO_EVENT.unique_id(), 0);
assert_eq!(INFO_EVENT.group_id(), 0);
let raw_event = INFO_EVENT.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn test_small_event_getters() {
assert_eq!(INFO_EVENT_SMALL.severity(), Severity::Info);
assert_eq!(INFO_EVENT_SMALL.unique_id(), 0);
assert_eq!(INFO_EVENT_SMALL.group_id(), 0);
let raw_event = INFO_EVENT_SMALL.raw();
assert_eq!(raw_event, 0x00000000);
}
#[test]
fn all_ones_event_regular() {
assert_eq!(HIGH_SEV_EVENT.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT.group_id(), 0x3FFF);
assert_eq!(HIGH_SEV_EVENT.unique_id(), 0xFFFF);
let raw_event = HIGH_SEV_EVENT.raw();
assert_eq!(raw_event, 0xFFFFFFFF);
}
#[test]
fn all_ones_event_small() {
assert_eq!(HIGH_SEV_EVENT_SMALL.severity(), Severity::High);
assert_eq!(HIGH_SEV_EVENT_SMALL.group_id(), 0x3F);
assert_eq!(HIGH_SEV_EVENT_SMALL.unique_id(), 0xFF);
let raw_event = HIGH_SEV_EVENT_SMALL.raw();
assert_eq!(raw_event, 0xFFFF);
}
#[test]
fn invalid_group_id_normal() {
assert!(EventU32TypedSev::<SeverityMedium>::new_checked(2_u16.pow(14), 0).is_none());
}
#[test]
fn invalid_group_id_small() {
assert!(EventU16TypedSev::<SeverityMedium>::new_checked(2_u8.pow(6), 0).is_none());
}
#[test]
fn regular_new() {
assert_eq!(
EventU32TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT
);
}
#[test]
fn small_new() {
assert_eq!(
EventU16TypedSev::<SeverityInfo>::new_checked(0, 0)
.expect("Creating regular event failed"),
INFO_EVENT_SMALL
);
}
#[test]
fn as_largest_type() {
let event_raw = HIGH_SEV_EVENT.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFFFFFF);
}
#[test]
fn as_largest_type_for_small_event() {
let event_raw = HIGH_SEV_EVENT_SMALL.raw_as_largest_type();
assert_size(event_raw, 4);
assert_eq!(event_raw, 0xFFFF);
}
#[test]
fn as_largest_group_id() {
let group_id = HIGH_SEV_EVENT.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3FFF);
}
#[test]
fn as_largest_group_id_small_event() {
let group_id = HIGH_SEV_EVENT_SMALL.group_id_as_largest_type();
assert_size(group_id, 2);
assert_eq!(group_id, 0x3F);
}
#[test]
fn write_to_buf() {
let mut buf: [u8; 4] = [0; 4];
assert!(HIGH_SEV_EVENT.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u32::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFFFFFF);
let event_read_back = EventU32::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT);
}
#[test]
fn write_to_buf_small() {
let mut buf: [u8; 2] = [0; 2];
assert!(HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf).is_ok());
let val_from_raw = u16::from_be_bytes(buf);
assert_eq!(val_from_raw, 0xFFFF);
let event_read_back = EventU16::from_be_bytes(buf);
assert_eq!(event_read_back, HIGH_SEV_EVENT_SMALL);
}
#[test]
fn write_to_buf_insufficient_buf() {
let mut buf: [u8; 3] = [0; 3];
let err = HIGH_SEV_EVENT.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 4);
assert_eq!(found, 3);
}
}
#[test]
fn write_to_buf_small_insufficient_buf() {
let mut buf: [u8; 1] = [0; 1];
let err = HIGH_SEV_EVENT_SMALL.write_to_be_bytes(&mut buf);
assert!(err.is_err());
let err = err.unwrap_err();
if let ByteConversionError::ToSliceTooSmall { found, expected } = err {
assert_eq!(expected, 2);
assert_eq!(found, 1);
}
}
#[test]
fn severity_from_invalid_raw_val() {
let invalid = 0xFF;
assert!(Severity::try_from(invalid).is_err());
let invalid = Severity::High as u8 + 1;
assert!(Severity::try_from(invalid).is_err());
}
#[test]
fn reduction() {
let event = EventU32TypedSev::<SeverityInfo>::new(1, 1);
let raw = event.raw();
let reduced: EventU32 = event.into();
assert_eq!(reduced.group_id(), 1);
assert_eq!(reduced.unique_id(), 1);
assert_eq!(raw, reduced.raw());
}
#[test]
fn const_reducation() {
assert_eq!(INFO_REDUCED.raw(), INFO_EVENT.raw());
}
}


@@ -39,9 +39,9 @@ pub trait ExecutableWithType: Executable {
///
/// * `executable`: Executable task
/// * `task_freq`: Optional frequency of task. Required for periodic and fixed cycle tasks.
/// If [None] is passed, no sleeping will be performed.
/// If [None] is passed, no sleeping will be performed.
/// * `op_code`: Operation code which is passed to the executable task
/// [operation call][Executable::periodic_op]
/// [operation call][Executable::periodic_op]
/// * `termination`: Optional termination handler which can cancel threads with a broadcast
pub fn exec_sched_single<
T: ExecutableWithType<Error = E> + Send + 'static + ?Sized,
@@ -55,33 +55,35 @@ pub fn exec_sched_single<
let mut cycle_count = 0;
thread::Builder::new()
.name(String::from(executable.task_name()))
.spawn(move || loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
return Ok(OpResult::Ok);
}
Err(TryRecvError::Empty) => (),
}
}
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
return Ok(OpResult::Ok);
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_count += 1;
if cycle_count == cycles {
return Ok(OpResult::Ok);
.spawn(move || {
loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
return Ok(OpResult::Ok);
}
Err(TryRecvError::Empty) => (),
}
}
}
if let Some(freq) = task_freq {
thread::sleep(freq);
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
return Ok(OpResult::Ok);
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_count += 1;
if cycle_count == cycles {
return Ok(OpResult::Ok);
}
}
}
if let Some(freq) = task_freq {
thread::sleep(freq);
}
}
})
}
@@ -110,51 +112,53 @@ pub fn exec_sched_multi<
thread::Builder::new()
.name(String::from(task_name))
.spawn(move || loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
removal_flags.iter_mut().for_each(|x| *x = true);
.spawn(move || {
loop {
if let Some(ref mut terminator) = termination {
match terminator.try_recv() {
Ok(_) | Err(TryRecvError::Disconnected) => {
removal_flags.iter_mut().for_each(|x| *x = true);
}
Err(TryRecvError::Empty) => (),
}
Err(TryRecvError::Empty) => (),
}
}
for (idx, executable) in executable_vec.iter_mut().enumerate() {
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
removal_flags[idx] = true;
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_counts[idx] += 1;
if cycle_counts[idx] == cycles {
for (idx, executable) in executable_vec.iter_mut().enumerate() {
match executable.exec_type() {
ExecutionType::OneShot => {
executable.periodic_op(op_code)?;
removal_flags[idx] = true;
}
ExecutionType::Infinite => {
executable.periodic_op(op_code)?;
}
ExecutionType::Cycles(cycles) => {
executable.periodic_op(op_code)?;
cycle_counts[idx] += 1;
if cycle_counts[idx] == cycles {
removal_flags[idx] = true;
}
}
}
}
let mut removal_iter = removal_flags.iter();
executable_vec.retain(|_| !*removal_iter.next().unwrap());
removal_iter = removal_flags.iter();
cycle_counts.retain(|_| !*removal_iter.next().unwrap());
removal_flags.retain(|&i| !i);
if executable_vec.is_empty() {
return Ok(OpResult::Ok);
}
let freq = task_freq.unwrap_or_else(|| panic!("No task frequency specified"));
thread::sleep(freq);
}
let mut removal_iter = removal_flags.iter();
executable_vec.retain(|_| !*removal_iter.next().unwrap());
removal_iter = removal_flags.iter();
cycle_counts.retain(|_| !*removal_iter.next().unwrap());
removal_flags.retain(|&i| !i);
if executable_vec.is_empty() {
return Ok(OpResult::Ok);
}
let freq = task_freq.unwrap_or_else(|| panic!("No task frequency specified"));
thread::sleep(freq);
})
}
#[cfg(test)]
mod tests {
use super::{
exec_sched_multi, exec_sched_single, Executable, ExecutableWithType, ExecutionType,
OpResult,
Executable, ExecutableWithType, ExecutionType, OpResult, exec_sched_multi,
exec_sched_single,
};
use bus::Bus;
use std::boxed::Box;


@@ -2,5 +2,5 @@
pub mod tcp_server;
pub mod udp_server;
mod tcp_cobs_server;
mod tcp_spacepackets_server;
pub mod tcp_cobs_server;
pub mod tcp_spacepackets_server;


@@ -1,5 +1,7 @@
use alloc::sync::Arc;
use alloc::vec;
use cobs::CobsDecoderOwned;
use cobs::DecodeError;
use cobs::encode;
use core::sync::atomic::AtomicBool;
use core::time::Duration;
@@ -9,40 +11,58 @@ use std::io::Write;
use std::net::SocketAddr;
use std::vec::Vec;
use crate::encoding::parse_buffer_for_cobs_encoded_packets;
use crate::tmtc::PacketSenderRaw;
use crate::queue::GenericSendError;
use crate::tmtc::PacketHandler;
use crate::tmtc::PacketSource;
use crate::hal::std::tcp_server::{
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
};
use crate::ComponentId;
use crate::hal::std::tcp_server::{
ConnectionResult, ServerConfig, TcpTcParser, TcpTmSender, TcpTmtcGenericServer,
};
use super::tcp_server::HandledConnectionHandler;
use super::tcp_server::HandledConnectionInfo;
/// Concrete [TcpTcParser] implementation for the [TcpTmtcInCobsServer].
#[derive(Default)]
pub struct CobsTcParser {}
pub struct CobsTcParser<PacketHandlerInstance: PacketHandler> {
sender_id: ComponentId,
owned_decoder: CobsDecoderOwned,
packet_handler: PacketHandlerInstance,
last_decode_error: Option<DecodeError>,
}
impl<TmError, TcError: 'static> TcpTcParser<TmError, TcError> for CobsTcParser {
fn handle_tc_parsing(
&mut self,
tc_buffer: &mut [u8],
impl<PacketHandlerInstance: PacketHandler> CobsTcParser<PacketHandlerInstance> {
pub fn new(
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
conn_result: &mut HandledConnectionInfo,
current_write_idx: usize,
next_write_idx: &mut usize,
) -> Result<(), TcpTmtcError<TmError, TcError>> {
conn_result.num_received_tcs += parse_buffer_for_cobs_encoded_packets(
&mut tc_buffer[..current_write_idx],
decoder_buf_size: usize,
packet_handler: PacketHandlerInstance,
) -> Self {
Self {
sender_id,
tc_sender,
next_write_idx,
)
.map_err(|e| TcpTmtcError::TcError(e))?;
Ok(())
owned_decoder: CobsDecoderOwned::new(decoder_buf_size),
packet_handler,
last_decode_error: None,
}
}
}
impl<PacketHandlerInstance: PacketHandler> TcpTcParser for CobsTcParser<PacketHandlerInstance> {
fn reset(&mut self) {
self.owned_decoder.reset();
}
fn push(&mut self, data: &[u8], conn_result: &mut HandledConnectionInfo) {
for byte in data {
match self.owned_decoder.feed(*byte) {
Ok(Some(packet_len)) => {
self.packet_handler
.handle_packet(self.sender_id, &self.owned_decoder.dest()[..packet_len])
.ok();
conn_result.num_received_tcs += 1;
}
Ok(None) => (),
Err(e) => self.last_decode_error = Some(e),
}
}
}
}
@@ -61,22 +81,18 @@ impl CobsTmSender {
}
}
impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
impl TcpTmSender for CobsTmSender {
fn handle_tm_sending(
&mut self,
tm_buffer: &mut [u8],
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
tm_source: &mut (impl PacketSource<Error = ()> + ?Sized),
conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
) -> Result<bool, std::io::Error> {
let mut tm_was_sent = false;
loop {
// Write TM until TM source is exhausted. For now, there is no limit for the amount
// of TM written this way.
let read_tm_len = tm_source
.retrieve_packet(tm_buffer)
.map_err(|e| TcpTmtcError::TmError(e))?;
// Write TM until TM source is exhausted or there is an unexpected error. For now, there
// is no limit for the amount of TM written this way.
while let Ok(read_tm_len) = tm_source.retrieve_packet(tm_buffer) {
if read_tm_len == 0 {
return Ok(tm_was_sent);
}
@@ -95,6 +111,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
current_idx += 1;
stream.write_all(&self.tm_encoding_buffer[..current_idx])?;
}
Ok(tm_was_sent)
}
}
@@ -108,8 +125,8 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
///
/// Using a framing protocol like COBS imposes minimal restrictions on the type of TMTC data
/// exchanged while also allowing packets with flexible size and a reliable way to reconstruct full
/// packets even from a data stream which is split up. The server will use the
/// [parse_buffer_for_cobs_encoded_packets] function to parse for packets and pass them to a
/// packets even from a data stream which is split up. The server will use the streaming
/// [CobsDecoderOwned] decoder to parse for packets and pass them to a
/// generic TC receiver. The user can use [crate::encoding::encode_packet_with_cobs] to encode
/// telecommands sent to the server.
///
@@ -118,30 +135,19 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for CobsTmSender {
/// The [TCP integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// test also serves as the example application for this module.
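///
/// A minimal client-side framing sketch, assuming the `cobs` crate's `encode` function
/// (this crate also provides [crate::encoding::encode_packet_with_cobs] as a helper):
///
/// ```ignore
/// let packet: [u8; 4] = [1, 2, 3, 4];
/// let mut frame = [0u8; 16];
/// // Each COBS frame is delimited by zero sentinel bytes on both sides.
/// frame[0] = 0;
/// let encoded_len = cobs::encode(&packet, &mut frame[1..]);
/// frame[1 + encoded_len] = 0;
/// // frame[..encoded_len + 2] can now be written to the TCP stream.
/// ```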
pub struct TcpTmtcInCobsServer<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = SendError>,
TmSource: PacketSource<Error = ()>,
TcHandler: PacketHandler<Error = GenericSendError>,
HandledConnection: HandledConnectionHandler,
TmError,
SendError: 'static,
> {
pub generic_server: TcpTmtcGenericServer<
TmSource,
TcSender,
CobsTmSender,
CobsTcParser,
HandledConnection,
TmError,
SendError,
>,
pub generic_server:
TcpTmtcGenericServer<TmSource, CobsTmSender, CobsTcParser<TcHandler>, HandledConnection>,
}
impl<
TmSource: PacketSource<Error = TmError>,
TcReceiver: PacketSenderRaw<Error = TcError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpTmtcInCobsServer<TmSource, TcReceiver, HandledConnection, TmError, TcError>
TmSource: PacketSource<Error = ()>,
TcHandler: PacketHandler<Error = GenericSendError>,
HandledConnection: HandledConnectionHandler,
> TcpTmtcInCobsServer<TmSource, TcHandler, HandledConnection>
{
/// Create a new TCP TMTC server which exchanges TMTC packets encoded with
/// [COBS protocol](https://en.wikipedia.org/wiki/Consistent_Overhead_Byte_Stuffing).
@@ -150,23 +156,22 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_receiver` - Any received telecommands which were decoded successfully will be
/// forwarded to this TC receiver.
/// forwarded to this TC receiver.
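///
/// A construction sketch mirroring the unit test setup further below; `addr`, `tm_source`
/// and the server component ID are assumed to exist, and the channel-based packet handler
/// plus `ConnectionFinishedHandler` are placeholders taken from the test code:
///
/// ```ignore
/// let (tc_sender, _tc_receiver) = std::sync::mpsc::channel();
/// let server = TcpTmtcInCobsServer::new(
///     ServerConfig::new(TCP_SERVER_ID, addr, Duration::from_millis(2), 1024, 1024),
///     tm_source,
///     CobsTcParser::new(TCP_SERVER_ID, 1024, tc_sender),
///     ConnectionFinishedHandler::default(),
///     None,
/// )?;
/// ```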
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,
tc_receiver: TcReceiver,
cobs_tc_parser: CobsTcParser<TcHandler>,
handled_connection: HandledConnection,
stop_signal: Option<Arc<AtomicBool>>,
) -> Result<Self, std::io::Error> {
Ok(Self {
generic_server: TcpTmtcGenericServer::new(
cfg,
CobsTcParser::default(),
cobs_tc_parser,
CobsTmSender::new(cfg.tm_buffer_size),
tm_source,
tc_receiver,
handled_connection,
stop_signal,
)?,
@@ -185,7 +190,7 @@ impl<
pub fn handle_all_connections(
&mut self,
poll_duration: Option<Duration>,
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
) -> Result<ConnectionResult, std::io::Error>;
}
}
}
@@ -206,14 +211,13 @@ mod tests {
};
use crate::{
ComponentId,
encoding::tests::{INVERTED_PACKET, SIMPLE_PACKET},
hal::std::tcp_server::{
tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig,
tests::{ConnectionFinishedHandler, SyncTmSource},
},
queue::GenericSendError,
tmtc::PacketAsVec,
ComponentId,
};
use alloc::sync::Arc;
use cobs::encode;
@@ -243,17 +247,12 @@ mod tests {
tc_sender: mpsc::Sender<PacketAsVec>,
tm_source: SyncTmSource,
stop_signal: Option<Arc<AtomicBool>>,
) -> TcpTmtcInCobsServer<
SyncTmSource,
mpsc::Sender<PacketAsVec>,
ConnectionFinishedHandler,
(),
GenericSendError,
> {
) -> TcpTmtcInCobsServer<SyncTmSource, mpsc::Sender<PacketAsVec>, ConnectionFinishedHandler>
{
TcpTmtcInCobsServer::new(
ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
tm_source,
tc_sender,
super::CobsTcParser::new(TCP_SERVER_ID, 1024, tc_sender),
ConnectionFinishedHandler::default(),
stop_signal,
)
@@ -377,13 +376,13 @@ mod tests {
current_idx += 1;
let mut dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + INVERTED_PACKET.len()],
&INVERTED_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0, "invalid sentinel end byte");
current_idx += 1;
@@ -393,13 +392,13 @@ mod tests {
current_idx += 1;
dec_report = cobs::decode_in_place_report(&mut read_buf[current_idx..])
.expect("COBS decoding failed");
assert_eq!(dec_report.dst_used, 5);
assert_eq!(dec_report.frame_size(), 5);
// Skip first sentinel byte.
assert_eq!(
&read_buf[current_idx..current_idx + SIMPLE_PACKET.len()],
&SIMPLE_PACKET
);
current_idx += dec_report.src_used;
current_idx += dec_report.parsed_size();
// End sentinel.
assert_eq!(read_buf[current_idx], 0);
break;
@@ -435,17 +434,19 @@ mod tests {
generic_tmtc_server(&auto_port_addr, tc_sender.clone(), tm_source, None);
let start = Instant::now();
// Call the connection handler in separate thread, does block.
let thread_jh = thread::spawn(move || loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
let thread_jh = thread::spawn(move || {
loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
}
}
});
thread_jh.join().expect("thread join failed");
@@ -469,20 +470,22 @@ mod tests {
let stop_signal_copy = stop_signal.clone();
let start = Instant::now();
// Call the connection handler in separate thread, does block.
let thread_jh = thread::spawn(move || loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
panic!("unexpected accept timeout");
}
if stop_signal_copy.load(Ordering::Relaxed) {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
let thread_jh = thread::spawn(move || {
loop {
let result = tcp_server.handle_all_connections(Some(Duration::from_millis(20)));
if result.is_err() {
panic!("handling connection failed: {:?}", result.unwrap_err());
}
let result = result.unwrap();
if result == ConnectionResult::AcceptTimeout {
panic!("unexpected accept timeout");
}
if stop_signal_copy.load(Ordering::Relaxed) {
break;
}
if Instant::now() - start > Duration::from_millis(100) {
panic!("regular stop signal handling failed");
}
}
});
// We connect but do not do anything.


@@ -11,8 +11,8 @@ use std::io::{self, Read};
use std::net::SocketAddr;
use std::thread;
use crate::tmtc::{PacketSenderRaw, PacketSource};
use crate::ComponentId;
use crate::tmtc::PacketSource;
use thiserror::Error;
// Re-export the TMTC in COBS server.
@@ -25,22 +25,22 @@ pub use crate::hal::std::tcp_spacepackets_server::{SpacepacketsTmSender, TcpSpac
///
/// * `addr` - Address of the TCP server.
/// * `inner_loop_delay` - If a client connects for a longer period, but no TC is received or
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// no TM needs to be sent, the TCP server will delay for the specified amount of time
/// to reduce CPU load.
/// * `tm_buffer_size` - Size of the TM buffer used to read TM from the [PacketSource] and
/// encoding of that data. This buffer should be at least large enough to hold the maximum expected
/// TM size read from the packet source.
/// encoding of that data. This buffer should be at least large enough to hold the maximum expected
/// TM size read from the packet source.
/// * `tc_buffer_size` - Size of the TC buffer used to read encoded telecommands sent from
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
/// bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// the client. It is recommended to make this buffer larger to allow reading multiple
/// consecutive packets as well, for example by using common buffer sizes like 4096 or 8192
/// bytes. The buffer should at the very least be large enough to hold the maximum expected
/// telecommand size.
/// telecommand size.
/// * `reuse_addr` - Can be used to set the `SO_REUSEADDR` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// * `reuse_port` - Can be used to set the `SO_REUSEPORT` option on the raw socket. This is
/// especially useful if the address and port are static for the server. Set to false by
/// default.
/// especially useful if the address and port are static for the server. Set to false by
/// default.
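///
/// # Example
///
/// A minimal configuration sketch; the argument order mirrors how the unit tests construct
/// the config, the address is illustrative and the reuse options keep their defaults:
///
/// ```ignore
/// use core::time::Duration;
///
/// let cfg = ServerConfig::new(
///     0,                                  // component ID of the server
///     "127.0.0.1:7301".parse().unwrap(),  // listening address
///     Duration::from_millis(2),           // inner loop delay
///     1024,                               // TM buffer size
///     1024,                               // TC buffer size
/// );
/// ```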
#[derive(Debug, Copy, Clone)]
pub struct ServerConfig {
pub id: ComponentId,
@@ -73,11 +73,9 @@ impl ServerConfig {
}
#[derive(Error, Debug)]
pub enum TcpTmtcError<TmError, TcError> {
pub enum TcpTmError<TmError> {
#[error("TM retrieval error: {0}")]
TmError(TmError),
#[error("TC retrieval error: {0}")]
TcError(TcError),
#[error("io error: {0}")]
Io(#[from] std::io::Error),
}
@@ -116,32 +114,29 @@ pub trait HandledConnectionHandler {
}
/// Generic parser abstraction for an object which can parse for telecommands given a raw
/// bytestream received from a TCP socket and send them using a generic [PacketSenderRaw]
/// implementation. This allows different encoding schemes for telecommands.
pub trait TcpTcParser<TmError, SendError> {
fn handle_tc_parsing(
&mut self,
tc_buffer: &mut [u8],
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = SendError> + ?Sized),
conn_result: &mut HandledConnectionInfo,
current_write_idx: usize,
next_write_idx: &mut usize,
) -> Result<(), TcpTmtcError<TmError, SendError>>;
/// bytestream received from a TCP socket and extract packets from them. This allows different
/// encoding schemes for telecommands.
pub trait TcpTcParser {
/// Reset the state of the parser.
fn reset(&mut self);
/// Pushes received data into the parser.
fn push(&mut self, tc_data: &[u8], conn_result: &mut HandledConnectionInfo);
}
/// Generic sender abstraction for an object which can pull telemetry from a given TM source
/// using a [PacketSource] and then send them back to a client using a given [TcpStream].
/// The concrete implementation can also perform any encoding steps which are necessary before
/// sending back the data to a client.
pub trait TcpTmSender<TmError, TcError> {
pub trait TcpTmSender {
/// Returns whether any packets were sent back to the client.
fn handle_tm_sending(
&mut self,
tm_buffer: &mut [u8],
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
tm_source: &mut (impl PacketSource<Error = ()> + ?Sized),
conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>>;
) -> Result<bool, std::io::Error>;
}
/// TCP TMTC server implementation for exchange of generic TMTC packets in a generic way which
@@ -151,7 +146,8 @@ pub trait TcpTmSender<TmError, TcError> {
/// through the following 4 core abstractions:
///
/// 1. [TcpTcParser] to parse for telecommands from the raw bytestream received from a client.
/// 2. Parsed telecommands will be sent using the [PacketSenderRaw] object.
/// 2. Parsed telecommands will be handled by the [TcpTcParser] object as well. For example, this
/// parser can contain a message queue handle to send the packets somewhere.
/// 3. [TcpTmSender] to send telemetry pulled from a TM source back to the client.
/// 4. [PacketSource] as a generic TM source used by the [TcpTmSender].
///
@@ -163,13 +159,10 @@ pub trait TcpTmSender<TmError, TcError> {
/// 1. [TcpTmtcInCobsServer] to exchange TMTC wrapped inside the COBS framing protocol.
/// 2. [TcpSpacepacketsServer] to exchange space packets via TCP.
pub struct TcpTmtcGenericServer<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>,
TmSource: PacketSource<Error = ()>,
TmSender: TcpTmSender,
TcParser: TcpTcParser,
HandledConnection: HandledConnectionHandler,
TmError,
TcSendError,
> {
pub id: ComponentId,
pub finished_handler: HandledConnection,
@@ -177,7 +170,6 @@ pub struct TcpTmtcGenericServer<
pub(crate) inner_loop_delay: Duration,
pub(crate) tm_source: TmSource,
pub(crate) tm_buffer: Vec<u8>,
pub(crate) tc_sender: TcSender,
pub(crate) tc_buffer: Vec<u8>,
poll: Poll,
events: Events,
@@ -187,23 +179,11 @@ pub struct TcpTmtcGenericServer<
}
impl<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcSendError>,
TmSender: TcpTmSender<TmError, TcSendError>,
TcParser: TcpTcParser<TmError, TcSendError>,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcSendError: 'static,
>
TcpTmtcGenericServer<
TmSource,
TcSender,
TmSender,
TcParser,
HandledConnection,
TmError,
TcSendError,
>
TmSource: PacketSource<Error = ()>,
TmSender: TcpTmSender,
TcParser: TcpTcParser,
HandledConnection: HandledConnectionHandler,
> TcpTmtcGenericServer<TmSource, TmSender, TcParser, HandledConnection>
{
/// Create a new generic TMTC server instance.
///
@@ -211,19 +191,18 @@ impl<
///
/// * `cfg` - Configuration of the server.
/// * `tc_parser` - Parser which extracts telecommands from the raw bytestream received from
/// the client.
/// the client.
/// * `tm_sender` - Sends back telemetry to the client using the specified TM source.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommand which was decoded successfully will be forwarded
/// using this TC sender.
/// using this TC sender.
/// * `stop_signal` - Can be used to stop the server even if a connection is ongoing.
pub fn new(
cfg: ServerConfig,
tc_parser: TcParser,
tm_sender: TmSender,
tm_source: TmSource,
tc_receiver: TcSender,
finished_handler: HandledConnection,
stop_signal: Option<Arc<AtomicBool>>,
) -> Result<Self, std::io::Error> {
@@ -263,7 +242,6 @@ impl<
inner_loop_delay: cfg.inner_loop_delay,
tm_source,
tm_buffer: vec![0; cfg.tm_buffer_size],
tc_sender: tc_receiver,
tc_buffer: vec![0; cfg.tc_buffer_size],
stop_signal,
finished_handler,
@@ -297,7 +275,7 @@ impl<
pub fn handle_all_connections(
&mut self,
poll_timeout: Option<Duration>,
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcSendError>> {
) -> Result<ConnectionResult, std::io::Error> {
let mut handled_connections = 0;
// Poll Mio for events.
self.poll.poll(&mut self.events, poll_timeout)?;
@@ -327,7 +305,7 @@ impl<
Err(ref err) if err.kind() == io::ErrorKind::WouldBlock => break,
Err(err) => {
self.reregister_poll_interest()?;
return Err(TcpTmtcError::Io(err));
return Err(err);
}
}
}
@@ -350,58 +328,24 @@ impl<
&mut self,
mut stream: TcpStream,
addr: SocketAddr,
) -> Result<(), TcpTmtcError<TmError, TcSendError>> {
let mut current_write_idx;
let mut next_write_idx = 0;
) -> Result<(), std::io::Error> {
self.tc_handler.reset();
let mut connection_result = HandledConnectionInfo::new(addr);
current_write_idx = next_write_idx;
loop {
let read_result = stream.read(&mut self.tc_buffer[current_write_idx..]);
let read_result = stream.read(&mut self.tc_buffer);
match read_result {
Ok(0) => {
// Connection closed by client. If any TC was read, parse for complete packets.
// After that, break the outer loop.
if current_write_idx > 0 {
self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer,
self.id,
&self.tc_sender,
&mut connection_result,
current_write_idx,
&mut next_write_idx,
)?;
}
// Connection closed by client.
break;
}
Ok(read_len) => {
current_write_idx += read_len;
// TC buffer is full, we must parse for complete packets now.
if current_write_idx == self.tc_buffer.capacity() {
self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer,
self.id,
&self.tc_sender,
&mut connection_result,
current_write_idx,
&mut next_write_idx,
)?;
current_write_idx = next_write_idx;
}
self.tc_handler
.push(&self.tc_buffer[0..read_len], &mut connection_result);
}
Err(e) => match e.kind() {
// As per [TcpStream::set_read_timeout] documentation, this should work for
// both UNIX and Windows.
std::io::ErrorKind::WouldBlock | std::io::ErrorKind::TimedOut => {
self.tc_handler.handle_tc_parsing(
&mut self.tc_buffer,
self.id,
&self.tc_sender,
&mut connection_result,
current_write_idx,
&mut next_write_idx,
)?;
current_write_idx = next_write_idx;
if !self.tm_handler.handle_tm_sending(
&mut self.tm_buffer,
&mut self.tm_source,
@@ -426,7 +370,7 @@ impl<
}
}
_ => {
return Err(TcpTmtcError::Io(e));
return Err(e);
}
},
}
@@ -502,8 +446,11 @@ pub(crate) mod tests {
.connection_info
.pop_back()
.expect("no connection info available");
assert_eq!(last_conn_result.num_received_tcs, num_tcs);
assert_eq!(last_conn_result.num_sent_tms, num_tms);
assert_eq!(
last_conn_result.num_received_tcs, num_tcs,
"received tcs mismatch"
);
assert_eq!(last_conn_result.num_sent_tms, num_tms, "sent tms mismatch");
}
pub fn check_no_connections_left(&self) {


@@ -5,41 +5,106 @@ use mio::net::{TcpListener, TcpStream};
use std::{io::Write, net::SocketAddr};
use crate::{
encoding::{ccsds::SpacePacketValidator, parse_buffer_for_ccsds_space_packets},
tmtc::{PacketSenderRaw, PacketSource},
ComponentId,
encoding::{ccsds::SpacePacketValidator, parse_buffer_for_ccsds_space_packets},
queue::GenericSendError,
tmtc::{PacketHandler, PacketSource},
};
use super::tcp_server::{
ConnectionResult, HandledConnectionHandler, HandledConnectionInfo, ServerConfig, TcpTcParser,
TcpTmSender, TcpTmtcError, TcpTmtcGenericServer,
TcpTmSender, TcpTmtcGenericServer,
};
impl<T: SpacePacketValidator, TmError, TcError: 'static> TcpTcParser<TmError, TcError> for T {
fn handle_tc_parsing(
&mut self,
tc_buffer: &mut [u8],
pub struct CcsdsPacketParser<
PacketValidator: SpacePacketValidator,
PacketHandlerInstance: PacketHandler,
> {
sender_id: ComponentId,
parsing_buffer: alloc::vec::Vec<u8>,
validator: PacketValidator,
packet_handler: PacketHandlerInstance,
current_write_index: usize,
}
impl<PacketValidator: SpacePacketValidator, PacketHandlerInstance: PacketHandler>
CcsdsPacketParser<PacketValidator, PacketHandlerInstance>
{
pub fn new(
sender_id: ComponentId,
tc_sender: &(impl PacketSenderRaw<Error = TcError> + ?Sized),
conn_result: &mut HandledConnectionInfo,
current_write_idx: usize,
next_write_idx: &mut usize,
) -> Result<(), TcpTmtcError<TmError, TcError>> {
// Reader vec full, need to parse for packets.
let parse_result = parse_buffer_for_ccsds_space_packets(
&tc_buffer[..current_write_idx],
self,
parsing_buf_size: usize,
packet_handler: PacketHandlerInstance,
validator: PacketValidator,
) -> Self {
Self {
sender_id,
tc_sender,
)
.map_err(|e| TcpTmtcError::TcError(e))?;
if let Some(broken_tail_start) = parse_result.incomplete_tail_start {
// Copy broken tail to front of buffer.
tc_buffer.copy_within(broken_tail_start..current_write_idx, 0);
*next_write_idx = current_write_idx - broken_tail_start;
parsing_buffer: alloc::vec![0; parsing_buf_size],
validator,
packet_handler,
current_write_index: 0,
}
}
fn write_to_buffer(&mut self, data: &[u8]) -> usize {
let available = self.parsing_buffer.len() - self.current_write_index;
let to_write = core::cmp::min(data.len(), available);
self.parsing_buffer[self.current_write_index..self.current_write_index + to_write]
.copy_from_slice(&data[..to_write]);
self.current_write_index += to_write;
to_write
}
fn parse_and_handle_packets(&mut self) -> u32 {
match parse_buffer_for_ccsds_space_packets(
&self.parsing_buffer[..self.current_write_index],
&self.validator,
self.sender_id,
&self.packet_handler,
) {
Ok(parse_result) => {
self.parsing_buffer
.copy_within(parse_result.parsed_bytes..self.current_write_index, 0);
self.current_write_index -= parse_result.parsed_bytes;
parse_result.packets_found
}
Err(_) => 0,
}
}
fn drop_first_half_of_buffer(&mut self) {
let mid = self.parsing_buffer.len() / 2;
self.parsing_buffer.copy_within(mid.., 0);
self.current_write_index -= mid;
}
}
impl<PacketValidator: SpacePacketValidator, PacketHandlerInstance: PacketHandler> TcpTcParser
for CcsdsPacketParser<PacketValidator, PacketHandlerInstance>
{
fn reset(&mut self) {
self.current_write_index = 0;
}
fn push(&mut self, mut tc_buffer: &[u8], conn_result: &mut HandledConnectionInfo) {
while !tc_buffer.is_empty() {
// Write as much as possible to buffer
let written = self.write_to_buffer(tc_buffer);
tc_buffer = &tc_buffer[written..];
// Parse for complete packets
let packets_found = self.parse_and_handle_packets();
conn_result.num_received_tcs += packets_found;
if tc_buffer.is_empty() {
break;
}
// Handle buffer overflow
if self.current_write_index == self.parsing_buffer.len() {
self.drop_first_half_of_buffer();
}
}
conn_result.num_received_tcs += parse_result.packets_found;
Ok(())
}
}
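A short construction sketch for the parser above, mirroring its new() signature and the test helper further below; MY_SERVER_ID and my_validator are placeholders for a real component ID and a SpacePacketValidator implementation.

// Sketch: wire the CCSDS parser to an mpsc channel acting as the PacketHandler.
let (tc_sender, _tc_receiver) = std::sync::mpsc::channel::<PacketAsVec>();
let tc_parser = CcsdsPacketParser::new(MY_SERVER_ID, 1024, tc_sender, my_validator);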
@@ -47,21 +112,18 @@ impl<T: SpacePacketValidator, TmError, TcError: 'static> TcpTcParser<TmError, Tc
#[derive(Default)]
pub struct SpacepacketsTmSender {}
impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
impl TcpTmSender for SpacepacketsTmSender {
fn handle_tm_sending(
&mut self,
tm_buffer: &mut [u8],
tm_source: &mut (impl PacketSource<Error = TmError> + ?Sized),
tm_source: &mut (impl PacketSource<Error = ()> + ?Sized),
conn_result: &mut HandledConnectionInfo,
stream: &mut TcpStream,
) -> Result<bool, TcpTmtcError<TmError, TcError>> {
) -> Result<bool, std::io::Error> {
let mut tm_was_sent = false;
loop {
while let Ok(read_tm_len) = tm_source.retrieve_packet(tm_buffer) {
// Write TM until TM source is exhausted. For now, there is no limit for the amount
// of TM written this way.
let read_tm_len = tm_source
.retrieve_packet(tm_buffer)
.map_err(|e| TcpTmtcError::TmError(e))?;
if read_tm_len == 0 {
return Ok(tm_was_sent);
@@ -71,6 +133,7 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
stream.write_all(&tm_buffer[..read_tm_len])?;
}
Ok(tm_was_sent)
}
}
@@ -88,62 +151,53 @@ impl<TmError, TcError> TcpTmSender<TmError, TcError> for SpacepacketsTmSender {
/// The [TCP server integration tests](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs/tests/tcp_servers.rs)
/// also serves as the example application for this module.
pub struct TcpSpacepacketsServer<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = SendError>,
TmSource: PacketSource<Error = ()>,
TcSender: PacketHandler<Error = GenericSendError>,
Validator: SpacePacketValidator,
HandledConnection: HandledConnectionHandler,
TmError,
SendError: 'static,
> {
pub generic_server: TcpTmtcGenericServer<
TmSource,
TcSender,
SpacepacketsTmSender,
Validator,
CcsdsPacketParser<Validator, TcSender>,
HandledConnection,
TmError,
SendError,
>,
}
impl<
TmSource: PacketSource<Error = TmError>,
TcSender: PacketSenderRaw<Error = TcError>,
Validator: SpacePacketValidator,
HandledConnection: HandledConnectionHandler,
TmError: 'static,
TcError: 'static,
> TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection, TmError, TcError>
TmSource: PacketSource<Error = ()>,
TcSender: PacketHandler<Error = GenericSendError>,
Validator: SpacePacketValidator,
HandledConnection: HandledConnectionHandler,
> TcpSpacepacketsServer<TmSource, TcSender, Validator, HandledConnection>
{
///
/// ## Parameters
///
/// * `cfg` - Configuration of the server.
/// * `tm_source` - Generic TM source used by the server to pull telemetry packets which are
/// then sent back to the client.
/// then sent back to the client.
/// * `tc_sender` - Any received telecommands which were decoded successfully will be
/// forwarded using this [PacketSenderRaw].
/// forwarded using this [PacketHandler].
/// * `validator` - Used to determine the space packets relevant for further processing and
/// to detect broken space packets.
/// to detect broken space packets.
/// * `handled_connection_hook` - Called to notify the user about a successfully handled
/// connection.
/// connection.
/// * `stop_signal` - Can be used to shut down the TCP server even for longer running
/// connections.
/// connections.
pub fn new(
cfg: ServerConfig,
tm_source: TmSource,
tc_sender: TcSender,
validator: Validator,
tc_parser: CcsdsPacketParser<Validator, TcSender>,
handled_connection_hook: HandledConnection,
stop_signal: Option<Arc<AtomicBool>>,
) -> Result<Self, std::io::Error> {
Ok(Self {
generic_server: TcpTmtcGenericServer::new(
cfg,
validator,
tc_parser,
SpacepacketsTmSender::default(),
tm_source,
tc_sender,
handled_connection_hook,
stop_signal,
)?,
@@ -162,7 +216,7 @@ impl<
pub fn handle_all_connections(
&mut self,
poll_timeout: Option<Duration>
) -> Result<ConnectionResult, TcpTmtcError<TmError, TcError>>;
) -> Result<ConnectionResult, std::io::Error>;
}
}
}
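For reference, a hedged assembly sketch following the new constructor parameter order and mirroring the generic_tmtc_server() helper in the tests below; SERVER_ID, addr, my_tm_source, my_validator and MyFinishedHandler are placeholders, not part of this change.

// Sketch: assemble a space packets TCP server with the reworked API (placeholder names,
// inside a function that returns std::io::Result<()>).
let (tc_sender, _tc_receiver) = std::sync::mpsc::channel::<PacketAsVec>();
let tc_parser = CcsdsPacketParser::new(SERVER_ID, 1024, tc_sender, my_validator);
let mut server = TcpSpacepacketsServer::new(
    ServerConfig::new(SERVER_ID, addr, Duration::from_millis(2), 1024, 1024),
    my_tm_source,
    tc_parser,
    MyFinishedHandler::default(),
    None,
)?;
// Poll for and handle incoming client connections.
server.handle_all_connections(Some(Duration::from_millis(100)))?;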
@@ -183,29 +237,29 @@ mod tests {
};
use alloc::sync::Arc;
use arbitrary_int::u11;
use hashbrown::HashSet;
use spacepackets::{
ecss::{tc::PusTcCreator, WritablePusPacket},
CcsdsPacket, PacketId, SpHeader,
ecss::{CreatorConfig, MessageTypeId, WritablePusPacket, tc::PusTcCreator},
};
use crate::{
ComponentId,
encoding::ccsds::{SpValidity, SpacePacketValidator},
hal::std::tcp_server::{
tests::{ConnectionFinishedHandler, SyncTmSource},
ConnectionResult, ServerConfig,
tests::{ConnectionFinishedHandler, SyncTmSource},
},
queue::GenericSendError,
tmtc::PacketAsVec,
ComponentId,
};
use super::TcpSpacepacketsServer;
const TCP_SERVER_ID: ComponentId = 0x05;
const TEST_APID_0: u16 = 0x02;
const TEST_APID_0: u11 = u11::new(0x02);
const TEST_PACKET_ID_0: PacketId = PacketId::new_for_tc(true, TEST_APID_0);
const TEST_APID_1: u16 = 0x10;
const TEST_APID_1: u11 = u11::new(0x10);
const TEST_PACKET_ID_1: PacketId = PacketId::new_for_tc(true, TEST_APID_1);
#[derive(Default)]
@@ -223,23 +277,19 @@ mod tests {
fn generic_tmtc_server(
addr: &SocketAddr,
tc_sender: mpsc::Sender<PacketAsVec>,
tc_parser: super::CcsdsPacketParser<SimpleValidator, mpsc::Sender<PacketAsVec>>,
tm_source: SyncTmSource,
validator: SimpleValidator,
stop_signal: Option<Arc<AtomicBool>>,
) -> TcpSpacepacketsServer<
SyncTmSource,
mpsc::Sender<PacketAsVec>,
SimpleValidator,
ConnectionFinishedHandler,
(),
GenericSendError,
> {
TcpSpacepacketsServer::new(
ServerConfig::new(TCP_SERVER_ID, *addr, Duration::from_millis(2), 1024, 1024),
tm_source,
tc_sender,
validator,
tc_parser,
ConnectionFinishedHandler::default(),
stop_signal,
)
@@ -255,9 +305,8 @@ mod tests {
validator.0.insert(TEST_PACKET_ID_0);
let mut tcp_server = generic_tmtc_server(
&auto_port_addr,
tc_sender.clone(),
super::CcsdsPacketParser::new(TCP_SERVER_ID, 1024, tc_sender.clone(), validator),
tm_source,
validator,
None,
);
let dest_addr = tcp_server
@@ -283,8 +332,12 @@ mod tests {
.check_no_connections_left();
set_if_done.store(true, Ordering::Relaxed);
});
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let tc_0 = ping_tc.to_vec().expect("packet generation failed");
let mut stream = TcpStream::connect(dest_addr).expect("connecting to TCP server failed");
stream
@@ -314,13 +367,21 @@ mod tests {
// Add telemetry
let mut total_tm_len = 0;
let verif_tm =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 1, 1, &[], true);
let verif_tm = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
MessageTypeId::new(1, 1),
&[],
CreatorConfig::default(),
);
let tm_0 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_0.len();
tm_source.add_tm(&tm_0);
let verif_tm =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 1, 3, &[], true);
let verif_tm = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
MessageTypeId::new(1, 3),
&[],
CreatorConfig::default(),
);
let tm_1 = verif_tm.to_vec().expect("writing packet failed");
total_tm_len += tm_1.len();
tm_source.add_tm(&tm_1);
@@ -331,9 +392,8 @@ mod tests {
validator.0.insert(TEST_PACKET_ID_1);
let mut tcp_server = generic_tmtc_server(
&auto_port_addr,
tc_sender.clone(),
super::CcsdsPacketParser::new(TCP_SERVER_ID, 1024, tc_sender.clone(), validator),
tm_source,
validator,
None,
);
let dest_addr = tcp_server
@@ -366,14 +426,22 @@ mod tests {
.expect("setting reas timeout failed");
// Send telecommands
let ping_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_0), 17, 1, &[], true);
let ping_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_0),
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let tc_0 = ping_tc.to_vec().expect("ping tc creation failed");
stream
.write_all(&tc_0)
.expect("writing to TCP server failed");
let action_tc =
PusTcCreator::new_simple(SpHeader::new_from_apid(TEST_APID_1), 8, 0, &[], true);
let action_tc = PusTcCreator::new_simple(
SpHeader::new_from_apid(TEST_APID_1),
MessageTypeId::new(8, 0),
&[],
CreatorConfig::default(),
);
let tc_1 = action_tc.to_vec().expect("action tc creation failed");
stream
.write_all(&tc_1)


@@ -1,6 +1,6 @@
//! Generic UDP TC server.
use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use crate::tmtc::PacketHandler;
use core::fmt::Debug;
use std::io::{self, ErrorKind};
use std::net::{SocketAddr, ToSocketAddrs, UdpSocket};
@@ -12,7 +12,7 @@ use std::vec::Vec;
///
/// It caches all received telecommands into a vector. The maximum expected telecommand size should
/// be declared upfront. This avoids dynamic allocation during run-time. The user can specify a TC
/// sender in form of a special trait object which implements [PacketSenderRaw]. For example, this
/// sender in form of a special trait object which implements [PacketHandler]. For example, this
/// can be used to send the telecommands to a centralized TC source component for further
/// processing and routing.
///
@@ -24,9 +24,10 @@ use std::vec::Vec;
/// use spacepackets::ecss::WritablePusPacket;
/// use satrs::hal::std::udp_server::UdpTcServer;
/// use satrs::ComponentId;
/// use satrs::tmtc::PacketSenderRaw;
/// use satrs::tmtc::PacketHandler;
/// use spacepackets::SpHeader;
/// use spacepackets::ecss::tc::PusTcCreator;
/// use spacepackets::ecss::tc::{MessageTypeId, PusTcCreator, CreatorConfig};
/// use arbitrary_int::u11;
///
/// const UDP_SERVER_ID: ComponentId = 0x05;
///
@@ -34,8 +35,8 @@ use std::vec::Vec;
/// let dest_addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 7777);
/// let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, packet_sender)
/// .expect("Creating UDP TMTC server failed");
/// let sph = SpHeader::new_from_apid(0x02);
/// let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
/// let sph = SpHeader::new_from_apid(u11::new(0x02));
/// let pus_tc = PusTcCreator::new_simple(sph, MessageTypeId::new(17, 1), &[], CreatorConfig::default());
/// // Can not fail.
/// let ping_tc_raw = pus_tc.to_vec().unwrap();
///
@@ -59,7 +60,7 @@ use std::vec::Vec;
/// [example code](https://egit.irs.uni-stuttgart.de/rust/sat-rs/src/branch/main/satrs-example/src/tmtc.rs#L67)
/// on how to use this TC server. It uses the server to receive PUS telecommands on a specific port
/// and then forwards them to a generic CCSDS packet receiver.
pub struct UdpTcServer<TcSender: PacketSenderRaw<Error = SendError>, SendError> {
pub struct UdpTcServer<TcSender: PacketHandler<Error = SendError>, SendError> {
pub id: ComponentId,
pub socket: UdpSocket,
recv_buf: Vec<u8>,
@@ -77,7 +78,7 @@ pub enum ReceiveResult<SendError: Debug + 'static> {
Send(SendError),
}
impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
impl<TcSender: PacketHandler<Error = SendError>, SendError: Debug + 'static>
UdpTcServer<TcSender, SendError>
{
pub fn new<A: ToSocketAddrs>(
@@ -105,13 +106,13 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
Err(ReceiveResult::NothingReceived)
} else {
Err(e.into())
}
};
}
};
let (num_bytes, from) = res;
self.sender_addr = Some(from);
self.tc_sender
.send_packet(self.id, &self.recv_buf[0..num_bytes])
.handle_packet(self.id, &self.recv_buf[0..num_bytes])
.map_err(ReceiveResult::Send)?;
Ok(res)
}
@@ -123,14 +124,15 @@ impl<TcSender: PacketSenderRaw<Error = SendError>, SendError: Debug + 'static>
#[cfg(test)]
mod tests {
use crate::ComponentId;
use crate::hal::std::udp_server::{ReceiveResult, UdpTcServer};
use crate::queue::GenericSendError;
use crate::tmtc::PacketSenderRaw;
use crate::ComponentId;
use crate::tmtc::PacketHandler;
use arbitrary_int::u11;
use core::cell::RefCell;
use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::SpHeader;
use spacepackets::ecss::tc::PusTcCreator;
use spacepackets::ecss::{CreatorConfig, MessageTypeId};
use std::collections::VecDeque;
use std::net::{IpAddr, Ipv4Addr, SocketAddr, UdpSocket};
use std::vec::Vec;
@@ -144,10 +146,10 @@ mod tests {
pub sent_cmds: RefCell<VecDeque<Vec<u8>>>,
}
impl PacketSenderRaw for PingReceiver {
impl PacketHandler for PingReceiver {
type Error = GenericSendError;
fn send_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
fn handle_packet(&self, sender_id: ComponentId, tc_raw: &[u8]) -> Result<(), Self::Error> {
assert_eq!(sender_id, UDP_SERVER_ID);
let mut sent_data = Vec::new();
sent_data.extend_from_slice(tc_raw);
@@ -166,8 +168,13 @@ mod tests {
let mut udp_tc_server = UdpTcServer::new(UDP_SERVER_ID, dest_addr, 2048, ping_receiver)
.expect("Creating UDP TMTC server failed");
is_send(&udp_tc_server);
let sph = SpHeader::new_from_apid(0x02);
let pus_tc = PusTcCreator::new_simple(sph, 17, 1, &[], true);
let sph = SpHeader::new_from_apid(u11::new(0x02));
let pus_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
&[],
CreatorConfig::default(),
);
let len = pus_tc
.write_to_bytes(&mut buf)
.expect("Error writing PUS TC packet");


@@ -14,8 +14,8 @@
//! - The [pus] module which provides special support for projects using
//! the [ECSS PUS C standard](https://ecss.nl/standard/ecss-e-st-70-41c-space-engineering-telemetry-and-telecommand-packet-utilization-15-april-2016/).
#![no_std]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#[cfg(feature = "alloc")]
#![cfg_attr(docsrs, feature(doc_cfg))]
#[cfg(any(feature = "alloc", test))]
extern crate alloc;
#[cfg(feature = "alloc")]
extern crate downcast_rs;
@@ -27,7 +27,9 @@ pub mod action;
pub mod dev_mgmt;
pub mod encoding;
pub mod event_man;
pub mod event_man_legacy;
pub mod events;
pub mod events_legacy;
#[cfg(feature = "std")]
pub mod executable;
pub mod hal;
@@ -55,7 +57,7 @@ pub use spacepackets;
use spacepackets::PacketId;
/// Generic component ID type.
pub type ComponentId = u64;
pub type ComponentId = u32;
pub trait ValidatorU16Id {
fn validate(&self, id: u16) -> bool;


@@ -11,11 +11,11 @@ pub use alloc_mod::*;
pub use std_mod::*;
use crate::{
ComponentId,
queue::{GenericReceiveError, GenericSendError},
request::{
GenericMessage, MessageMetadata, MessageReceiverProvider, MessageReceiverWithId, RequestId,
},
ComponentId,
};
pub type Mode = u32;
@@ -257,7 +257,7 @@ pub trait ModeRequestHandler: ModeProvider {
pub trait ModeReplyReceiver {
fn try_recv_mode_reply(&self)
-> Result<Option<GenericMessage<ModeReply>>, GenericReceiveError>;
-> Result<Option<GenericMessage<ModeReply>>, GenericReceiveError>;
}
impl<R: MessageReceiverProvider<ModeReply>> ModeReplyReceiver
@@ -309,12 +309,11 @@ pub mod alloc_mod {
}
impl<
From,
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeReplySender
for MessageSenderAndReceiver<ModeReply, From, Sender, Receiver, SenderStore>
From,
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeReplySender for MessageSenderAndReceiver<ModeReply, From, Sender, Receiver, SenderStore>
{
fn local_channel_id(&self) -> ComponentId {
self.local_channel_id_generic()
@@ -334,12 +333,11 @@ pub mod alloc_mod {
}
impl<
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeReply>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeReplyReceiver
for MessageSenderAndReceiver<To, ModeReply, Sender, Receiver, SenderStore>
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeReply>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeReplyReceiver for MessageSenderAndReceiver<To, ModeReply, Sender, Receiver, SenderStore>
{
fn try_recv_mode_reply(
&self,
@@ -349,15 +347,15 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -376,14 +374,14 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplySender
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplySender
for RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -413,14 +411,14 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplyReceiver
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
ReplySender: MessageSenderProvider<ModeReply>,
ReplyReceiver: MessageReceiverProvider<ModeReply>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, ReplySender>,
> ModeReplyReceiver
for RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -444,10 +442,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ModeReply, ModeRequest, Sender, Receiver, ReplySenderStore>;
impl<
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<ModeRequest>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeRequestHandlerInterface<Sender, Receiver, ReplySenderStore>
Sender: MessageSenderProvider<ModeReply>,
Receiver: MessageReceiverProvider<ModeRequest>,
ReplySenderStore: MessageSenderStoreProvider<ModeReply, Sender>,
> ModeRequestHandlerInterface<Sender, Receiver, ReplySenderStore>
{
pub fn try_recv_mode_request(
&self,
@@ -474,10 +472,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ModeRequest, ModeReply, Sender, Receiver, RequestSenderStore>;
impl<
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<ModeReply>,
RequestSenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestorInterface<Sender, Receiver, RequestSenderStore>
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<ModeReply>,
RequestSenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestorInterface<Sender, Receiver, RequestSenderStore>
{
pub fn try_recv_mode_reply(
&self,
@@ -531,11 +529,11 @@ pub mod alloc_mod {
}
impl<
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeRequest>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeRequestReceiver
To,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<ModeRequest>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> ModeRequestReceiver
for MessageSenderAndReceiver<To, ModeRequest, Sender, Receiver, SenderStore>
{
fn try_recv_mode_request(
@@ -546,11 +544,11 @@ pub mod alloc_mod {
}
impl<
From,
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestSender
From,
Sender: MessageSenderProvider<ModeRequest>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<ModeRequest, Sender>,
> ModeRequestSender
for MessageSenderAndReceiver<ModeRequest, From, Sender, Receiver, SenderStore>
{
fn local_channel_id(&self) -> ComponentId {
@@ -572,14 +570,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -598,14 +596,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestSender
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestSender
for RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -636,14 +634,14 @@ pub mod alloc_mod {
}
impl<
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestReceiver
ReqSender: MessageSenderProvider<ModeRequest>,
ReqReceiver: MessageReceiverProvider<ModeRequest>,
ReqSenderStore: MessageSenderStoreProvider<ModeRequest, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
> ModeRequestReceiver
for RequestAndReplySenderAndReceiver<
ModeRequest,
ReqSender,
@@ -726,7 +724,7 @@ pub(crate) mod tests {
use core::cell::RefCell;
use std::collections::VecDeque;
use crate::{request::RequestId, ComponentId};
use crate::{ComponentId, request::RequestId};
use super::*;


@@ -2,9 +2,9 @@ use alloc::vec::Vec;
use hashbrown::HashMap;
use crate::{
ComponentId,
mode::{Mode, ModeAndSubmode, ModeReply, ModeRequest, Submode},
request::MessageSenderProvider,
ComponentId,
};
#[cfg(feature = "alloc")]


@@ -47,10 +47,10 @@ use crate::pool::PoolAddr;
use core::fmt::Debug;
use core::mem::size_of;
use paste::paste;
use spacepackets::ecss::{EcssEnumU16, EcssEnumU32, EcssEnumU64, EcssEnumU8};
use spacepackets::ByteConversionError;
use spacepackets::ecss::{EcssEnumU8, EcssEnumU16, EcssEnumU32, EcssEnumU64};
pub use spacepackets::util::ToBeBytes;
use spacepackets::util::UnsignedEnum;
use spacepackets::ByteConversionError;
#[cfg(feature = "alloc")]
use alloc::string::{String, ToString};


@@ -222,7 +222,7 @@ impl Error for PoolError {
}
}
/// Generic trait for pool providers which provide memory pools for variable sized data.
/// Generic trait for pool providers which provide memory pools for variable sized packet data.
///
/// It specifies a basic API to [Self::add], [Self::modify], [Self::read] and [Self::delete] data
/// in the store at its core. The API was designed so internal optimizations can be performed
@@ -250,7 +250,7 @@ pub trait PoolProvider {
/// call the user-provided closure and pass a mutable reference to the memory block
/// to the closure. This allows the user to modify the memory block.
fn modify<U: FnMut(&mut [u8])>(&mut self, addr: &PoolAddr, updater: U)
-> Result<(), PoolError>;
-> Result<(), PoolError>;
/// The provider should copy the data from the memory block to the user-provided buffer if
/// it exists.
@@ -258,6 +258,9 @@ pub trait PoolProvider {
/// Delete data inside the pool given a [PoolAddr].
fn delete(&mut self, addr: PoolAddr) -> Result<(), PoolError>;
fn clear(&mut self) -> Result<(), PoolError>;
fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError>;
/// Retrieve the length of the data at the given store address.
@@ -281,7 +284,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely.
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self>;
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self>;
/// This function behaves like [PoolProvider::modify], but consumes the provided
/// address and returns a RAII conformant guard object.
@@ -291,7 +294,7 @@ pub trait PoolProviderWithGuards: PoolProvider {
/// This can prevent memory leaks. Users can read (and modify) the data and release the guard
/// if the data in the store is valid for further processing. If the data is faulty, no
/// manual deletion is necessary when returning from a processing function prematurely.
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self>;
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self>;
}
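A brief usage sketch of the guard API, assuming only the methods exercised by the generic guard tests further below; pool_provider and data are placeholders.

// Sketch: guarded read access. Dropping the guard without release() deletes the entry,
// which avoids leaking pool slots when processing bails out early; release() keeps it.
let addr = pool_provider.add(&data)?;
{
    let mut guard = pool_provider.read_with_guard(addr);
    // ... validate or process the stored data here ...
    guard.release();
} // guard dropped, entry kept because release() was called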
pub struct PoolGuard<'a, MemProvider: PoolProvider + ?Sized> {
@@ -510,11 +513,11 @@ pub mod heapless_mod {
///
/// * `subpool_memory` - Static memory for a particular subpool to store the actual data.
/// * `sizes_list` - Static sizes list structure to store the size of the data which is
/// actually stored.
/// actually stored.
/// * `num_blocks` - The number of memory blocks inside the subpool.
/// * `set_sizes_list_to_all_free` - If this is set to true, the method will take care
/// of setting all values in the sizes list to [super::STORE_FREE]. This does not have
/// to be done if the user initializes the sizes list to that value themselves.
/// of setting all values in the sizes list to [super::STORE_FREE]. This does not have
/// to be done if the user initializes the sizes list to that value themselves.
pub fn grow(
&mut self,
subpool_memory: &'static mut [u8],
@@ -602,7 +605,7 @@ pub mod heapless_mod {
if i < start_at_subpool as usize {
continue;
}
if pool_cfg.block_size as usize >= req_size {
if pool_cfg.block_size >= req_size {
return Ok(i as u16);
}
}
@@ -638,7 +641,7 @@ pub mod heapless_mod {
fn raw_pos(&self, addr: &StaticPoolAddr) -> Option<usize> {
let (pool_cfg, _) = self.pool.get(addr.pool_idx as usize)?;
Some(addr.packet_idx as usize * pool_cfg.block_size as usize)
Some(addr.packet_idx as usize * pool_cfg.block_size)
}
}
@@ -707,13 +710,20 @@ pub mod heapless_mod {
let subpool_cfg = self.pool.get(addr.pool_idx as usize).unwrap().0;
let raw_pos = self.raw_pos(&addr).unwrap();
let block = &mut self.pool.get_mut(addr.pool_idx as usize).unwrap().1
[raw_pos..raw_pos + subpool_cfg.block_size as usize];
[raw_pos..raw_pos + subpool_cfg.block_size];
let size_list = self.sizes_lists.get_mut(addr.pool_idx as usize).unwrap();
size_list[addr.packet_idx as usize] = STORE_FREE;
block.fill(0);
Ok(())
}
fn clear(&mut self) -> Result<(), PoolError> {
for size in self.sizes_lists.iter_mut() {
size.fill(STORE_FREE);
}
Ok(())
}
fn has_element_at(&self, addr: &PoolAddr) -> Result<bool, PoolError> {
let addr = StaticPoolAddr::from(*addr);
self.validate_addr(&addr)?;
@@ -742,11 +752,11 @@ pub mod heapless_mod {
impl<const MAX_NUM_SUBPOOLS: usize> PoolProviderWithGuards
for StaticHeaplessMemoryPool<MAX_NUM_SUBPOOLS>
{
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self> {
PoolRwGuard::new(self, addr)
}
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self> {
PoolGuard::new(self, addr)
}
}
@@ -770,11 +780,11 @@ mod alloc_mod {
/// # Parameters
///
/// * `cfg` - Vector of tuples which represent a subpool. The first entry in the tuple specifies
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// the number of memory blocks in the subpool, the second entry the size of the blocks
/// * `spill_to_higher_subpools` - Specifies whether data will be spilled to higher subpools
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
/// the choking of larger subpools by underdimensioned smaller subpools.
/// if the next fitting subpool is full. This is useful to ensure the pool remains useful
/// for all data sizes as long as possible. However, an undesirable side-effect might be
/// the choking of larger subpools by underdimensioned smaller subpools.
#[derive(Debug, Clone)]
pub struct StaticPoolConfig {
cfg: Vec<SubpoolConfig>,
@@ -1055,14 +1065,21 @@ mod alloc_mod {
_ => size,
})
}
fn clear(&mut self) -> Result<(), PoolError> {
for size in self.sizes_lists.iter_mut() {
size.fill(STORE_FREE);
}
Ok(())
}
}
impl PoolProviderWithGuards for StaticMemoryPool {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<Self> {
fn modify_with_guard(&mut self, addr: PoolAddr) -> PoolRwGuard<'_, Self> {
PoolRwGuard::new(self, addr)
}
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<Self> {
fn read_with_guard(&mut self, addr: PoolAddr) -> PoolGuard<'_, Self> {
PoolGuard::new(self, addr)
}
}
@@ -1307,9 +1324,11 @@ mod tests {
let addr = pool_provider.add(&test_buf).expect("Adding data failed");
let read_guard = PoolGuard::new(pool_provider, addr);
drop(read_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_guard_deletion(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1317,9 +1336,11 @@ mod tests {
let addr = pool_provider.add(&test_buf).expect("Adding data failed");
let read_guard = pool_provider.read_with_guard(addr);
drop(read_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_guard_with_release(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1328,9 +1349,11 @@ mod tests {
let mut read_guard = PoolGuard::new(pool_provider, addr);
read_guard.release();
drop(read_guard);
assert!(pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_modify_guard_man_creation(
@@ -1341,9 +1364,11 @@ mod tests {
let mut rw_guard = PoolRwGuard::new(pool_provider, addr);
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_test_pool_modify_guard(pool_provider: &mut impl PoolProviderWithGuards) {
@@ -1352,9 +1377,11 @@ mod tests {
let mut rw_guard = pool_provider.modify_with_guard(addr);
rw_guard.update(&mut |_| {}).expect("modify failed");
drop(rw_guard);
assert!(!pool_provider
.has_element_at(&addr)
.expect("Invalid address"));
assert!(
!pool_provider
.has_element_at(&addr)
.expect("Invalid address")
);
}
fn generic_modify_pool_index_above_0(pool_provider: &mut impl PoolProvider) {
@@ -1587,228 +1614,243 @@ mod tests {
mod heapless_tests {
use super::*;
use crate::static_subpool;
use std::cell::UnsafeCell;
use std::sync::Mutex;
const SUBPOOL_1_BLOCK_SIZE: usize = 4;
const SUBPOOL_1_NUM_ELEMENTS: u16 = 4;
static SUBPOOL_1: static_cell::ConstStaticCell<
[u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
);
static SUBPOOL_1_SIZES: Mutex<UnsafeCell<[usize; SUBPOOL_1_NUM_ELEMENTS as usize]>> =
Mutex::new(UnsafeCell::new(
[STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize],
));
const SUBPOOL_2_NUM_ELEMENTS: u16 = 2;
const SUBPOOL_2_BLOCK_SIZE: usize = 8;
static SUBPOOL_2: static_cell::ConstStaticCell<
[u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
);
static SUBPOOL_2_SIZES: static_cell::ConstStaticCell<
[usize; SUBPOOL_2_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new([STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize]);
const SUBPOOL_3_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_3_BLOCK_SIZE: usize = 16;
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
const SUBPOOL_4_NUM_ELEMENTS: u16 = 2;
const SUBPOOL_4_BLOCK_SIZE: usize = 16;
static_subpool!(
SUBPOOL_4,
SUBPOOL_4_SIZES,
SUBPOOL_4_NUM_ELEMENTS as usize,
SUBPOOL_4_BLOCK_SIZE
);
const SUBPOOL_5_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_5_BLOCK_SIZE: usize = 8;
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
const SUBPOOL_6_NUM_ELEMENTS: u16 = 1;
const SUBPOOL_6_BLOCK_SIZE: usize = 12;
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
fn small_heapless_pool() -> StaticHeaplessMemoryPool<3> {
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(false);
assert!(heapless_pool
.grow(
SUBPOOL_1.take(),
unsafe { &mut *SUBPOOL_1_SIZES.lock().unwrap().get() },
SUBPOOL_1_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_2.take(),
SUBPOOL_2_SIZES.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
heapless_pool
macro_rules! make_heapless_pool {
($prefix:ident) => {{
paste::paste! {
static [<$prefix _SUBPOOL_1>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_1_NUM_ELEMENTS as usize * SUBPOOL_1_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_1_SIZES>]: std::sync::Mutex<
std::cell::UnsafeCell<[usize; SUBPOOL_1_NUM_ELEMENTS as usize]>
> = std::sync::Mutex::new(
std::cell::UnsafeCell::new([STORE_FREE; SUBPOOL_1_NUM_ELEMENTS as usize])
);
static [<$prefix _SUBPOOL_2>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_2_NUM_ELEMENTS as usize * SUBPOOL_2_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_2_SIZES>]: static_cell::ConstStaticCell<
[usize; SUBPOOL_2_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new(
[STORE_FREE; SUBPOOL_2_NUM_ELEMENTS as usize]
);
static [<$prefix _SUBPOOL_3>]: static_cell::ConstStaticCell<
[u8; SUBPOOL_3_NUM_ELEMENTS as usize * SUBPOOL_3_BLOCK_SIZE],
> = static_cell::ConstStaticCell::new(
[0; SUBPOOL_3_NUM_ELEMENTS as usize * SUBPOOL_3_BLOCK_SIZE],
);
static [<$prefix _SUBPOOL_3_SIZES>]: static_cell::ConstStaticCell<
[usize; SUBPOOL_3_NUM_ELEMENTS as usize],
> = static_cell::ConstStaticCell::new(
[STORE_FREE; SUBPOOL_3_NUM_ELEMENTS as usize]
);
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(false);
heapless_pool
.grow(
[<$prefix _SUBPOOL_1>].take(),
unsafe { &mut *[<$prefix _SUBPOOL_1_SIZES>].lock().unwrap().get() },
SUBPOOL_1_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
.grow(
[<$prefix _SUBPOOL_2>].take(),
[<$prefix _SUBPOOL_2_SIZES>].take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
.grow(
[<$prefix _SUBPOOL_3>].take(),
[<$prefix _SUBPOOL_3_SIZES>].take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.unwrap();
heapless_pool
}
}};
}
#[test]
fn test_heapless_add_and_read() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T0);
generic_test_add_and_read::<16>(&mut pool_provider);
}
#[test]
fn test_add_smaller_than_full_slot() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T1);
generic_test_add_smaller_than_full_slot(&mut pool_provider);
}
#[test]
fn test_delete() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T2);
generic_test_delete(&mut pool_provider);
}
#[test]
fn test_modify() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T3);
generic_test_modify(&mut pool_provider);
}
#[test]
fn test_consecutive_reservation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T4);
generic_test_consecutive_reservation(&mut pool_provider);
}
#[test]
fn test_read_does_not_exist() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T5);
generic_test_read_does_not_exist(&mut pool_provider);
}
#[test]
fn test_store_full() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T6);
generic_test_store_full(&mut pool_provider);
}
#[test]
fn test_invalid_pool_idx() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T7);
generic_test_invalid_pool_idx(&mut pool_provider);
}
#[test]
fn test_invalid_packet_idx() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T8);
generic_test_invalid_packet_idx(&mut pool_provider);
}
#[test]
fn test_add_too_large() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T9);
generic_test_add_too_large(&mut pool_provider);
}
#[test]
fn test_data_too_large_1() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T10);
generic_test_data_too_large_1(&mut pool_provider);
}
#[test]
fn test_free_element_too_large() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T11);
generic_test_free_element_too_large(&mut pool_provider);
}
#[test]
fn test_pool_guard_deletion_man_creation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T12);
generic_test_pool_guard_deletion_man_creation(&mut pool_provider);
}
#[test]
fn test_pool_guard_deletion() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T13);
generic_test_pool_guard_deletion(&mut pool_provider);
}
#[test]
fn test_pool_guard_with_release() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T14);
generic_test_pool_guard_with_release(&mut pool_provider);
}
#[test]
fn test_pool_modify_guard_man_creation() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T15);
generic_test_pool_modify_guard_man_creation(&mut pool_provider);
}
#[test]
fn test_pool_modify_guard() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T16);
generic_test_pool_modify_guard(&mut pool_provider);
}
#[test]
fn modify_pool_index_above_0() {
let mut pool_provider = small_heapless_pool();
let mut pool_provider = make_heapless_pool!(T17);
generic_modify_pool_index_above_0(&mut pool_provider);
}
#[test]
fn test_spills_to_higher_subpools() {
static_subpool!(
SUBPOOL_2_T18,
SUBPOOL_2_SIZES_T18,
SUBPOOL_2_NUM_ELEMENTS as usize,
SUBPOOL_2_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_4_T18,
SUBPOOL_4_SIZES_T18,
SUBPOOL_4_NUM_ELEMENTS as usize,
SUBPOOL_4_BLOCK_SIZE
);
let mut heapless_pool: StaticHeaplessMemoryPool<2> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_2.take(),
SUBPOOL_2_SIZES.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_4.take(),
SUBPOOL_4_SIZES.take(),
SUBPOOL_4_NUM_ELEMENTS,
true
)
.is_ok());
assert!(
heapless_pool
.grow(
SUBPOOL_2_T18.take(),
SUBPOOL_2_SIZES_T18.take(),
SUBPOOL_2_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_4_T18.take(),
SUBPOOL_4_SIZES_T18.take(),
SUBPOOL_4_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spills_to_higher_subpools(&mut heapless_pool);
}
@@ -1816,22 +1858,38 @@ mod tests {
fn test_spillage_fails_as_well() {
let mut heapless_pool: StaticHeaplessMemoryPool<2> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_fails_as_well(&mut heapless_pool);
}
@@ -1839,61 +1897,109 @@ mod tests {
fn test_spillage_works_across_multiple_subpools() {
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_works_across_multiple_subpools(&mut heapless_pool);
}
#[test]
fn test_spillage_fails_across_multiple_subpools() {
static_subpool!(
SUBPOOL_3,
SUBPOOL_3_SIZES,
SUBPOOL_3_NUM_ELEMENTS as usize,
SUBPOOL_3_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_5,
SUBPOOL_5_SIZES,
SUBPOOL_5_NUM_ELEMENTS as usize,
SUBPOOL_5_BLOCK_SIZE
);
static_subpool!(
SUBPOOL_6,
SUBPOOL_6_SIZES,
SUBPOOL_6_NUM_ELEMENTS as usize,
SUBPOOL_6_BLOCK_SIZE
);
let mut heapless_pool: StaticHeaplessMemoryPool<3> =
StaticHeaplessMemoryPool::new(true);
assert!(heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok());
assert!(heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok());
assert!(
heapless_pool
.grow(
SUBPOOL_5.take(),
SUBPOOL_5_SIZES.take(),
SUBPOOL_5_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_6.take(),
SUBPOOL_6_SIZES.take(),
SUBPOOL_6_NUM_ELEMENTS,
true
)
.is_ok()
);
assert!(
heapless_pool
.grow(
SUBPOOL_3.take(),
SUBPOOL_3_SIZES.take(),
SUBPOOL_3_NUM_ELEMENTS,
true
)
.is_ok()
);
generic_test_spillage_fails_across_multiple_subpools(&mut heapless_pool);
}
}


@@ -190,7 +190,7 @@ mod tests {
use std::sync::mpsc::{self, TryRecvError};
use crate::{queue::GenericSendError, request::GenericMessage, ComponentId};
use crate::{ComponentId, queue::GenericSendError, request::GenericMessage};
use super::*;


@@ -65,13 +65,13 @@ impl GenericActionReplyPus {
#[cfg(feature = "alloc")]
pub mod alloc_mod {
use crate::{
ComponentId,
action::ActionRequest,
queue::{GenericReceiveError, GenericSendError},
request::{
GenericMessage, MessageReceiverProvider, MessageSenderAndReceiver,
MessageSenderProvider, MessageSenderStoreProvider, RequestId,
},
ComponentId,
};
use super::ActionReplyPus;
@@ -81,10 +81,10 @@ pub mod alloc_mod {
MessageSenderAndReceiver<ActionReplyPus, ActionRequest, Sender, Receiver, ReplySenderStore>;
impl<
Sender: MessageSenderProvider<ActionReplyPus>,
Receiver: MessageReceiverProvider<ActionRequest>,
ReplySender: MessageSenderStoreProvider<ActionReplyPus, Sender>,
> ActionRequestHandlerInterface<Sender, Receiver, ReplySender>
Sender: MessageSenderProvider<ActionReplyPus>,
Receiver: MessageReceiverProvider<ActionRequest>,
ReplySender: MessageSenderStoreProvider<ActionReplyPus, Sender>,
> ActionRequestHandlerInterface<Sender, Receiver, ReplySender>
{
pub fn try_recv_action_request(
&self,
@@ -114,10 +114,10 @@ pub mod alloc_mod {
>;
impl<
Sender: MessageSenderProvider<ActionRequest>,
Receiver: MessageReceiverProvider<ActionReplyPus>,
RequestSenderStore: MessageSenderStoreProvider<ActionRequest, Sender>,
> ActionRequestorInterface<Sender, Receiver, RequestSenderStore>
Sender: MessageSenderProvider<ActionRequest>,
Receiver: MessageReceiverProvider<ActionReplyPus>,
RequestSenderStore: MessageSenderStoreProvider<ActionRequest, Sender>,
> ActionRequestorInterface<Sender, Receiver, RequestSenderStore>
{
pub fn try_recv_action_reply(
&self,
@@ -141,12 +141,12 @@ pub mod std_mod {
use std::sync::mpsc;
use crate::{
ComponentId,
pus::{
ActivePusRequestStd, ActiveRequest, DefaultActiveRequestMap,
verification::{self, TcStateToken},
ActivePusRequestStd, ActiveRequestProvider, DefaultActiveRequestMap,
},
request::{MessageSenderMap, OneMessageSender},
ComponentId,
};
use super::*;
@@ -157,7 +157,7 @@ pub mod std_mod {
common: ActivePusRequestStd,
}
impl ActiveRequestProvider for ActivePusActionRequestStd {
impl ActiveRequest for ActivePusActionRequestStd {
delegate::delegate! {
to self.common {
fn target_id(&self) -> ComponentId;


@@ -1,9 +1,12 @@
use crate::pus::source_buffer_large_enough;
use arbitrary_int::u11;
use spacepackets::ByteConversionError;
use spacepackets::SpHeader;
use spacepackets::ecss::CreatorConfig;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ecss::MessageTypeId;
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::tm::PusTmSecondaryHeader;
use spacepackets::ecss::EcssEnumeration;
use spacepackets::ByteConversionError;
use spacepackets::{SpHeader, MAX_APID};
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
@@ -11,16 +14,13 @@ pub use alloc_mod::*;
pub use spacepackets::ecss::event::*;
pub struct EventReportCreator {
apid: u16,
apid: u11,
pub dest_id: u16,
}
impl EventReportCreator {
pub fn new(apid: u16, dest_id: u16) -> Option<Self> {
if apid > MAX_APID {
return None;
}
Some(Self { dest_id, apid })
pub fn new(apid: u11, dest_id: u16) -> Self {
Self { dest_id, apid }
}
pub fn event_info<'time, 'src_data>(
@@ -31,7 +31,7 @@ impl EventReportCreator {
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
Subservice::TmInfoReport,
MessageSubtypeId::TmInfoReport,
time_stamp,
event_id,
params,
@@ -47,7 +47,7 @@ impl EventReportCreator {
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
Subservice::TmLowSeverityReport,
MessageSubtypeId::TmLowSeverityReport,
time_stamp,
event_id,
params,
@@ -63,7 +63,7 @@ impl EventReportCreator {
buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
Subservice::TmMediumSeverityReport,
MessageSubtypeId::TmMediumSeverityReport,
time_stamp,
event_id,
params,
@@ -79,7 +79,7 @@ impl EventReportCreator {
src_data_buf: &'src_data mut [u8],
) -> Result<PusTmCreator<'time, 'src_data>, ByteConversionError> {
self.generate_and_send_generic_tm(
Subservice::TmHighSeverityReport,
MessageSubtypeId::TmHighSeverityReport,
time_stamp,
event_id,
params,
@@ -89,7 +89,7 @@ impl EventReportCreator {
fn generate_and_send_generic_tm<'time, 'src_data>(
&self,
subservice: Subservice,
subservice: MessageSubtypeId,
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
params: Option<&'src_data [u8]>,
@@ -100,7 +100,7 @@ impl EventReportCreator {
fn generate_generic_event_tm<'time, 'src_data>(
&self,
subservice: Subservice,
subservice: MessageSubtypeId,
time_stamp: &'time [u8],
event_id: impl EcssEnumeration,
params: Option<&'src_data [u8]>,
@@ -111,8 +111,12 @@ impl EventReportCreator {
src_data_len += aux_data.len();
}
source_buffer_large_enough(src_data_buf.len(), src_data_len)?;
let sec_header =
PusTmSecondaryHeader::new(5, subservice.into(), 0, self.dest_id, time_stamp);
let sec_header = PusTmSecondaryHeader::new(
MessageTypeId::new(5, subservice.into()),
0,
self.dest_id,
time_stamp,
);
let mut current_idx = 0;
event_id.write_to_be_bytes(&mut src_data_buf[0..event_id.size()])?;
current_idx += event_id.size();
@@ -124,7 +128,7 @@ impl EventReportCreator {
SpHeader::new_from_apid(self.apid),
sec_header,
&src_data_buf[0..current_idx],
true,
CreatorConfig::default(),
))
}
}
@@ -132,64 +136,64 @@ impl EventReportCreator {
#[cfg(feature = "alloc")]
mod alloc_mod {
use super::*;
use crate::pus::{EcssTmSender, EcssTmtcError};
use crate::ComponentId;
use crate::pus::{EcssTmSender, EcssTmtcError};
use alloc::vec;
use alloc::vec::Vec;
use core::cell::RefCell;
use spacepackets::ecss::PusError;
pub trait EventTmHookProvider {
pub trait EventTmHook {
fn modify_tm(&self, tm: &mut PusTmCreator);
}
#[derive(Default)]
pub struct DummyEventHook {}
impl EventTmHookProvider for DummyEventHook {
impl EventTmHook for DummyEventHook {
fn modify_tm(&self, _tm: &mut PusTmCreator) {}
}
pub struct EventReporter<EventTmHook: EventTmHookProvider = DummyEventHook> {
pub struct EventReporter<EventTmHookInstance: EventTmHook = DummyEventHook> {
id: ComponentId,
// Use the interior mutability pattern here. This is just an intermediate buffer for the
// PUS event packet generation.
source_data_buf: RefCell<Vec<u8>>,
pub report_creator: EventReportCreator,
pub tm_hook: EventTmHook,
pub tm_hook: EventTmHookInstance,
}
impl EventReporter<DummyEventHook> {
pub fn new(
id: ComponentId,
default_apid: u16,
default_apid: u11,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
) -> Self {
let reporter = EventReportCreator::new(default_apid, default_dest_id);
Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook: DummyEventHook::default(),
})
}
}
}
impl<EventTmHook: EventTmHookProvider> EventReporter<EventTmHook> {
impl<EventTmHookInstance: EventTmHook> EventReporter<EventTmHookInstance> {
pub fn new_with_hook(
id: ComponentId,
default_apid: u16,
default_apid: u11,
default_dest_id: u16,
max_event_id_and_aux_data_size: usize,
tm_hook: EventTmHook,
) -> Option<Self> {
let reporter = EventReportCreator::new(default_apid, default_dest_id)?;
Some(Self {
tm_hook: EventTmHookInstance,
) -> Self {
let reporter = EventReportCreator::new(default_apid, default_dest_id);
Self {
id,
source_data_buf: RefCell::new(vec![0; max_event_id_and_aux_data_size]),
report_creator: reporter,
tm_hook,
})
}
}
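// Not part of this changeset: wiring a custom hook into a reporter, mirroring the test
// setup further below (the 128 byte buffer size and the hypothetical CountingEventHook
// from the sketch above are assumptions):
//     let reporter = EventReporter::new_with_hook(
//         component_id,
//         u11::new(0x02),
//         0,
//         128,
//         CountingEventHook::default(),
//     );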
pub fn event_info(
@@ -265,18 +269,18 @@ mod alloc_mod {
#[cfg(test)]
mod tests {
use super::*;
use crate::events::{EventU32, Severity};
use crate::ComponentId;
use crate::events_legacy::{EventU32, Severity};
use crate::pus::test_util::TEST_COMPONENT_ID_0;
use crate::pus::tests::CommonTmInfo;
use crate::pus::{ChannelWithId, EcssTmSender, EcssTmtcError, PusTmVariant};
use crate::ComponentId;
use spacepackets::ecss::PusError;
use spacepackets::ByteConversionError;
use spacepackets::ecss::PusError;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::vec::Vec;
const EXAMPLE_APID: u16 = 0xee;
const EXAMPLE_APID: u11 = u11::new(0xee);
const EXAMPLE_GROUP_ID: u16 = 2;
const EXAMPLE_EVENT_ID_0: u16 = 1;
#[allow(dead_code)]
@@ -329,12 +333,12 @@ mod tests {
}
}
fn severity_to_subservice(severity: Severity) -> Subservice {
fn severity_to_subservice(severity: Severity) -> MessageSubtypeId {
match severity {
Severity::Info => Subservice::TmInfoReport,
Severity::Low => Subservice::TmLowSeverityReport,
Severity::Medium => Subservice::TmMediumSeverityReport,
Severity::High => Subservice::TmHighSeverityReport,
Severity::Info => MessageSubtypeId::TmInfoReport,
Severity::Low => MessageSubtypeId::TmLowSeverityReport,
Severity::Medium => MessageSubtypeId::TmMediumSeverityReport,
Severity::High => MessageSubtypeId::TmHighSeverityReport,
}
}
@@ -376,14 +380,12 @@ mod tests {
error_data: Option<&[u8]>,
) {
let mut sender = TestSender::default();
let reporter = EventReporter::new(
let mut reporter = EventReporter::new(
TEST_COMPONENT_ID_0.id(),
EXAMPLE_APID,
0,
max_event_aux_data_buf,
);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let time_stamp_empty: [u8; 7] = [0; 7];
let mut error_copy = Vec::new();
if let Some(err_data) = error_data {
@@ -474,9 +476,7 @@ mod tests {
fn insufficient_buffer() {
let mut sender = TestSender::default();
for i in 0..3 {
let reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
assert!(reporter.is_some());
let mut reporter = reporter.unwrap();
let mut reporter = EventReporter::new(0, EXAMPLE_APID, 0, i);
check_buf_too_small(&mut reporter, &mut sender, i);
}
}

View File

@@ -1,18 +1,18 @@
use crate::events::{EventU32, GenericEvent, Severity};
use crate::events_legacy::{EventU32, GenericEvent, Severity};
#[cfg(feature = "alloc")]
use crate::events::{EventU32TypedSev, HasSeverity};
use crate::events_legacy::{EventU32TypedSev, HasSeverity};
#[cfg(feature = "alloc")]
use core::hash::Hash;
#[cfg(feature = "alloc")]
use hashbrown::HashSet;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
use crate::pus::EcssTmSender;
use crate::pus::EcssTmtcError;
#[cfg(feature = "alloc")]
pub use crate::pus::event::EventReporter;
use crate::pus::verification::TcStateToken;
#[cfg(feature = "alloc")]
pub use alloc_mod::*;
#[cfg(feature = "heapless")]
pub use heapless_mod::*;
@@ -46,7 +46,7 @@ pub mod heapless_mod {
// regular Event type again.
#[derive(Default)]
pub struct HeaplessPusMgmtBackendProvider<const N: usize, Provider: GenericEvent> {
disabled: heapless::FnvIndexSet<LargestEventRaw, N>,
disabled: heapless::index_set::FnvIndexSet<LargestEventRaw, N>,
phantom: PhantomData<Provider>,
}
@@ -100,9 +100,9 @@ pub mod alloc_mod {
use core::marker::PhantomData;
use crate::{
events::EventU16,
events_legacy::EventU16,
params::{Params, WritableToBeBytes},
pus::event::{DummyEventHook, EventTmHookProvider},
pus::event::{DummyEventHook, EventTmHook},
};
use super::*;
@@ -151,20 +151,20 @@ pub mod alloc_mod {
pub struct PusEventTmCreatorWithMap<
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHook: EventTmHookProvider = DummyEventHook,
EventTmHookInstance: EventTmHook = DummyEventHook,
> {
pub reporter: EventReporter<EventTmHook>,
pub reporter: EventReporter<EventTmHookInstance>,
reporting_map: ReportingMap,
phantom: PhantomData<Event>,
}
impl<
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHook: EventTmHookProvider,
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHook>
ReportingMap: PusEventReportingMapProvider<Event>,
Event: GenericEvent,
EventTmHookInstance: EventTmHook,
> PusEventTmCreatorWithMap<ReportingMap, Event, EventTmHookInstance>
{
pub fn new(reporter: EventReporter<EventTmHook>, backend: ReportingMap) -> Self {
pub fn new(reporter: EventReporter<EventTmHookInstance>, backend: ReportingMap) -> Self {
Self {
reporter,
reporting_map: backend,
@@ -262,10 +262,10 @@ pub mod alloc_mod {
}
}
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHook: EventTmHookProvider>
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHook>
impl<Event: GenericEvent + Copy + PartialEq + Eq + Hash, EventTmHookInstance: EventTmHook>
PusEventTmCreatorWithMap<DefaultPusEventReportingMap<Event>, Event, EventTmHookInstance>
{
pub fn new_with_default_backend(reporter: EventReporter<EventTmHook>) -> Self {
pub fn new_with_default_backend(reporter: EventReporter<EventTmHookInstance>) -> Self {
Self {
reporter,
reporting_map: DefaultPusEventReportingMap::default(),
@@ -311,29 +311,28 @@ pub mod alloc_mod {
mod tests {
use alloc::string::{String, ToString};
use alloc::vec;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::tm::PusTmReader;
use arbitrary_int::{u11, u21};
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::event::MessageSubtypeId;
use spacepackets::ecss::tm::PusTmReader;
use super::*;
use crate::request::UniqueApidTargetId;
use crate::{events::SeverityInfo, tmtc::PacketAsVec};
use crate::{events_legacy::SeverityInfo, tmtc::PacketAsVec};
use std::sync::mpsc::{self, TryRecvError};
const INFO_EVENT: EventU32TypedSev<SeverityInfo> = EventU32TypedSev::<SeverityInfo>::new(1, 0);
const LOW_SEV_EVENT: EventU32 = EventU32::new(Severity::Low, 1, 5);
const EMPTY_STAMP: [u8; 7] = [0; 7];
const TEST_APID: u16 = 0x02;
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, 0x05);
const TEST_APID: u11 = u11::new(0x02);
const TEST_ID: UniqueApidTargetId = UniqueApidTargetId::new(TEST_APID, u21::new(0x05));
fn create_basic_man_1() -> DefaultPusEventU32TmCreator {
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
.expect("Creating event repoter failed");
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
PusEventTmCreatorWithMap::new_with_default_backend(reporter)
}
fn create_basic_man_2() -> DefaultPusEventU32TmCreator {
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128)
.expect("Creating event repoter failed");
let reporter = EventReporter::new(TEST_ID.raw(), TEST_APID, 0, 128);
let backend = DefaultPusEventReportingMap::default();
PusEventTmCreatorWithMap::new(reporter, backend)
}
@@ -409,9 +408,12 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service_type_id(), 5);
assert_eq!(
tm.message_subtype_id(),
MessageSubtypeId::TmInfoReport as u8
);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
assert_eq!(u32_event, INFO_EVENT.raw());
@@ -437,9 +439,12 @@ mod tests {
assert!(res.event_was_enabled);
assert!(res.params_were_propagated);
let event_tm = event_rx.try_recv().expect("no event received");
let (tm, _) = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service(), 5);
assert_eq!(tm.subservice(), Subservice::TmInfoReport as u8);
let tm = PusTmReader::new(&event_tm.packet, 7).expect("reading TM failed");
assert_eq!(tm.service_type_id(), 5);
assert_eq!(
tm.message_subtype_id(),
MessageSubtypeId::TmInfoReport as u8
);
assert_eq!(tm.user_data().len(), 4 + param_data.len());
let u32_event = u32::from_be_bytes(tm.user_data()[0..4].try_into().unwrap());
assert_eq!(u32_event, INFO_EVENT.raw());

View File

@@ -1,22 +1,22 @@
use crate::events::EventU32;
use crate::events_legacy::EventU32;
use crate::pus::event_man::{EventRequest, EventRequestWithToken};
use crate::pus::verification::TcStateToken;
use crate::pus::{DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError};
use crate::queue::GenericSendError;
use spacepackets::ecss::event::Subservice;
use spacepackets::ecss::PusPacket;
use spacepackets::ecss::event::MessageSubtypeId;
use std::sync::mpsc::Sender;
use super::verification::VerificationReportingProvider;
use super::{
EcssTcInMemConversionProvider, EcssTcReceiver, EcssTmSender, GenericConversionError,
CacheAndReadRawEcssTc, EcssTcReceiver, EcssTmSender, GenericConversionError,
GenericRoutingError, HandlingStatus, PusServiceHelper,
};
pub struct PusEventServiceHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper:
@@ -25,11 +25,11 @@ pub struct PusEventServiceHandler<
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusEventServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -60,11 +60,11 @@ impl<
.tc_in_mem_converter_mut()
.cache(&ecss_tc_and_token.tc_in_memory)?;
let tc = self.service_helper.tc_in_mem_converter().convert()?;
let subservice = tc.subservice();
let srv = Subservice::try_from(subservice);
let subservice = tc.message_subtype_id();
let srv = MessageSubtypeId::try_from(subservice);
if srv.is_err() {
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
tc.subservice(),
tc.message_subtype_id(),
ecss_tc_and_token.token,
));
}
@@ -116,21 +116,21 @@ impl<
};
match srv.unwrap() {
Subservice::TmInfoReport
| Subservice::TmLowSeverityReport
| Subservice::TmMediumSeverityReport
| Subservice::TmHighSeverityReport => {
MessageSubtypeId::TmInfoReport
| MessageSubtypeId::TmLowSeverityReport
| MessageSubtypeId::TmMediumSeverityReport
| MessageSubtypeId::TmHighSeverityReport => {
return Err(PusPacketHandlingError::RequestConversion(
GenericConversionError::WrongService(tc.subservice()),
))
GenericConversionError::WrongService(tc.message_subtype_id()),
));
}
Subservice::TcEnableEventGeneration => {
MessageSubtypeId::TcEnableEventGeneration => {
handle_enable_disable_request(true)?;
}
Subservice::TcDisableEventGeneration => {
MessageSubtypeId::TcDisableEventGeneration => {
handle_enable_disable_request(false)?;
}
Subservice::TcReportDisabledList | Subservice::TmDisabledEventsReport => {
MessageSubtypeId::TcReportDisabledList | MessageSubtypeId::TmDisabledEventsReport => {
return Ok(DirectPusPacketHandlerResult::SubserviceNotImplemented(
subservice,
ecss_tc_and_token.token,
@@ -144,16 +144,19 @@ impl<
#[cfg(test)]
mod tests {
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::ecss::event::Subservice;
use spacepackets::time::{cds, TimeWriter};
use spacepackets::ecss::event::MessageSubtypeId;
use spacepackets::ecss::{CreatorConfig, MessageTypeId};
use spacepackets::time::{TimeWriter, cds};
use spacepackets::util::UnsignedEnum;
use spacepackets::{
SpHeader,
ecss::{
tc::{PusTcCreator, PusTcSecondaryHeader},
tm::PusTmReader,
},
SpHeader,
};
use std::sync::mpsc::{self, Sender};
@@ -165,25 +168,25 @@ mod tests {
use crate::pus::{GenericConversionError, HandlingStatus, MpscTcReceiver};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::{
events::EventU32,
events_legacy::EventU32,
pus::{
DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, PusPacketHandlingError,
event_man::EventRequestWithToken,
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{TcStateAccepted, VerificationToken},
DirectPusPacketHandlerResult, EcssTcInSharedPoolConverter, PusPacketHandlingError,
},
};
use super::PusEventServiceHandler;
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events::Severity::Info, 5, 25);
const TEST_EVENT_0: EventU32 = EventU32::new(crate::events_legacy::Severity::Info, 5, 25);
struct Pus5HandlerWithStoreTester {
common: PusServiceHandlerWithSharedStoreCommon,
handler: PusEventServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>,
}
@@ -199,8 +202,12 @@ mod tests {
}
impl PusTestHarness for Pus5HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -234,18 +241,18 @@ mod tests {
fn event_test(
test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler),
subservice: Subservice,
subservice: MessageSubtypeId,
expected_event_req: EventRequest,
event_req_receiver: mpsc::Receiver<EventRequestWithToken>,
) {
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(5, subservice as u8);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(5, subservice as u8));
let mut app_data = [0; 4];
TEST_EVENT_0
.write_to_be_bytes(&mut app_data)
.expect("writing test event failed");
let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, true);
let token = test_harness.init_verification(&ping_tc);
let ping_tc = PusTcCreator::new(sp_header, sec_header, &app_data, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let request_id = token.request_id();
test_harness.handle_one_tc().unwrap();
@@ -265,7 +272,7 @@ mod tests {
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
event_test(
&mut test_harness,
Subservice::TcEnableEventGeneration,
MessageSubtypeId::TcEnableEventGeneration,
EventRequest::Enable(TEST_EVENT_0),
event_request_rx,
);
@@ -277,7 +284,7 @@ mod tests {
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
event_test(
&mut test_harness,
Subservice::TcDisableEventGeneration,
MessageSubtypeId::TcDisableEventGeneration,
EventRequest::Disable(TEST_EVENT_0),
event_request_rx,
);
@@ -303,10 +310,11 @@ mod tests {
fn test_sending_custom_subservice() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(5, 200);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(5, 200));
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
@@ -322,11 +330,14 @@ mod tests {
fn test_sending_invalid_app_data() {
let (event_request_tx, _) = mpsc::channel();
let mut test_harness = Pus5HandlerWithStoreTester::new(event_request_tx);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header =
PusTcSecondaryHeader::new_simple(5, Subservice::TcEnableEventGeneration as u8);
let ping_tc = PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], true);
let token = test_harness.init_verification(&ping_tc);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(
5,
MessageSubtypeId::TcEnableEventGeneration as u8,
));
let ping_tc =
PusTcCreator::new(sp_header, sec_header, &[0, 1, 2], CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());

View File

@@ -2,6 +2,7 @@
//!
//! This module contains structures to make working with the PUS C standard easier.
//! The satrs-example application contains various usage examples of these components.
use crate::ComponentId;
use crate::pool::{PoolAddr, PoolError};
use crate::pus::verification::{TcStateAccepted, TcStateToken, VerificationToken};
use crate::queue::{GenericReceiveError, GenericSendError};
@@ -9,19 +10,18 @@ use crate::request::{GenericMessage, MessageMetadata, RequestId};
#[cfg(feature = "alloc")]
use crate::tmtc::PacketAsVec;
use crate::tmtc::PacketInPool;
use crate::ComponentId;
use core::fmt::{Display, Formatter};
use core::time::Duration;
#[cfg(feature = "alloc")]
use downcast_rs::{impl_downcast, Downcast};
use downcast_rs::{Downcast, impl_downcast};
#[cfg(feature = "alloc")]
use dyn_clone::DynClone;
#[cfg(feature = "std")]
use std::error::Error;
use spacepackets::ecss::PusError;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
use spacepackets::ecss::tm::PusTmCreator;
use spacepackets::ecss::PusError;
use spacepackets::{ByteConversionError, SpHeader};
pub mod action;
@@ -296,7 +296,7 @@ pub trait PacketSenderPusTc: Send {
) -> Result<(), Self::Error>;
}
pub trait ActiveRequestMapProvider<V>: Sized {
pub trait ActiveRequestStore<V>: Sized {
fn insert(&mut self, request_id: &RequestId, request_info: V);
fn get(&self, request_id: RequestId) -> Option<&V>;
fn get_mut(&mut self, request_id: RequestId) -> Option<&mut V>;
@@ -309,7 +309,7 @@ pub trait ActiveRequestMapProvider<V>: Sized {
fn for_each_mut<F: FnMut(&RequestId, &mut V)>(&mut self, f: F);
}
pub trait ActiveRequestProvider {
pub trait ActiveRequest {
fn target_id(&self) -> ComponentId;
fn token(&self) -> TcStateToken;
fn set_token(&mut self, token: TcStateToken);
@@ -330,7 +330,7 @@ pub trait PusRequestRouter<Request> {
) -> Result<(), Self::Error>;
}
pub trait PusReplyHandler<ActiveRequestInfo: ActiveRequestProvider, ReplyType> {
pub trait PusReplyHandler<ActiveRequestInfo: ActiveRequest, ReplyType> {
type Error;
/// This function handles a reply for a given PUS request and returns whether that request
@@ -449,7 +449,7 @@ pub mod alloc_mod {
/// Having a dedicated trait for this allows maximum flexibility and tailoring of the standard.
/// The only requirement is that a valid active request information instance and a request
/// are returned by the core conversion function. The active request type needs to fulfill
/// the [ActiveRequestProvider] trait bound.
/// the [ActiveRequest] trait bound.
///
/// The user should take care of performing the error handling as well. Some of the following
/// aspects might be relevant:
@@ -459,7 +459,7 @@ pub mod alloc_mod {
///
/// A [VerificationReportingProvider] instance is passed to the user to also allow handling
/// of the verification process as part of the PUS standard requirements.
pub trait PusTcToRequestConverter<ActiveRequestInfo: ActiveRequestProvider, Request> {
pub trait PusTcToRequestConverter<ActiveRequestInfo: ActiveRequest, Request> {
type Error;
fn convert(
&mut self,
@@ -480,7 +480,7 @@ pub mod alloc_mod {
}
}
impl<V> ActiveRequestMapProvider<V> for DefaultActiveRequestMap<V> {
impl<V> ActiveRequestStore<V> for DefaultActiveRequestMap<V> {
fn insert(&mut self, request_id: &RequestId, request: V) {
self.0.insert(*request_id, request);
}
@@ -659,18 +659,18 @@ pub mod alloc_mod {
#[cfg(feature = "std")]
pub mod std_mod {
use super::*;
use crate::ComponentId;
use crate::pool::{
PoolAddr, PoolError, PoolProvider, PoolProviderWithGuards, SharedStaticMemoryPool,
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use crate::ComponentId;
use alloc::vec::Vec;
use core::time::Duration;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::StdTimestampError;
use spacepackets::ByteConversionError;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::ecss::tc::PusTcReader;
use spacepackets::time::StdTimestampError;
use std::string::String;
use std::sync::mpsc;
use std::sync::mpsc::TryRecvError;
@@ -680,7 +680,7 @@ pub mod std_mod {
pub use cb_mod::*;
use super::verification::{TcStateToken, VerificationReportingProvider};
use super::{AcceptedEcssTcAndToken, ActiveRequestProvider, TcInMemory};
use super::{AcceptedEcssTcAndToken, ActiveRequest, TcInMemory};
use crate::tmtc::PacketInPool;
impl From<mpsc::SendError<PoolAddr>> for EcssTmtcError {
@@ -845,7 +845,7 @@ pub mod std_mod {
}
}
impl ActiveRequestProvider for ActivePusRequestStd {
impl ActiveRequest for ActivePusRequestStd {
fn target_id(&self) -> ComponentId {
self.target_id
}
@@ -947,7 +947,9 @@ pub mod std_mod {
}
}
pub trait EcssTcInMemConversionProvider {
/// This trait provides an abstraction for caching a raw ECSS telecommand and then
/// providing the [PusTcReader] abstraction to read the cached raw telecommand.
pub trait CacheAndReadRawEcssTc {
fn cache(&mut self, possible_packet: &TcInMemory) -> Result<(), PusTcFromMemError>;
fn tc_slice_raw(&self) -> &[u8];
@@ -959,15 +961,11 @@ pub mod std_mod {
possible_packet: &TcInMemory,
) -> Result<PusTcReader<'_>, PusTcFromMemError> {
self.cache(possible_packet)?;
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
fn convert(&self) -> Result<PusTcReader<'_>, PusTcFromMemError> {
Ok(PusTcReader::new(self.tc_slice_raw())
.map_err(EcssTmtcError::Pus)?
.0)
Ok(PusTcReader::new(self.tc_slice_raw()).map_err(EcssTmtcError::Pus)?)
}
}
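// Usage sketch for the renamed caching abstraction (not part of this changeset),
// assuming `tc_in_memory: TcInMemory` was received from a TC channel:
//     let mut cacher = EcssTcVecCacher::default();
//     cacher.cache(&tc_in_memory)?;
//     let pus_tc = cacher.convert()?; // PusTcReader over the cached raw bytes
// convert_cached performs both steps in a single call.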
@@ -975,12 +973,12 @@ pub mod std_mod {
/// Please note that this structure is not able to convert TCs which are stored inside a
/// [SharedStaticMemoryPool].
#[derive(Default, Clone)]
pub struct EcssTcInVecConverter {
pub struct EcssTcVecCacher {
sender_id: Option<ComponentId>,
pub pus_tc_raw: Option<Vec<u8>>,
}
impl EcssTcInMemConversionProvider for EcssTcInVecConverter {
impl CacheAndReadRawEcssTc for EcssTcVecCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
self.pus_tc_raw = None;
match tc_in_memory {
@@ -1012,13 +1010,13 @@ pub mod std_mod {
/// packets should be avoided. Please note that this structure is not able to convert TCs which
/// are stored as a `Vec<u8>`.
#[derive(Clone)]
pub struct EcssTcInSharedPoolConverter {
pub struct EcssTcInSharedPoolCacher {
sender_id: Option<ComponentId>,
shared_tc_pool: SharedStaticMemoryPool,
pus_buf: Vec<u8>,
}
impl EcssTcInSharedPoolConverter {
impl EcssTcInSharedPoolCacher {
pub fn new(shared_tc_store: SharedStaticMemoryPool, max_expected_tc_size: usize) -> Self {
Self {
sender_id: None,
@@ -1049,7 +1047,7 @@ pub mod std_mod {
}
}
impl EcssTcInMemConversionProvider for EcssTcInSharedPoolConverter {
impl CacheAndReadRawEcssTc for EcssTcInSharedPoolCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
match tc_in_memory {
super::TcInMemory::Pool(packet_in_pool) => {
@@ -1074,38 +1072,38 @@ pub mod std_mod {
// TODO: alloc feature flag?
#[derive(Clone)]
pub enum EcssTcInMemConverter {
Static(EcssTcInSharedPoolConverter),
Heap(EcssTcInVecConverter),
pub enum EcssTcCacher {
Static(EcssTcInSharedPoolCacher),
Heap(EcssTcVecCacher),
}
impl EcssTcInMemConverter {
pub fn new_static(static_store_converter: EcssTcInSharedPoolConverter) -> Self {
EcssTcInMemConverter::Static(static_store_converter)
impl EcssTcCacher {
pub fn new_static(static_store_converter: EcssTcInSharedPoolCacher) -> Self {
Self::Static(static_store_converter)
}
pub fn new_heap(heap_converter: EcssTcInVecConverter) -> Self {
EcssTcInMemConverter::Heap(heap_converter)
pub fn new_heap(heap_converter: EcssTcVecCacher) -> Self {
Self::Heap(heap_converter)
}
}
impl EcssTcInMemConversionProvider for EcssTcInMemConverter {
impl CacheAndReadRawEcssTc for EcssTcCacher {
fn cache(&mut self, tc_in_memory: &TcInMemory) -> Result<(), PusTcFromMemError> {
match self {
EcssTcInMemConverter::Static(converter) => converter.cache(tc_in_memory),
EcssTcInMemConverter::Heap(converter) => converter.cache(tc_in_memory),
Self::Static(converter) => converter.cache(tc_in_memory),
Self::Heap(converter) => converter.cache(tc_in_memory),
}
}
fn tc_slice_raw(&self) -> &[u8] {
match self {
EcssTcInMemConverter::Static(converter) => converter.tc_slice_raw(),
EcssTcInMemConverter::Heap(converter) => converter.tc_slice_raw(),
Self::Static(converter) => converter.tc_slice_raw(),
Self::Heap(converter) => converter.tc_slice_raw(),
}
}
fn sender_id(&self) -> Option<ComponentId> {
match self {
EcssTcInMemConverter::Static(converter) => converter.sender_id(),
EcssTcInMemConverter::Heap(converter) => converter.sender_id(),
Self::Static(converter) => converter.sender_id(),
Self::Heap(converter) => converter.sender_id(),
}
}
}
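// Not part of this changeset: selecting a cacher variant at runtime. The enum
// dispatches to either the shared-pool or the Vec backed cacher behind the same
// CacheAndReadRawEcssTc interface:
//     let cacher = EcssTcCacher::new_heap(EcssTcVecCacher::default());
//     // `cacher` can now be used wherever a CacheAndReadRawEcssTc bound is required,
//     // for example as the TcInMemConverter generic of PusServiceHelper.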
@@ -1128,12 +1126,12 @@ pub mod std_mod {
/// groups (for example individual services).
///
/// This base class can handle PUS telecommands backed by different memory storage mechanisms
/// by using the [EcssTcInMemConverter] abstraction. This object provides some convenience
/// by using the [CacheAndReadRawEcssTc] abstraction. This object provides some convenience
/// methods to make the generic parts of TC handling easier.
pub struct PusServiceHelper<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub common: PusServiceBase<TcReceiver, TmSender, VerificationReporter>,
@@ -1141,11 +1139,11 @@ pub mod std_mod {
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusServiceHelper<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
id: ComponentId,
@@ -1260,18 +1258,19 @@ pub(crate) fn source_buffer_large_enough(
#[cfg(any(feature = "test_util", test))]
pub mod test_util {
use arbitrary_int::{u11, u21};
use spacepackets::ecss::{tc::PusTcCreator, tm::PusTmReader};
use crate::request::UniqueApidTargetId;
use super::{
verification::{self, TcStateAccepted, VerificationToken},
DirectPusPacketHandlerResult, PusPacketHandlingError,
verification::{self, TcStateAccepted, VerificationToken},
};
pub const TEST_APID: u16 = 0x101;
pub const TEST_UNIQUE_ID_0: u32 = 0x05;
pub const TEST_UNIQUE_ID_1: u32 = 0x06;
pub const TEST_APID: u11 = u11::new(0x101);
pub const TEST_UNIQUE_ID_0: u21 = u21::new(0x05);
pub const TEST_UNIQUE_ID_1: u21 = u21::new(0x06);
pub const TEST_COMPONENT_ID_0: UniqueApidTargetId =
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_0);
@@ -1279,7 +1278,7 @@ pub mod test_util {
UniqueApidTargetId::new(TEST_APID, TEST_UNIQUE_ID_1);
pub trait PusTestHarness {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted>;
fn send_tc(&self, token: &VerificationToken<TcStateAccepted>, tc: &PusTcCreator);
fn read_next_tm(&mut self) -> PusTmReader<'_>;
fn check_no_tm_available(&self) -> bool;
@@ -1292,7 +1291,7 @@ pub mod test_util {
pub trait SimplePusPacketHandler {
fn handle_one_tc(&mut self)
-> Result<DirectPusPacketHandlerResult, PusPacketHandlingError>;
-> Result<DirectPusPacketHandlerResult, PusPacketHandlingError>;
}
}
@@ -1300,33 +1299,35 @@ pub mod test_util {
pub mod tests {
use core::cell::RefCell;
use std::sync::mpsc::TryRecvError;
use std::sync::{mpsc, RwLock};
use std::sync::{RwLock, mpsc};
use alloc::collections::VecDeque;
use alloc::vec::Vec;
use arbitrary_int::{u11, u14};
use satrs_shared::res_code::ResultU16;
use spacepackets::CcsdsPacket;
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader};
use spacepackets::ecss::tm::{GenericPusTmSecondaryHeader, PusTmCreator, PusTmReader};
use spacepackets::ecss::{PusPacket, WritablePusPacket};
use spacepackets::CcsdsPacket;
use test_util::{TEST_APID, TEST_COMPONENT_ID_0};
use crate::ComponentId;
use crate::pool::{PoolProvider, SharedStaticMemoryPool, StaticMemoryPool, StaticPoolConfig};
use crate::pus::verification::{RequestId, VerificationReporter};
use crate::tmtc::{PacketAsVec, PacketInPool, PacketSenderWithSharedPool, SharedPacketPool};
use crate::ComponentId;
use super::verification::test_util::TestVerificationReporter;
use super::verification::{
TcStateAccepted, VerificationReporterCfg, VerificationReportingProvider, VerificationToken,
TcStateAccepted, VerificationReporterConfig, VerificationReportingProvider,
VerificationToken,
};
use super::*;
#[derive(Debug, Eq, PartialEq, Clone)]
pub(crate) struct CommonTmInfo {
pub subservice: u8,
pub apid: u16,
pub seq_count: u16,
pub apid: u11,
pub seq_count: u14,
pub msg_counter: u16,
pub dest_id: u16,
pub timestamp: Vec<u8>,
@@ -1335,8 +1336,8 @@ pub mod tests {
impl CommonTmInfo {
pub fn new(
subservice: u8,
apid: u16,
seq_count: u16,
apid: u11,
seq_count: u14,
msg_counter: u16,
dest_id: u16,
timestamp: &[u8],
@@ -1352,21 +1353,21 @@ pub mod tests {
}
pub fn new_zero_seq_count(
subservice: u8,
apid: u16,
apid: u11,
dest_id: u16,
timestamp: &[u8],
) -> Self {
Self::new(subservice, apid, 0, 0, dest_id, timestamp)
Self::new(subservice, apid, u14::new(0), 0, dest_id, timestamp)
}
pub fn new_from_tm(tm: &PusTmCreator) -> Self {
let mut timestamp = [0; 7];
timestamp.clone_from_slice(&tm.timestamp()[0..7]);
Self {
subservice: PusPacket::subservice(tm),
subservice: PusPacket::message_subtype_id(tm),
apid: tm.apid(),
seq_count: tm.seq_count(),
msg_counter: tm.msg_counter(),
msg_counter: tm.msg_type_counter(),
dest_id: tm.dest_id(),
timestamp: timestamp.to_vec(),
}
@@ -1386,7 +1387,7 @@ pub mod tests {
pub type PusServiceHelperStatic = PusServiceHelper<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>;
@@ -1408,12 +1409,12 @@ pub mod tests {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::sync_channel(10);
let (tm_tx, tm_rx) = mpsc::sync_channel(10);
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
let verification_handler =
VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg);
let test_srv_tm_sender =
PacketSenderWithSharedPool::new(tm_tx, shared_tm_pool_wrapper.clone());
let in_store_converter = EcssTcInSharedPoolConverter::new(shared_tc_pool.clone(), 2048);
let in_store_converter = EcssTcInSharedPoolCacher::new(shared_tc_pool.clone(), 2048);
(
Self {
pus_buf: RefCell::new([0; 2048]),
@@ -1459,7 +1460,7 @@ pub mod tests {
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
self.tm_buf[0..tm_raw.len()].copy_from_slice(&tm_raw);
PusTmReader::new(&self.tm_buf, 7).unwrap().0
PusTmReader::new(&self.tm_buf, 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1476,9 +1477,9 @@ pub mod tests {
let tm_in_pool = next_msg.unwrap();
let tm_pool = self.tm_pool.0.read().unwrap();
let tm_raw = tm_pool.read_as_vec(&tm_in_pool.store_addr).unwrap();
let tm = PusTmReader::new(&tm_raw, 7).unwrap().0;
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
let tm = PusTmReader::new(&tm_raw, 7).unwrap();
assert_eq!(PusPacket::service_type_id(&tm), 1);
assert_eq!(PusPacket::message_subtype_id(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
let req_id =
RequestId::from_bytes(tm.user_data()).expect("generating request ID failed");
@@ -1491,12 +1492,8 @@ pub mod tests {
tc_sender: mpsc::Sender<EcssTcAndToken>,
tm_receiver: mpsc::Receiver<PacketAsVec>,
}
pub type PusServiceHelperDynamic = PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
VerificationReporter,
>;
pub type PusServiceHelperDynamic =
PusServiceHelper<MpscTcReceiver, MpscTmAsVecSender, EcssTcVecCacher, VerificationReporter>;
impl PusServiceHandlerWithVecCommon {
pub fn new_with_standard_verif_reporter(
@@ -1505,10 +1502,10 @@ pub mod tests {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let verif_cfg = VerificationReporterCfg::new(TEST_APID, 1, 2, 8).unwrap();
let verif_cfg = VerificationReporterConfig::new(TEST_APID, 1, 2, 8);
let verification_handler =
VerificationReporter::new(TEST_COMPONENT_ID_0.id(), &verif_cfg);
let in_store_converter = EcssTcInVecConverter::default();
let in_store_converter = EcssTcVecCacher::default();
(
Self {
current_tm: None,
@@ -1534,14 +1531,14 @@ pub mod tests {
PusServiceHelper<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
EcssTcVecCacher,
TestVerificationReporter,
>,
) {
let (test_srv_tc_tx, test_srv_tc_rx) = mpsc::channel();
let (tm_tx, tm_rx) = mpsc::channel();
let in_store_converter = EcssTcInVecConverter::default();
let in_store_converter = EcssTcVecCacher::default();
let verification_handler = TestVerificationReporter::new(id);
(
Self {
@@ -1584,9 +1581,7 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
self.current_tm = Some(next_msg.unwrap().packet);
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7)
.unwrap()
.0
PusTmReader::new(self.current_tm.as_ref().unwrap(), 7).unwrap()
}
pub fn check_no_tm_available(&self) -> bool {
@@ -1601,9 +1596,9 @@ pub mod tests {
let next_msg = self.tm_receiver.try_recv();
assert!(next_msg.is_ok());
let next_msg = next_msg.unwrap();
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap().0;
assert_eq!(PusPacket::service(&tm), 1);
assert_eq!(PusPacket::subservice(&tm), subservice);
let tm = PusTmReader::new(next_msg.packet.as_slice(), 7).unwrap();
assert_eq!(PusPacket::service_type_id(&tm), 1);
assert_eq!(PusPacket::message_subtype_id(&tm), subservice);
assert_eq!(tm.apid(), TEST_APID);
let req_id =
RequestId::from_bytes(tm.user_data()).expect("generating request ID failed");
@@ -1620,9 +1615,9 @@ pub mod tests {
impl<const SERVICE: u8> TestConverter<SERVICE> {
pub fn check_service(&self, tc: &PusTcReader) -> Result<(), PusPacketHandlingError> {
if tc.service() != SERVICE {
if tc.service_type_id() != SERVICE {
return Err(PusPacketHandlingError::RequestConversion(
GenericConversionError::WrongService(tc.service()),
GenericConversionError::WrongService(tc.service_type_id()),
));
}
Ok(())

View File

@@ -37,6 +37,7 @@ mod tests {
use std::sync::mpsc;
use crate::{
ComponentId,
mode::{
ModeAndSubmode, ModeReply, ModeReplySender, ModeRequest, ModeRequestSender,
ModeRequestorAndHandlerMpsc, ModeRequestorOneChildMpsc,
@@ -44,9 +45,9 @@ mod tests {
request::{GenericMessage, MessageMetadata},
};
const TEST_COMPONENT_ID_0: u64 = 5;
const TEST_COMPONENT_ID_1: u64 = 6;
const TEST_COMPONENT_ID_2: u64 = 7;
const TEST_COMPONENT_ID_0: ComponentId = 5;
const TEST_COMPONENT_ID_1: ComponentId = 6;
const TEST_COMPONENT_ID_2: ComponentId = 7;
#[test]
fn test_simple_mode_requestor() {

View File

@@ -2,6 +2,7 @@
//!
//! The core data structure of this module is the [PusScheduler]. This structure can be used
//! to perform the scheduling of telecommands like specified in the ECSS standard.
use arbitrary_int::{u11, u14};
use core::fmt::{Debug, Display, Formatter};
use core::time::Duration;
#[cfg(feature = "serde")]
@@ -24,22 +25,26 @@ pub use alloc_mod::*;
/// the source ID found in the secondary header of PUS telecommands.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RequestId {
pub(crate) source_id: u16,
pub(crate) apid: u16,
pub(crate) seq_count: u16,
pub(crate) apid: u11,
pub(crate) seq_count: u14,
}
impl RequestId {
pub fn source_id(&self) -> u16 {
#[inline]
pub const fn source_id(&self) -> u16 {
self.source_id
}
pub fn apid(&self) -> u16 {
#[inline]
pub const fn apid(&self) -> u11 {
self.apid
}
pub fn seq_count(&self) -> u16 {
#[inline]
pub const fn seq_count(&self) -> u14 {
self.seq_count
}
@@ -53,8 +58,10 @@ impl RequestId {
}
}
pub fn as_u64(&self) -> u64 {
((self.source_id as u64) << 32) | ((self.apid as u64) << 16) | self.seq_count as u64
pub const fn as_u64(&self) -> u64 {
((self.source_id as u64) << 32)
| ((self.apid.value() as u64) << 16)
| self.seq_count.value() as u64
}
}
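// Worked example of the packing above (not part of this changeset): with
// source_id = 12, apid = 0x22 and seq_count = 105, as used in the request ID test
// further below, the resulting u64 is
// (12 << 32) | (0x22 << 16) | 105 = 0x0000_000C_0022_0069.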
@@ -265,14 +272,16 @@ pub trait PusSchedulerProvider {
pus_tc: &(impl IsPusTelecommand + PusPacket + GenericPusTcSecondaryHeader),
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
if PusPacket::service(pus_tc) != 11 {
return Err(ScheduleError::WrongService(PusPacket::service(pus_tc)));
}
if PusPacket::subservice(pus_tc) != 4 {
return Err(ScheduleError::WrongSubservice(PusPacket::subservice(
if PusPacket::service_type_id(pus_tc) != 11 {
return Err(ScheduleError::WrongService(PusPacket::service_type_id(
pus_tc,
)));
}
if PusPacket::message_subtype_id(pus_tc) != 4 {
return Err(ScheduleError::WrongSubservice(
PusPacket::message_subtype_id(pus_tc),
));
}
if pus_tc.user_data().is_empty() {
return Err(ScheduleError::TcDataEmpty);
}
@@ -292,10 +301,12 @@ pub trait PusSchedulerProvider {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service_type_id(&check_tc) == 11
&& PusPacket::message_subtype_id(&check_tc) == 4
{
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -340,8 +351,8 @@ pub fn generate_insert_telecommand_app_data(
pub mod alloc_mod {
use alloc::{
collections::{
btree_map::{Entry, Range},
BTreeMap,
btree_map::{Entry, Range},
},
vec::Vec,
};
@@ -411,10 +422,10 @@ pub mod alloc_mod {
///
/// * `init_current_time` - The time to initialize the scheduler with.
/// * `time_margin` - This time margin is used when inserting new telecommands into the
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// schedule. If the release time of a new telecommand is earlier than the time margin
/// added to the current time, it will not be inserted into the schedule.
/// * `tc_buf_size` - Buffer for temporary storage of telecommand packets. This buffer
/// should be large enough to accomodate the largest expected TC packets.
/// should be large enough to accommodate the largest expected TC packets.
pub fn new(init_current_time: UnixTime, time_margin: Duration) -> Self {
PusScheduler {
tc_map: Default::default(),
@@ -480,10 +491,12 @@ pub mod alloc_mod {
pool: &mut (impl PoolProvider + ?Sized),
) -> Result<TcInfo, ScheduleError> {
let check_tc = PusTcReader::new(tc)?;
if PusPacket::service(&check_tc.0) == 11 && PusPacket::subservice(&check_tc.0) == 4 {
if PusPacket::service_type_id(&check_tc) == 11
&& PusPacket::message_subtype_id(&check_tc) == 4
{
return Err(ScheduleError::NestedScheduledTc);
}
let req_id = RequestId::from_tc(&check_tc.0);
let req_id = RequestId::from_tc(&check_tc);
match pool.add(tc) {
Ok(addr) => {
@@ -683,10 +696,10 @@ pub mod alloc_mod {
/// # Arguments
///
/// * `releaser` - Closure where the first argument is whether the scheduler is enabled and
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// the second argument is the telecommand information also containing the store
/// address. This closure should return whether the command should be deleted. Please
/// note that returning false might lead to memory leaks if the TC is not cleared from
/// the store in some other way.
/// * `tc_store` - The holding store of the telecommands.
/// * `tc_buf` - Buffer to hold each telecommand being released.
pub fn release_telecommands_with_buffer<R: FnMut(bool, &TcInfo, &[u8]) -> bool>(
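// Sketch of a matching releaser closure (not part of this changeset); what happens
// with the released TC bytes is application specific and only hinted at here:
//     let releaser = |scheduler_enabled: bool, _info: &TcInfo, tc_bytes: &[u8]| -> bool {
//         if scheduler_enabled {
//             // forward tc_bytes towards the TC execution path here
//         }
//         // returning true deletes the TC from the pool, avoiding the leak mentioned above
//         true
//     };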
@@ -855,10 +868,12 @@ mod tests {
PoolAddr, PoolError, PoolProvider, StaticMemoryPool, StaticPoolAddr, StaticPoolConfig,
};
use alloc::collections::btree_map::Range;
use arbitrary_int::traits::Integer;
use arbitrary_int::{u11, u14};
use spacepackets::ecss::tc::{PusTcCreator, PusTcReader, PusTcSecondaryHeader};
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::{cds, TimeWriter, UnixTime};
use spacepackets::{PacketId, PacketSequenceCtrl, PacketType, SequenceFlags, SpHeader};
use spacepackets::ecss::{CreatorConfig, MessageTypeId, WritablePusPacket};
use spacepackets::time::{TimeWriter, UnixTime, cds};
use spacepackets::{PacketId, PacketSequenceControl, PacketType, SequenceFlags, SpHeader};
use std::time::Duration;
use std::vec::Vec;
#[allow(unused_imports)]
@@ -869,59 +884,94 @@ mod tests {
cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
.unwrap();
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
let len_packet = base_ping_tc_simple_ctor(0, &[])
let len_packet = base_ping_tc_simple_ctor(u14::new(0), &[])
.write_to_bytes(&mut buf[len_time_stamp..])
.unwrap();
(
SpHeader::new_for_unseg_tc(0x02, 0x34, len_packet as u16),
SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), len_packet as u16),
len_packet + len_time_stamp,
)
}
fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn scheduled_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_app_data], true)
PusTcCreator::new_simple(
sph,
MessageTypeId::new(11, 4),
&buf[..len_app_data],
CreatorConfig::default(),
)
}
fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn wrong_tc_service(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 12, 4, &buf[..len_app_data], true)
PusTcCreator::new_simple(
sph,
MessageTypeId::new(12, 4),
&buf[..len_app_data],
CreatorConfig::default(),
)
}
fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn wrong_tc_subservice(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let (sph, len_app_data) = pus_tc_base(timestamp, buf);
PusTcCreator::new_simple(sph, 11, 5, &buf[..len_app_data], true)
PusTcCreator::new_simple(
sph,
MessageTypeId::new(11, 5),
&buf[..len_app_data],
CreatorConfig::default(),
)
}
fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator {
fn double_wrapped_time_tagged_tc(timestamp: UnixTime, buf: &mut [u8]) -> PusTcCreator<'_> {
let cds_time =
cds::CdsTime::from_unix_time_with_u16_days(&timestamp, cds::SubmillisPrecision::Absent)
.unwrap();
let len_time_stamp = cds_time.write_to_bytes(buf).unwrap();
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 0);
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), 0);
// app data should not matter, double wrapped time-tagged commands should be rejected right
// away
let inner_time_tagged_tc = PusTcCreator::new_simple(sph, 11, 4, &[], true);
let inner_time_tagged_tc = PusTcCreator::new_simple(
sph,
MessageTypeId::new(11, 4),
&[],
CreatorConfig::default(),
);
let packet_len = inner_time_tagged_tc
.write_to_bytes(&mut buf[len_time_stamp..])
.expect("writing inner time tagged tc failed");
PusTcCreator::new_simple(sph, 11, 4, &buf[..len_time_stamp + packet_len], true)
PusTcCreator::new_simple(
sph,
MessageTypeId::new(11, 4),
&buf[..len_time_stamp + packet_len],
CreatorConfig::default(),
)
}
fn invalid_time_tagged_cmd() -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(0x02, 0x34, 1);
PusTcCreator::new_simple(sph, 11, 4, &[], true)
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), u14::new(0x34), 1);
PusTcCreator::new_simple(
sph,
MessageTypeId::new(11, 4),
&[],
CreatorConfig::default(),
)
}
fn base_ping_tc_simple_ctor(seq_count: u16, app_data: &'static [u8]) -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(0x02, seq_count, 0);
PusTcCreator::new_simple(sph, 17, 1, app_data, true)
fn base_ping_tc_simple_ctor(seq_count: u14, app_data: &'static [u8]) -> PusTcCreator<'static> {
let sph = SpHeader::new_for_unseg_tc(u11::new(0x02), seq_count, 0);
PusTcCreator::new_simple(
sph,
MessageTypeId::new(17, 1),
app_data,
CreatorConfig::default(),
)
}
fn ping_tc_to_store(
pool: &mut StaticMemoryPool,
buf: &mut [u8],
seq_count: u16,
seq_count: u14,
app_data: &'static [u8],
) -> TcInfo {
let ping_tc = base_ping_tc_simple_ctor(seq_count, app_data);
@@ -949,7 +999,7 @@ mod tests {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::new(0), &[]);
scheduler
.insert_unwrapped_and_stored_tc(
@@ -959,7 +1009,7 @@ mod tests {
.unwrap();
let app_data = &[0, 1, 2];
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, app_data);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), app_data);
scheduler
.insert_unwrapped_and_stored_tc(
UnixTime::new_only_secs(200),
@@ -968,7 +1018,7 @@ mod tests {
.unwrap();
let app_data = &[0, 1, 2];
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, app_data);
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, u14::new(2), app_data);
scheduler
.insert_unwrapped_and_stored_tc(
UnixTime::new_only_secs(300),
@@ -999,8 +1049,8 @@ mod tests {
packet_idx: 1,
}),
RequestId {
seq_count: 1,
apid: 0,
seq_count: u14::new(1),
apid: u11::ZERO,
source_id: 0,
},
),
@@ -1016,8 +1066,8 @@ mod tests {
packet_idx: 2,
}),
RequestId {
seq_count: 2,
apid: 1,
seq_count: u14::new(2),
apid: u11::new(1),
source_id: 5,
},
),
@@ -1035,8 +1085,8 @@ mod tests {
.into(),
RequestId {
source_id: 10,
seq_count: 20,
apid: 23,
seq_count: u14::new(20),
apid: u11::new(23),
},
),
)
@@ -1077,19 +1127,22 @@ mod tests {
#[test]
fn test_request_id() {
let src_id_to_set = 12;
let apid_to_set = 0x22;
let seq_count = 105;
let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, 105, 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let apid_to_set = u11::new(0x22);
let seq_count = u14::new(105);
let sp_header = SpHeader::new_for_unseg_tc(apid_to_set, u14::new(105), 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
sec_header.source_id = src_id_to_set;
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let req_id = RequestId::from_tc(&ping_tc);
assert_eq!(req_id.source_id(), src_id_to_set);
assert_eq!(req_id.apid(), apid_to_set);
assert_eq!(req_id.seq_count(), seq_count);
assert_eq!(
req_id.as_u64(),
((src_id_to_set as u64) << 32) | (apid_to_set as u64) << 16 | seq_count as u64
((src_id_to_set as u64) << 32)
| (apid_to_set.value() as u64) << 16
| seq_count.value() as u64
);
}
#[test]
@@ -1101,13 +1154,13 @@ mod tests {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -1169,13 +1222,13 @@ mod tests {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
.expect("insertion failed");
@@ -1231,13 +1284,13 @@ mod tests {
scheduler.disable();
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -1298,7 +1351,7 @@ mod tests {
false,
));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
let info = scheduler
.insert_unwrapped_tc(
@@ -1313,7 +1366,7 @@ mod tests {
let mut read_buf: [u8; 64] = [0; 64];
pool.read(&tc_info_0.addr(), &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::ZERO, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1335,8 +1388,8 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut read_buf).unwrap();
let check_tc = PusTcReader::new(&read_buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::new(0), &[]));
}
#[test]
@@ -1362,8 +1415,8 @@ mod tests {
let read_len = pool.read(&info.addr, &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect Pus tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::ZERO, &[]));
assert_eq!(scheduler.num_scheduled_telecommands(), 1);
@@ -1387,8 +1440,8 @@ mod tests {
let read_len = pool.read(&addr_vec[0], &mut buf).unwrap();
let check_tc = PusTcReader::new(&buf).expect("incorrect PUS tc raw data");
assert_eq!(read_len, check_tc.1);
assert_eq!(check_tc.0, base_ping_tc_simple_ctor(0, &[]));
assert_eq!(read_len, check_tc.packet_len());
assert_eq!(check_tc, base_ping_tc_simple_ctor(u14::new(0), &[]));
}
#[test]
@@ -1531,7 +1584,7 @@ mod tests {
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
@@ -1568,7 +1621,7 @@ mod tests {
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
@@ -1594,7 +1647,7 @@ mod tests {
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
@@ -1615,7 +1668,7 @@ mod tests {
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
@@ -1636,15 +1689,15 @@ mod tests {
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("inserting tc failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_1)
.expect("inserting tc failed");
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, 2, &[]);
let tc_info_2 = ping_tc_to_store(&mut pool, &mut buf, u14::new(2), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_2)
.expect("inserting tc failed");
@@ -1703,7 +1756,7 @@ mod tests {
fn insert_command_with_release_time(
pool: &mut StaticMemoryPool,
scheduler: &mut PusScheduler,
seq_count: u16,
seq_count: u14,
release_secs: u64,
) -> TcInfo {
let mut buf: [u8; 32] = [0; 32];
@@ -1722,8 +1775,8 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let check_range = |range: Range<UnixTime, Vec<TcInfo>>| {
let mut tcs_in_range = 0;
@@ -1754,9 +1807,9 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
&UnixTime::new_only_secs(100),
cds::SubmillisPrecision::Absent,
@@ -1789,9 +1842,9 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let tc_info_0 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let end_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1824,10 +1877,10 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 200);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let tc_info_1 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let tc_info_2 = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let _ = insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 200);
assert_eq!(scheduler.num_scheduled_telecommands(), 4);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1865,8 +1918,8 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let del_res = scheduler.delete_all(&mut pool);
assert!(del_res.is_ok());
@@ -1875,8 +1928,8 @@ mod tests {
// Contrary to reset, this does not disable the scheduler.
assert!(scheduler.is_enabled());
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
assert_eq!(scheduler.num_scheduled_telecommands(), 2);
let del_res = scheduler
.delete_by_time_filter(TimeWindow::<cds::CdsTime>::new_select_all(), &mut pool);
@@ -1894,9 +1947,11 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
&UnixTime::new_only_secs(100),
@@ -1919,9 +1974,11 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
assert_eq!(scheduler.num_scheduled_telecommands(), 3);
let end_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1945,11 +2002,14 @@ mod tests {
false,
));
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let cmd_out_of_range_0 = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 50);
let cmd_0_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 100);
let cmd_1_to_delete = insert_command_with_release_time(&mut pool, &mut scheduler, 0, 150);
let cmd_out_of_range_0 =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 50);
let cmd_0_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 100);
let cmd_1_to_delete =
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 150);
let cmd_out_of_range_1 =
insert_command_with_release_time(&mut pool, &mut scheduler, 0, 200);
insert_command_with_release_time(&mut pool, &mut scheduler, u14::ZERO, 200);
assert_eq!(scheduler.num_scheduled_telecommands(), 4);
let start_stamp = cds::CdsTime::from_unix_time_with_u16_days(
@@ -1982,13 +2042,13 @@ mod tests {
let mut scheduler = PusScheduler::new(UnixTime::new_only_secs(0), Duration::from_secs(5));
let mut buf: [u8; 32] = [0; 32];
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, 0, &[]);
let tc_info_0 = ping_tc_to_store(&mut pool, &mut buf, u14::ZERO, &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(100), tc_info_0)
.expect("insertion failed");
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, 1, &[]);
let tc_info_1 = ping_tc_to_store(&mut pool, &mut buf, u14::new(1), &[]);
scheduler
.insert_unwrapped_and_stored_tc(UnixTime::new_only_secs(200), tc_info_1)
.expect("insertion failed");
@@ -2017,12 +2077,12 @@ mod tests {
fn test_generic_insert_app_data_test() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 64] = [0; 64];
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
assert!(result.is_ok());
@@ -2031,7 +2091,7 @@ mod tests {
assert_eq!(n, 1);
let time_reader = cds::CdsTime::from_bytes_with_u16_days(&buf[2..2 + 7]).unwrap();
assert_eq!(time_reader, time_writer);
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap().0;
let pus_tc_reader = PusTcReader::new(&buf[9..]).unwrap();
assert_eq!(pus_tc_reader, ping_tc);
}
@@ -2039,12 +2099,12 @@ mod tests {
fn test_generic_insert_app_data_test_byte_conv_error() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 16] = [0; 16];
let result = generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc);
assert!(result.is_err());
@@ -2068,12 +2128,12 @@ mod tests {
fn test_generic_insert_app_data_test_as_vec() {
let time_writer = cds::CdsTime::new_with_u16_days(1, 1);
let sph = SpHeader::new(
PacketId::new(PacketType::Tc, true, 0x002),
PacketSequenceCtrl::new(SequenceFlags::Unsegmented, 5),
PacketId::new(PacketType::Tc, true, u11::new(0x002)),
PacketSequenceControl::new(SequenceFlags::Unsegmented, u14::new(5)),
0,
);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, true);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let ping_tc = PusTcCreator::new_no_app_data(sph, sec_header, CreatorConfig::default());
let mut buf: [u8; 64] = [0; 64];
generate_insert_telecommand_app_data(&mut buf, &time_writer, &ping_tc).unwrap();
let vec = generate_insert_telecommand_app_data_as_vec(&time_writer, &ping_tc)
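
The hunks above replace bare integer sequence counts with the 14-bit `u14` type from the `arbitrary-int` crate. A minimal sketch of that pattern under the same assumption, mirroring the imports used in the tests (`Integer as _` appears to be what makes `u14::ZERO` available):

```rust
use arbitrary_int::traits::Integer as _; // mirrored from the test imports; needed for u14::ZERO
use arbitrary_int::u14;

fn main() {
    // Sequence counts are now 14-bit values instead of plain integer literals.
    let first = u14::ZERO; // replaces the old literal 0
    let second = u14::new(1); // construction from a plain integer (assumed to reject values wider than 14 bits)
    assert_eq!(first.value(), 0);
    assert_eq!(second.value(), 1);
}
```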

View File

@@ -1,15 +1,15 @@
use super::scheduler::PusSchedulerProvider;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
DirectPusPacketHandlerResult, EcssTcInMemConversionProvider, EcssTcInSharedPoolConverter,
EcssTcInVecConverter, EcssTcReceiver, EcssTmSender, HandlingStatus, MpscTcReceiver,
PartialPusHandlingError, PusServiceHelper,
CacheAndReadRawEcssTc, DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, EcssTcReceiver,
EcssTcVecCacher, EcssTmSender, HandlingStatus, MpscTcReceiver, PartialPusHandlingError,
PusServiceHelper,
};
use crate::pool::PoolProvider;
use crate::pus::PusPacketHandlingError;
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use alloc::string::ToString;
use spacepackets::ecss::{scheduling, PusPacket};
use spacepackets::ecss::{PusPacket, scheduling};
use spacepackets::time::cds::CdsTime;
use std::sync::mpsc;
@@ -24,7 +24,7 @@ use std::sync::mpsc;
pub struct PusSchedServiceHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
PusScheduler: PusSchedulerProvider,
> {
@@ -34,13 +34,12 @@ pub struct PusSchedServiceHandler<
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusSchedulerProvider,
>
PusSchedServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter, Scheduler>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
Scheduler: PusSchedulerProvider,
> PusSchedServiceHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter, Scheduler>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -80,8 +79,8 @@ impl<
.tc_in_mem_converter_mut()
.cache(&ecss_tc_and_token.tc_in_memory)?;
let tc = self.service_helper.tc_in_mem_converter().convert()?;
let subservice = PusPacket::subservice(&tc);
let standard_subservice = scheduling::Subservice::try_from(subservice);
let subservice = PusPacket::message_subtype_id(&tc);
let standard_subservice = scheduling::MessageSubtypeId::try_from(subservice);
if standard_subservice.is_err() {
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
subservice,
@@ -89,7 +88,7 @@ impl<
));
}
match standard_subservice.unwrap() {
scheduling::Subservice::TcEnableScheduling => {
scheduling::MessageSubtypeId::TcEnableScheduling => {
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
@@ -102,11 +101,12 @@ impl<
}
};
self.scheduler.enable();
if self.scheduler.is_enabled() && opt_started_token.is_some() {
if self.scheduler.is_enabled()
&& let Some(started_token) = opt_started_token
{
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
started_token,
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
@@ -117,7 +117,7 @@ impl<
));
}
}
scheduling::Subservice::TcDisableScheduling => {
scheduling::MessageSubtypeId::TcDisableScheduling => {
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
@@ -131,10 +131,12 @@ impl<
};
self.scheduler.disable();
if !self.scheduler.is_enabled() && opt_started_token.is_some() {
if !self.scheduler.is_enabled()
&& let Some(started_token) = opt_started_token
{
if let Err(e) = self.service_helper.verif_reporter().completion_success(
&self.service_helper.common.tm_sender,
opt_started_token.unwrap(),
started_token,
time_stamp,
) {
error_callback(&PartialPusHandlingError::Verification(e));
@@ -145,7 +147,7 @@ impl<
));
}
}
scheduling::Subservice::TcResetScheduling => {
scheduling::MessageSubtypeId::TcResetScheduling => {
let start_token = self
.service_helper
.verif_reporter()
@@ -169,7 +171,7 @@ impl<
)
.expect("Error sending completion success");
}
scheduling::Subservice::TcInsertActivity => {
scheduling::MessageSubtypeId::TcInsertActivity => {
let start_token = self
.service_helper
.common
@@ -211,7 +213,7 @@ impl<
pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
PusScheduler,
>;
@@ -220,7 +222,7 @@ pub type PusService11SchedHandlerDynWithMpsc<PusScheduler> = PusSchedServiceHand
pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
PusScheduler,
>;
@@ -229,7 +231,7 @@ pub type PusService11SchedHandlerDynWithBoundedMpsc<PusScheduler> = PusSchedServ
pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
PusScheduler,
>;
@@ -238,7 +240,7 @@ pub type PusService11SchedHandlerStaticWithMpsc<PusScheduler> = PusSchedServiceH
pub type PusService11SchedHandlerStaticWithBoundedMpsc<PusScheduler> = PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
PusScheduler,
>;
@@ -249,21 +251,23 @@ mod tests {
use crate::pus::test_util::{PusTestHarness, TEST_APID};
use crate::pus::verification::{VerificationReporter, VerificationReportingProvider};
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::pus::{
EcssTcInSharedPoolCacher,
scheduler::{self, PusSchedulerProvider, TcInfo},
tests::PusServiceHandlerWithSharedStoreCommon,
verification::{RequestId, TcStateAccepted, VerificationToken},
EcssTcInSharedPoolConverter,
};
use crate::pus::{DirectPusPacketHandlerResult, MpscTcReceiver, PusPacketHandlingError};
use crate::tmtc::PacketSenderWithSharedPool;
use alloc::collections::VecDeque;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::ecss::scheduling::Subservice;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::WritablePusPacket;
use spacepackets::time::TimeWriter;
use spacepackets::SpHeader;
use spacepackets::ecss::scheduling::MessageSubtypeId;
use spacepackets::ecss::tc::PusTcSecondaryHeader;
use spacepackets::ecss::{CreatorConfig, MessageTypeId, WritablePusPacket};
use spacepackets::time::TimeWriter;
use spacepackets::{
ecss::{tc::PusTcCreator, tm::PusTmReader},
time::cds,
@@ -276,7 +280,7 @@ mod tests {
handler: PusSchedServiceHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
TestScheduler,
>,
@@ -309,8 +313,12 @@ mod tests {
}
impl PusTestHarness for Pus11HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -378,12 +386,13 @@ mod tests {
fn generic_subservice_send(
test_harness: &mut Pus11HandlerWithStoreTester,
subservice: Subservice,
subservice: MessageSubtypeId,
) {
let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, 0, 0);
let tc_header = PusTcSecondaryHeader::new_simple(11, subservice as u8);
let enable_scheduling = PusTcCreator::new(reply_header, tc_header, &[0; 7], true);
let token = test_harness.init_verification(&enable_scheduling);
let reply_header = SpHeader::new_for_unseg_tm(TEST_APID, u14::ZERO, 0);
let tc_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(11, subservice as u8));
let enable_scheduling =
PusTcCreator::new(reply_header, tc_header, &[0; 7], CreatorConfig::default());
let token = test_harness.start_verification(&enable_scheduling);
test_harness.send_tc(&token, &enable_scheduling);
let request_id = token.request_id();
@@ -402,7 +411,7 @@ mod tests {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().disable();
assert!(!test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcEnableScheduling);
generic_subservice_send(&mut test_harness, MessageSubtypeId::TcEnableScheduling);
assert!(test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().enabled_count, 1);
}
@@ -412,7 +421,7 @@ mod tests {
let mut test_harness = Pus11HandlerWithStoreTester::new();
test_harness.handler.scheduler_mut().enable();
assert!(test_harness.handler.scheduler().is_enabled());
generic_subservice_send(&mut test_harness, Subservice::TcDisableScheduling);
generic_subservice_send(&mut test_harness, MessageSubtypeId::TcDisableScheduling);
assert!(!test_harness.handler.scheduler().is_enabled());
assert_eq!(test_harness.handler.scheduler().disabled_count, 1);
}
@@ -420,16 +429,16 @@ mod tests {
#[test]
fn test_reset_scheduler_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
generic_subservice_send(&mut test_harness, Subservice::TcResetScheduling);
generic_subservice_send(&mut test_harness, MessageSubtypeId::TcResetScheduling);
assert_eq!(test_harness.handler.scheduler().reset_count, 1);
}
#[test]
fn test_insert_activity_tc() {
let mut test_harness = Pus11HandlerWithStoreTester::new();
let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], true);
let mut reply_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let mut sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let ping_tc = PusTcCreator::new(reply_header, sec_header, &[], CreatorConfig::default());
let req_id_ping_tc = scheduler::RequestId::from_tc(&ping_tc);
let stamper = cds::CdsTime::now_with_u16_days().expect("time provider failed");
let mut sched_app_data: [u8; 64] = [0; 64];
@@ -437,15 +446,18 @@ mod tests {
let ping_raw = ping_tc.to_vec().expect("generating raw tc failed");
sched_app_data[written_len..written_len + ping_raw.len()].copy_from_slice(&ping_raw);
written_len += ping_raw.len();
reply_header = SpHeader::new_for_unseg_tc(TEST_APID, 1, 0);
sec_header = PusTcSecondaryHeader::new_simple(11, Subservice::TcInsertActivity as u8);
reply_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(
11,
MessageSubtypeId::TcInsertActivity as u8,
));
let enable_scheduling = PusTcCreator::new(
reply_header,
sec_header,
&sched_app_data[..written_len],
true,
CreatorConfig::default(),
);
let token = test_harness.init_verification(&enable_scheduling);
let token = test_harness.start_verification(&enable_scheduling);
test_harness.send_tc(&token, &enable_scheduling);
let request_id = token.request_id();
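
The enable/disable branches above replace the `is_some()` check plus `unwrap()` with a let-chain (only accepted on the Rust 2024 edition). A standalone sketch of the pattern with hypothetical names, not the actual handler state:

```rust
// `scheduler_enabled` and `opt_started_token` are made-up stand-ins for the handler fields.
fn report_if_enabled(scheduler_enabled: bool, opt_started_token: Option<u32>) {
    if scheduler_enabled && let Some(started_token) = opt_started_token {
        // The token is bound directly; no separate `unwrap()` call is required.
        println!("sending completion success for token {started_token}");
    }
}

fn main() {
    report_if_enabled(true, Some(42));
    report_if_enabled(true, None); // guard fails, nothing is reported
}
```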

View File

@@ -2,16 +2,17 @@ use crate::pus::{
DirectPusPacketHandlerResult, PartialPusHandlingError, PusPacketHandlingError, PusTmVariant,
};
use crate::tmtc::{PacketAsVec, PacketSenderWithSharedPool};
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::PusPacket;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use spacepackets::SpHeader;
use spacepackets::ecss::tm::{PusTmCreator, PusTmSecondaryHeader};
use spacepackets::ecss::{CreatorConfig, MessageTypeId, PusPacket};
use std::sync::mpsc;
use super::verification::{VerificationReporter, VerificationReportingProvider};
use super::{
EcssTcInMemConversionProvider, EcssTcInSharedPoolConverter, EcssTcInVecConverter,
EcssTcReceiver, EcssTmSender, GenericConversionError, HandlingStatus, MpscTcReceiver,
PusServiceHelper,
CacheAndReadRawEcssTc, EcssTcInSharedPoolCacher, EcssTcReceiver, EcssTcVecCacher, EcssTmSender,
GenericConversionError, HandlingStatus, MpscTcReceiver, PusServiceHelper,
};
/// This is a helper class for [std] environments to handle generic PUS 17 (test service) packets.
@@ -19,7 +20,7 @@ use super::{
pub struct PusService17TestHandler<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> {
pub service_helper:
@@ -27,11 +28,11 @@ pub struct PusService17TestHandler<
}
impl<
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: EcssTcInMemConversionProvider,
VerificationReporter: VerificationReportingProvider,
> PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
TcReceiver: EcssTcReceiver,
TmSender: EcssTmSender,
TcInMemConverter: CacheAndReadRawEcssTc,
VerificationReporter: VerificationReportingProvider,
> PusService17TestHandler<TcReceiver, TmSender, TcInMemConverter, VerificationReporter>
{
pub fn new(
service_helper: PusServiceHelper<
@@ -58,10 +59,10 @@ impl<
.tc_in_mem_converter_mut()
.cache(&ecss_tc_and_token.tc_in_memory)?;
let tc = self.service_helper.tc_in_mem_converter().convert()?;
if tc.service() != 17 {
return Err(GenericConversionError::WrongService(tc.service()).into());
if tc.service_type_id() != 17 {
return Err(GenericConversionError::WrongService(tc.service_type_id()).into());
}
if tc.subservice() == 1 {
if tc.message_subtype_id() == 1 {
let opt_started_token = match self.service_helper.verif_reporter().start_success(
&self.service_helper.common.tm_sender,
ecss_tc_and_token.token,
@@ -76,10 +77,14 @@ impl<
// Sequence count will be handled centrally in TM funnel.
// It is assumed that the verification reporter was built with a valid APID, so we use
// the unchecked API here.
let reply_header =
SpHeader::new_for_unseg_tm(self.service_helper.verif_reporter().apid(), 0, 0);
let tc_header = PusTmSecondaryHeader::new_simple(17, 2, time_stamp);
let ping_reply = PusTmCreator::new(reply_header, tc_header, &[], true);
let reply_header = SpHeader::new_for_unseg_tm(
self.service_helper.verif_reporter().apid(),
u14::ZERO,
0,
);
let tc_header = PusTmSecondaryHeader::new_simple(MessageTypeId::new(17, 2), time_stamp);
let ping_reply =
PusTmCreator::new(reply_header, tc_header, &[], CreatorConfig::default());
if let Err(e) = self
.service_helper
.common
@@ -99,7 +104,7 @@ impl<
}
} else {
return Ok(DirectPusPacketHandlerResult::CustomSubservice(
tc.subservice(),
tc.message_subtype_id(),
ecss_tc_and_token.token,
));
}
@@ -112,7 +117,7 @@ impl<
pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
MpscTcReceiver,
mpsc::Sender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>;
/// Helper type definition for a PUS 17 handler with a dynamic TMTC memory backend and bounded MPSC
@@ -120,7 +125,7 @@ pub type PusService17TestHandlerDynWithMpsc = PusService17TestHandler<
pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
mpsc::SyncSender<PacketAsVec>,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>;
/// Helper type definition for a PUS 17 handler with a shared store TMTC memory backend and bounded
@@ -128,12 +133,13 @@ pub type PusService17TestHandlerDynWithBoundedMpsc = PusService17TestHandler<
pub type PusService17TestHandlerStaticWithBoundedMpsc = PusService17TestHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>;
#[cfg(test)]
mod tests {
use crate::ComponentId;
use crate::pus::test_util::{PusTestHarness, SimplePusPacketHandler, TEST_APID};
use crate::pus::tests::{
PusServiceHandlerWithSharedStoreCommon, PusServiceHandlerWithVecCommon,
@@ -143,18 +149,19 @@ mod tests {
};
use crate::pus::verification::{TcStateAccepted, VerificationToken};
use crate::pus::{
DirectPusPacketHandlerResult, EcssTcInSharedPoolConverter, EcssTcInVecConverter,
DirectPusPacketHandlerResult, EcssTcInSharedPoolCacher, EcssTcVecCacher,
GenericConversionError, HandlingStatus, MpscTcReceiver, MpscTmAsVecSender,
PartialPusHandlingError, PusPacketHandlingError,
};
use crate::tmtc::PacketSenderWithSharedPool;
use crate::ComponentId;
use arbitrary_int::traits::Integer as _;
use arbitrary_int::u14;
use delegate::delegate;
use spacepackets::SpHeader;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::tm::PusTmReader;
use spacepackets::ecss::PusPacket;
use spacepackets::time::{cds, TimeWriter};
use spacepackets::SpHeader;
use spacepackets::ecss::{CreatorConfig, MessageTypeId, PusPacket};
use spacepackets::time::{TimeWriter, cds};
use super::PusService17TestHandler;
@@ -163,7 +170,7 @@ mod tests {
handler: PusService17TestHandler<
MpscTcReceiver,
PacketSenderWithSharedPool,
EcssTcInSharedPoolConverter,
EcssTcInSharedPoolCacher,
VerificationReporter,
>,
}
@@ -180,8 +187,12 @@ mod tests {
}
impl PusTestHarness for Pus17HandlerWithStoreTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -221,7 +232,7 @@ mod tests {
handler: PusService17TestHandler<
MpscTcReceiver,
MpscTmAsVecSender,
EcssTcInVecConverter,
EcssTcVecCacher,
VerificationReporter,
>,
}
@@ -238,8 +249,12 @@ mod tests {
}
impl PusTestHarness for Pus17HandlerWithVecTester {
fn init_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self.handler.service_helper.verif_reporter_mut().add_tc(tc);
fn start_verification(&mut self, tc: &PusTcCreator) -> VerificationToken<TcStateAccepted> {
let init_token = self
.handler
.service_helper
.verif_reporter_mut()
.start_verification(tc);
self.handler
.service_helper
.verif_reporter()
@@ -276,10 +291,11 @@ mod tests {
fn ping_test(test_harness: &mut (impl PusTestHarness + SimplePusPacketHandler)) {
// Create a ping TC, verify acceptance.
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let request_id = token.request_id();
let result = test_harness.handle_one_tc();
@@ -295,8 +311,8 @@ mod tests {
// Ping reply
let tm = test_harness.read_next_tm();
assert_eq!(tm.service(), 17);
assert_eq!(tm.subservice(), 2);
assert_eq!(tm.service_type_id(), 17);
assert_eq!(tm.message_subtype_id(), 2);
assert!(tm.user_data().is_empty());
// TM completion
@@ -331,10 +347,11 @@ mod tests {
#[test]
fn test_sending_unsupported_service() {
let mut test_harness = Pus17HandlerWithStoreTester::new(0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(3, 1);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(3, 1));
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_err());
@@ -352,10 +369,11 @@ mod tests {
#[test]
fn test_sending_custom_subservice() {
let mut test_harness = Pus17HandlerWithStoreTester::new(0);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, 0, 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 200);
let ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let token = test_harness.init_verification(&ping_tc);
let sp_header = SpHeader::new_for_unseg_tc(TEST_APID, u14::ZERO, 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 200));
let ping_tc =
PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let token = test_harness.start_verification(&ping_tc);
test_harness.send_tc(&token, &ping_tc);
let result = test_harness.handle_one_tc();
assert!(result.is_ok());
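
The test updates above reflect the reworked spacepackets constructors: secondary headers now take a combined `MessageTypeId`, and creators take a `CreatorConfig` instead of a bare boolean flag. A minimal sketch mirroring the calls shown in the diff; the APID value is made up for illustration:

```rust
use arbitrary_int::traits::Integer as _;
use arbitrary_int::{u11, u14};
use spacepackets::SpHeader;
use spacepackets::ecss::tc::{PusTcCreator, PusTcSecondaryHeader};
use spacepackets::ecss::{CreatorConfig, MessageTypeId};

fn main() {
    // Example APID; the tests above use their own TEST_APID constant.
    let sp_header = SpHeader::new_for_unseg_tc(u11::new(0x002), u14::ZERO, 0);
    // Service 17, subservice 1 (ping) expressed as a combined message type ID.
    let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
    let _ping_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
}
```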

File diff suppressed because it is too large

View File

@@ -1,3 +1,4 @@
use arbitrary_int::{prelude::*, u11, u21};
use core::{fmt, marker::PhantomData};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
@@ -9,13 +10,13 @@ pub use alloc_mod::*;
pub use std_mod::*;
use spacepackets::{
ecss::{tc::IsPusTelecommand, PusPacket},
ByteConversionError,
ecss::{PusPacket, tc::IsPusTelecommand},
};
use crate::{
queue::{GenericReceiveError, GenericSendError},
ComponentId,
queue::{GenericReceiveError, GenericSendError},
};
/// Generic request ID type. Requests can be associated with an ID to have a unique identifier
@@ -23,16 +24,16 @@ use crate::{
pub type RequestId = u32;
/// CCSDS APID type definition. Please note that the APID is an 11 bit value.
pub type Apid = u16;
pub type Apid = u11;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct UniqueApidTargetId {
pub apid: Apid,
pub unique_id: u32,
pub unique_id: u21,
}
impl UniqueApidTargetId {
pub const fn new(apid: Apid, target: u32) -> Self {
pub const fn new(apid: Apid, target: u21) -> Self {
Self {
apid,
unique_id: target,
@@ -40,7 +41,7 @@ impl UniqueApidTargetId {
}
pub fn raw(&self) -> ComponentId {
((self.apid as u64) << 32) | (self.unique_id as u64)
((self.apid.value() as u32) << 21) | (self.unique_id.value())
}
pub fn id(&self) -> ComponentId {
@@ -60,21 +61,21 @@ impl UniqueApidTargetId {
}
Ok(Self::new(
tc.apid(),
u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap()),
u21::new(u32::from_be_bytes(tc.user_data()[0..4].try_into().unwrap())),
))
}
}
impl From<u64> for UniqueApidTargetId {
fn from(raw: u64) -> Self {
impl From<ComponentId> for UniqueApidTargetId {
fn from(raw: ComponentId) -> Self {
Self {
apid: (raw >> 32) as u16,
unique_id: raw as u32,
apid: u11::new((raw >> 21) as u16),
unique_id: u21::new(raw & u21::MAX.value()),
}
}
}
impl From<UniqueApidTargetId> for u64 {
impl From<UniqueApidTargetId> for ComponentId {
fn from(target_and_apid_id: UniqueApidTargetId) -> Self {
target_and_apid_id.raw()
}
@@ -335,12 +336,12 @@ pub mod alloc_mod {
}
impl<
To,
From,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> MessageSenderAndReceiver<To, From, Sender, Receiver, SenderStore>
To,
From,
Sender: MessageSenderProvider<To>,
Receiver: MessageReceiverProvider<From>,
SenderStore: MessageSenderStoreProvider<To, Sender>,
> MessageSenderAndReceiver<To, From, Sender, Receiver, SenderStore>
{
pub fn new(local_channel_id: ComponentId, message_receiver: Receiver) -> Self {
Self {
@@ -403,15 +404,15 @@ pub mod alloc_mod {
}
impl<
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
Request,
ReqSender: MessageSenderProvider<Request>,
ReqReceiver: MessageReceiverProvider<Request>,
ReqSenderStore: MessageSenderStoreProvider<Request, ReqSender>,
Reply,
ReplySender: MessageSenderProvider<Reply>,
ReplyReceiver: MessageReceiverProvider<Reply>,
ReplySenderStore: MessageSenderStoreProvider<Reply, ReplySender>,
>
RequestAndReplySenderAndReceiver<
Request,
ReqSender,
@@ -496,33 +497,38 @@ mod tests {
use std::sync::mpsc;
use alloc::string::ToString;
use arbitrary_int::{u11, u14, u21};
use spacepackets::{
ecss::tc::{PusTcCreator, PusTcSecondaryHeader},
ByteConversionError, SpHeader,
ecss::{
CreatorConfig, MessageTypeId,
tc::{PusTcCreator, PusTcSecondaryHeader},
},
};
use crate::{
ComponentId,
queue::{GenericReceiveError, GenericSendError},
request::{MessageMetadata, MessageSenderMap, MessageSenderStoreProvider},
request::{Apid, MessageMetadata, MessageSenderMap, MessageSenderStoreProvider},
};
use super::{GenericMessage, MessageReceiverWithId, UniqueApidTargetId};
const TEST_CHANNEL_ID_0: u64 = 1;
const TEST_CHANNEL_ID_1: u64 = 2;
const TEST_CHANNEL_ID_2: u64 = 3;
const TEST_CHANNEL_ID_0: ComponentId = 1;
const TEST_CHANNEL_ID_1: ComponentId = 2;
const TEST_CHANNEL_ID_2: ComponentId = 3;
#[test]
fn test_basic_target_id_with_apid() {
let id = UniqueApidTargetId::new(0x111, 0x01);
assert_eq!(id.apid, 0x111);
assert_eq!(id.unique_id, 0x01);
let id = UniqueApidTargetId::new(Apid::new(0x111), u21::new(0x01));
assert_eq!(id.apid.value(), 0x111);
assert_eq!(id.unique_id.value(), 0x01);
assert_eq!(id.id(), id.raw());
assert_eq!(u64::from(id), id.raw());
assert_eq!(ComponentId::from(id), id.raw());
let id_raw = id.raw();
let id_from_raw = UniqueApidTargetId::from(id_raw);
assert_eq!(id_from_raw, id);
assert_eq!(id.id(), (0x111 << 32) | 0x01);
assert_eq!(id.id(), (0x111 << 21) | 0x01);
let string = id.to_string();
assert_eq!(
string,
@@ -532,19 +538,24 @@ mod tests {
#[test]
fn test_basic_target_id_with_apid_from_pus_tc() {
let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
let sp_header = SpHeader::new_for_unseg_tc(u11::new(0x111), u14::new(5), 0);
let app_data = 1_u32.to_be_bytes();
let pus_tc = PusTcCreator::new_simple(sp_header, 17, 1, &app_data, true);
let pus_tc = PusTcCreator::new_simple(
sp_header,
MessageTypeId::new(17, 1),
&app_data,
CreatorConfig::default(),
);
let id = UniqueApidTargetId::from_pus_tc(&pus_tc).unwrap();
assert_eq!(id.apid, 0x111);
assert_eq!(id.unique_id, 1);
assert_eq!(id.apid.value(), 0x111);
assert_eq!(id.unique_id.value(), 1);
}
#[test]
fn test_basic_target_id_with_apid_from_pus_tc_invalid_app_data() {
let sp_header = SpHeader::new_for_unseg_tc(0x111, 5, 0);
let sec_header = PusTcSecondaryHeader::new_simple(17, 1);
let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, true);
let sp_header = SpHeader::new_for_unseg_tc(u11::new(0x111), u14::new(5), 0);
let sec_header = PusTcSecondaryHeader::new_simple(MessageTypeId::new(17, 1));
let pus_tc = PusTcCreator::new_no_app_data(sp_header, sec_header, CreatorConfig::default());
let error = UniqueApidTargetId::from_pus_tc(&pus_tc);
assert!(error.is_err());
let error = error.unwrap_err();
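
With `ComponentId` shrunk to 32 bits, `UniqueApidTargetId` now packs the 11-bit APID and the 21-bit unique ID into a single word, as the reworked `raw()` and `From<ComponentId>` implementations above show. A short worked sketch with illustrative values:

```rust
use arbitrary_int::{prelude::*, u11, u21};

fn main() {
    // 11-bit APID in the upper bits, 21-bit unique ID in the lower bits.
    let apid = u11::new(0x111);
    let unique_id = u21::new(0x01);
    let component_id: u32 = ((apid.value() as u32) << 21) | unique_id.value();
    assert_eq!(component_id, (0x111 << 21) | 0x01);

    // Unpacking mirrors the From<ComponentId> conversion shown in the diff.
    let apid_back = u11::new((component_id >> 21) as u16);
    let unique_id_back = u21::new(component_id & u21::MAX.value());
    assert_eq!(apid_back, apid);
    assert_eq!(unique_id_back, unique_id);
}
```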

View File

@@ -211,8 +211,8 @@ mod tests {
println,
rc::Rc,
sync::{
mpsc::{self, TryRecvError},
Arc, Mutex,
mpsc::{self, TryRecvError},
},
time::Instant,
};

View File

@@ -1,4 +1,5 @@
use crate::{
ComponentId,
health::{HealthState, HealthTableProvider},
mode::{Mode, ModeAndSubmode, ModeReply, ModeRequest, ModeRequestSender, UNKNOWN_MODE_VAL},
mode_tree::{
@@ -7,7 +8,6 @@ use crate::{
},
queue::GenericTargetedMessagingError,
request::{GenericMessage, RequestId},
ComponentId,
};
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
@@ -136,7 +136,7 @@ impl SequenceExecutionHelper {
/// with [Self::load]
/// * `sender` - The sender to send mode requests to the components
/// * `children_mode_store` - The mode store vector to keep track of the mode states of
/// children components
/// children components
pub fn run(
&mut self,
table: &SequenceModeTables,
@@ -252,10 +252,10 @@ impl SequenceExecutionHelper {
Ok(ModeCommandingResult::AwaitingSuccessCheck)
} else if seq_table_value.entries.len() - 1 == sequence_idx {
self.state = SequenceExecutionHelperState::Idle;
return Ok(ModeCommandingResult::Done);
Ok(ModeCommandingResult::Done)
} else {
self.current_sequence_index = Some(sequence_idx + 1);
return Ok(ModeCommandingResult::StepDone);
Ok(ModeCommandingResult::StepDone)
}
}
@@ -597,7 +597,7 @@ impl SubsystemCommandingHelper {
}
fn update_internal_req_id(&mut self) {
let new_internal_req_id = self.request_id().unwrap() << 8
let new_internal_req_id = (self.request_id().unwrap() << 8)
| self.seq_exec_helper.current_sequence_index().unwrap() as u32;
self.seq_exec_helper.set_request_id(new_internal_req_id);
self.active_internal_request_id = Some(new_internal_req_id);
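
The `update_internal_req_id` change above only adds parentheses: in Rust, `<<` binds tighter than `|`, so the composed internal request ID is unchanged and the new form merely makes the intent explicit. A one-line check of that equivalence with illustrative values:

```rust
fn main() {
    let request_id: u32 = 0x12;
    let sequence_idx: u32 = 3;
    // Shift has higher precedence than bitwise OR, so both spellings agree.
    assert_eq!(request_id << 8 | sequence_idx, (request_id << 8) | sequence_idx);
}
```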
@@ -682,9 +682,10 @@ mod tests {
use super::*;
use crate::{
ComponentId,
mode::{
tests::{ModeReqSenderMock, ModeReqWrapper},
Mode, ModeAndSubmode, ModeReply, ModeRequest, UNKNOWN_MODE,
tests::{ModeReqSenderMock, ModeReqWrapper},
},
mode_tree::{
ModeStoreProvider, ModeStoreVec, SequenceModeTables, SequenceTableEntry,
@@ -693,7 +694,6 @@ mod tests {
queue::GenericTargetedMessagingError,
request::{GenericMessage, MessageMetadata, RequestId},
subsystem::{ModeCommandingResult, ModeTreeHelperState, SequenceExecutionHelperState},
ComponentId,
};
#[derive(Debug)]
@@ -782,7 +782,7 @@ mod tests {
);
assert_eq!(self.sender.requests.borrow().len(), 2);
let req_0 = self.sender.requests.get_mut().pop_front().unwrap();
assert_eq!(req_0.target_id, ExampleTargetId::Target0 as u64);
assert_eq!(req_0.target_id, ExampleTargetId::Target0 as ComponentId);
assert_eq!(req_0.request_id, expected_req_id);
assert_eq!(
req_0.request,
@@ -792,7 +792,7 @@ mod tests {
}
);
let req_1 = self.sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req_1.target_id, ExampleTargetId::Target1 as u64);
assert_eq!(req_1.target_id, ExampleTargetId::Target1 as ComponentId);
assert_eq!(
req_1.request,
ModeRequest::SetMode {
@@ -808,7 +808,7 @@ mod tests {
);
assert_eq!(self.sender.requests.borrow().len(), 1);
let req_0 = self.sender.requests.get_mut().pop_front().unwrap();
assert_eq!(req_0.target_id, ExampleTargetId::Target2 as u64);
assert_eq!(req_0.target_id, ExampleTargetId::Target2 as ComponentId);
assert_eq!(req_0.request_id, expected_req_id);
assert_eq!(
req_0.request,
@@ -827,7 +827,7 @@ mod tests {
assert_eq!(self.execution_helper.current_sequence_index().unwrap(), 0);
assert_eq!(self.sender.requests.borrow().len(), 2);
let req_0 = self.sender.requests.get_mut().pop_front().unwrap();
assert_eq!(req_0.target_id, ExampleTargetId::Target0 as u64);
assert_eq!(req_0.target_id, ExampleTargetId::Target0 as ComponentId);
assert_eq!(req_0.request_id, expected_req_id);
assert_eq!(
req_0.request,
@@ -837,7 +837,7 @@ mod tests {
}
);
let req_1 = self.sender.requests.borrow_mut().pop_front().unwrap();
assert_eq!(req_1.target_id, ExampleTargetId::Target1 as u64);
assert_eq!(req_1.target_id, ExampleTargetId::Target1 as ComponentId);
assert_eq!(
req_1.request,
ModeRequest::SetMode {
@@ -850,9 +850,9 @@ mod tests {
fn create_default_mode_store() -> ModeStoreVec {
let mut mode_store = ModeStoreVec::default();
mode_store.add_component(ExampleTargetId::Target0 as u64, UNKNOWN_MODE);
mode_store.add_component(ExampleTargetId::Target1 as u64, UNKNOWN_MODE);
mode_store.add_component(ExampleTargetId::Target2 as u64, UNKNOWN_MODE);
mode_store.add_component(ExampleTargetId::Target0 as ComponentId, UNKNOWN_MODE);
mode_store.add_component(ExampleTargetId::Target1 as ComponentId, UNKNOWN_MODE);
mode_store.add_component(ExampleTargetId::Target2 as ComponentId, UNKNOWN_MODE);
mode_store
}
@@ -863,13 +863,13 @@ mod tests {
let mut table_seq_0 = SequenceTableMapTable::new("MODE_0_SEQ_0");
table_seq_0.add_entry(SequenceTableEntry::new(
"TARGET_0",
ExampleTargetId::Target0 as u64,
ExampleTargetId::Target0 as ComponentId,
SUBSYSTEM_MD0_TGT0_MODE,
false,
));
table_seq_0.add_entry(SequenceTableEntry::new(
"TARGET_1",
ExampleTargetId::Target1 as u64,
ExampleTargetId::Target1 as ComponentId,
SUBSYSTEM_MD0_TGT1_MODE,
false,
));
@@ -881,13 +881,13 @@ mod tests {
let mut table_seq_0 = SequenceTableMapTable::new("MODE_1_SEQ_0");
table_seq_0.add_entry(SequenceTableEntry::new(
"MD1_SEQ0_TGT0",
ExampleTargetId::Target0 as u64,
ExampleTargetId::Target0 as ComponentId,
SUBSYSTEM_MD1_ST0_TGT0_MODE,
false,
));
table_seq_0.add_entry(SequenceTableEntry::new(
"MD1_SEQ0_TGT1",
ExampleTargetId::Target1 as u64,
ExampleTargetId::Target1 as ComponentId,
SUBSYSTEM_MD1_ST0_TGT1_MODE,
false,
));
@@ -895,7 +895,7 @@ mod tests {
let mut table_seq_1 = SequenceTableMapTable::new("MODE_1_SEQ_1");
table_seq_1.add_entry(SequenceTableEntry::new(
"MD1_SEQ1_TGT2",
ExampleTargetId::Target2 as u64,
ExampleTargetId::Target2 as ComponentId,
SUBSYSTEM_MD1_ST1_TGT2_MODE,
false,
));
@@ -1263,11 +1263,11 @@ mod tests {
assert_eq!(req.request, ModeRequest::AnnounceModeRecursive);
};
let req0 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req0, ExampleTargetId::Target0 as u64);
check_req(req0, ExampleTargetId::Target0 as ComponentId);
let req1 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req1, ExampleTargetId::Target1 as u64);
check_req(req1, ExampleTargetId::Target1 as ComponentId);
let req2 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req2, ExampleTargetId::Target2 as u64);
check_req(req2, ExampleTargetId::Target2 as ComponentId);
}
#[test]
@@ -1283,11 +1283,11 @@ mod tests {
assert_eq!(req.request, ModeRequest::AnnounceMode);
};
let req0 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req0, ExampleTargetId::Target0 as u64);
check_req(req0, ExampleTargetId::Target0 as ComponentId);
let req1 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req1, ExampleTargetId::Target1 as u64);
check_req(req1, ExampleTargetId::Target1 as ComponentId);
let req2 = tb.sender.requests.borrow_mut().pop_front().unwrap();
check_req(req2, ExampleTargetId::Target2 as u64);
check_req(req2, ExampleTargetId::Target2 as ComponentId);
}
#[test]

View File

@@ -1,7 +1,7 @@
use core::fmt::Debug;
/// Generic abstraction for a check/countdown timer.
pub trait CountdownProvider: Debug {
pub trait Countdown: Debug {
fn has_expired(&self) -> bool;
fn reset(&mut self);
}
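
The countdown abstraction above is only renamed (`CountdownProvider` becomes `Countdown`); its two methods are unchanged. A minimal implementor sketch with a made-up struct name:

```rust
use core::fmt::Debug;

// Trait as shown in the diff above.
pub trait Countdown: Debug {
    fn has_expired(&self) -> bool;
    fn reset(&mut self);
}

// Hypothetical implementor, for illustration only.
#[derive(Debug, Default)]
struct ManualCountdown {
    expired: bool,
}

impl Countdown for ManualCountdown {
    fn has_expired(&self) -> bool {
        self.expired
    }

    fn reset(&mut self) {
        self.expired = false;
    }
}

fn main() {
    let mut countdown = ManualCountdown::default();
    assert!(!countdown.has_expired());
    countdown.reset();
}
```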

Some files were not shown because too many files have changed in this diff